diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 23106888..105be092 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -7,15 +7,13 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [macos-latest, ubuntu-latest]
+ os: [macos-13, ubuntu-latest]
steps:
- - name: Set up Go 1.x
- uses: actions/setup-go@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-go@v4
with:
- go-version: ^1.17
- id: go
- - name: Check out code into the Go module directory
- uses: actions/checkout@v2
+ go-version-file: go.mod
+ check-latest: true
- name: Compile
run: make install
test:
@@ -23,15 +21,17 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [macos-latest, ubuntu-latest]
+ os: [macos-13, ubuntu-latest]
steps:
- - name: Set up Go 1.x
- uses: actions/setup-go@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-go@v4
with:
- go-version: ^1.17
- id: go
- - name: Check out code into the Go module directory
- uses: actions/checkout@v2
+ go-version-file: go.mod
+ check-latest: true
+ - name: Install certutil on macOS
+ if: ${{ matrix.os == 'macos-13' }}
+ run: |
+ brew install nss
- name: Test
run: make test
vet:
@@ -39,12 +39,13 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [macos-latest, ubuntu-latest]
+ os: [macos-13, ubuntu-latest]
steps:
- - name: Check out code into the Go module directory
- uses: actions/checkout@v2
- - name: golangci-lint
- uses: golangci/golangci-lint-action@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-go@v4
+ with:
+ go-version-file: go.mod
+ check-latest: true
+ - uses: golangci/golangci-lint-action@v3
with:
- version: v1.35
args: --timeout 180s
diff --git a/Makefile b/Makefile
index 7a7c181e..be12fd07 100644
--- a/Makefile
+++ b/Makefile
@@ -10,8 +10,8 @@ build:
.PHONY: vet
vet:
- docker run --rm -v $$(pwd):/app -w /app golangci/golangci-lint:latest golangci-lint run -v
+ docker run --rm -v $(CURDIR):/app -w /app golangci/golangci-lint:latest golangci-lint run -v
.PHONY: test
test:
- go test ./... -timeout=180s -coverprofile=c.out -covermode=atomic -count=1 -race -v
+ go test ./... -timeout=5m -coverprofile=c.out -covermode=atomic -count=1 -race -v
diff --git a/app.go b/app.go
index 217b43c1..43d5e4ea 100644
--- a/app.go
+++ b/app.go
@@ -2,9 +2,9 @@ package candy
import (
"fmt"
- "io/ioutil"
"net"
"net/url"
+ "os"
"path/filepath"
"strconv"
"strings"
@@ -29,7 +29,7 @@ type AppService struct {
}
func (f *AppService) FindApps() ([]App, error) {
- files, err := ioutil.ReadDir(f.cfg.HostRoot)
+ files, err := os.ReadDir(f.cfg.HostRoot)
if err != nil {
return nil, err
}
@@ -41,7 +41,7 @@ func (f *AppService) FindApps() ([]App, error) {
continue
}
- b, err := ioutil.ReadFile(filepath.Join(f.cfg.HostRoot, file.Name()))
+ b, err := os.ReadFile(filepath.Join(f.cfg.HostRoot, file.Name()))
if err != nil {
return nil, err
}
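
Note: os.ReadDir is not a byte-for-byte swap for ioutil.ReadDir — it returns []os.DirEntry instead of []os.FileInfo and skips the per-entry Stat. The hunk above only needs Name() (and IsDir()), both of which DirEntry provides, so FindApps compiles unchanged; callers that need size or mode must now call Info() explicitly. A minimal sketch of the new shape (hypothetical, not part of this patch):

    package main

    import (
        "fmt"
        "log"
        "os"
    )

    func main() {
        entries, err := os.ReadDir(".") // []os.DirEntry; no Stat per entry
        if err != nil {
            log.Fatal(err)
        }
        for _, e := range entries {
            if e.IsDir() {
                continue
            }
            info, err := e.Info() // fetch FileInfo lazily, only when needed
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(e.Name(), info.Size())
        }
    }
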
diff --git a/app_test.go b/app_test.go
index ecbc120a..1e52da38 100644
--- a/app_test.go
+++ b/app_test.go
@@ -1,7 +1,7 @@
package candy
import (
- "io/ioutil"
+ "os"
"path/filepath"
"testing"
@@ -104,7 +104,7 @@ func Test_AppService_FindApps(t *testing.T) {
dir := t.TempDir()
for k, v := range cc.Hosts {
- if err := ioutil.WriteFile(filepath.Join(dir, k), []byte(v), 0o0644); err != nil {
+ if err := os.WriteFile(filepath.Join(dir, k), []byte(v), 0o0644); err != nil {
t.Fatalf("error writing test hosts: %s", err)
}
}
diff --git a/caddy/server.go b/caddy/server.go
index feb3cbef..11950b79 100644
--- a/caddy/server.go
+++ b/caddy/server.go
@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
"strconv"
@@ -54,6 +53,29 @@ type caddyServer struct {
caddyCfgMutex sync.Mutex
}
+func (c *caddyServer) waitForServer(ctx context.Context) error {
+ ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+ defer cancel()
+
+ t := time.NewTicker(1 * time.Second)
+ defer t.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-t.C:
+ c.cfg.Logger.Info("waiting for Caddy server", zap.Any("cfg", c.cfg))
+ err := c.apiRequest(ctx, http.MethodGet, "/config/", nil)
+ if err == nil {
+ return nil
+ } else {
+ c.cfg.Logger.Debug("error waiting for Caddy server", zap.Error(err))
+ }
+ }
+ }
+}
+
func (c *caddyServer) Run(ctx context.Context) error {
c.cfg.Logger.Info("starting Caddy server", zap.Any("cfg", c.cfg))
defer c.cfg.Logger.Info("shutting down Caddy server")
@@ -64,6 +86,10 @@ func (c *caddyServer) Run(ctx context.Context) error {
return err
}
+ if err := c.waitForServer(ctx); err != nil {
+ return err
+ }
+
<-ctx.Done()
if err := c.stopServer(); err != nil {
@@ -139,7 +165,6 @@ func (c *caddyServer) buildConfig(apps []candy.App) *caddy.Config {
),
Listen: []string{c.cfg.HTTPAddr},
AutoHTTPS: &caddyhttp.AutoHTTPSConfig{Disabled: true},
- AllowH2C: true,
}
httpsServer := &caddyhttp.Server{
@@ -155,8 +180,7 @@ func (c *caddyServer) buildConfig(apps []candy.App) *caddy.Config {
},
apps,
),
- Listen: []string{c.cfg.HTTPSAddr},
- AllowH2C: true,
+ Listen: []string{c.cfg.HTTPSAddr},
}
// Best efforts of parsing corresponding port from addr
@@ -180,7 +204,7 @@ func (c *caddyServer) buildConfig(apps []candy.App) *caddy.Config {
Automation: &caddytls.AutomationConfig{
Policies: []*caddytls.AutomationPolicy{
{
- Subjects: appHosts(apps),
+ SubjectsRaw: appHosts(apps),
IssuersRaw: []json.RawMessage{
caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", nil),
},
@@ -275,7 +299,7 @@ func (c *caddyServer) apiRequest(ctx context.Context, method, uri string, v inte
// if it didn't work, let the user know
if resp.StatusCode >= 400 {
- respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024*10))
+ respBody, err := io.ReadAll(io.LimitReader(resp.Body, 1024*10))
if err != nil {
return fmt.Errorf("HTTP %d: reading error message: %v", resp.StatusCode, err)
}
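
Note: two removals in this file track API renames between Caddy v2.4 and v2.7 rather than a behavior choice made here: caddyhttp.Server no longer has an AllowH2C field (since v2.6, cleartext HTTP/2 is opt-in via the Protocols field), and caddytls.AutomationPolicy's Subjects became SubjectsRaw. Separately, the new waitForServer gate polls the admin API's /config/ endpoint once per second for up to five seconds, so Run only parks on ctx after the admin endpoint answers. If h2c were still wanted, a sketch of the replacement would look like this (hypothetical helper, not part of this patch):

    package caddy

    import (
        "github.com/caddyserver/caddy/v2/modules/caddyhttp"
    )

    // newH2CServer sketches the Caddy >= 2.6 replacement for the removed
    // AllowH2C flag: cleartext HTTP/2 ("h2c") must be listed explicitly
    // in Protocols, alongside the usual h1 and h2.
    func newH2CServer(addr string) *caddyhttp.Server {
        return &caddyhttp.Server{
            Listen:    []string{addr},
            Protocols: []string{"h1", "h2", "h2c"},
        }
    }
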
diff --git a/cmd/candy/cmd/launch_darwin.go b/cmd/candy/cmd/launch_darwin.go
index 562a41c3..0bce13ea 100644
--- a/cmd/candy/cmd/launch_darwin.go
+++ b/cmd/candy/cmd/launch_darwin.go
@@ -1,4 +1,4 @@
-// +build darwin
+//go:build darwin
package cmd
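
Note: //go:build (Go 1.17+) supersedes the old // +build lines, replacing their space/comma rules with ordinary boolean expressions that gofmt verifies against the file. gofmt keeps both forms only while a module still targets pre-1.17 toolchains; with go.mod bumped to go 1.21 below, the legacy line can safely be dropped, as done here. A hypothetical file showing the expression syntax:

    // Hypothetical example: &&, || and ! replace the old "// +build"
    // space (OR) and comma (AND) rules; the blank line before the
    // package clause is required.
    //go:build (darwin || linux) && !ios

    package cmd
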
diff --git a/cmd/candy/cmd/setup_darwin.go b/cmd/candy/cmd/setup_darwin.go
index 4bd295e5..73699373 100644
--- a/cmd/candy/cmd/setup_darwin.go
+++ b/cmd/candy/cmd/setup_darwin.go
@@ -1,11 +1,10 @@
-// +build darwin
+//go:build darwin
package cmd
import (
"errors"
"fmt"
- "io/ioutil"
"net"
"os"
"path/filepath"
@@ -75,7 +74,7 @@ func runSetupRunE(c *cobra.Command, args []string) error {
file := filepath.Join(resolverDir, "candy-"+domain)
content := fmt.Sprintf(resolverTmpl, domain, host, port)
- b, err := ioutil.ReadFile(file)
+ b, err := os.ReadFile(file)
if err == nil {
if string(b) == content {
logger.Info("resolver configuration file unchanged", zap.String("file", file))
@@ -84,7 +83,7 @@ func runSetupRunE(c *cobra.Command, args []string) error {
}
logger.Info("writing resolver configuration file", zap.String("file", file))
- if err := ioutil.WriteFile(file, []byte(content), 0o644); err != nil {
+ if err := os.WriteFile(file, []byte(content), 0o644); err != nil {
return err
}
}
diff --git a/cmd/candy/cmd/setup_linux.go b/cmd/candy/cmd/setup_linux.go
index 1e50045a..fce7ec09 100644
--- a/cmd/candy/cmd/setup_linux.go
+++ b/cmd/candy/cmd/setup_linux.go
@@ -1,11 +1,10 @@
-// +build linux
+//go:build linux
package cmd
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -69,7 +68,7 @@ func runSetupRunE(c *cobra.Command, args []string) error {
logger = candy.Log()
)
- b, err := ioutil.ReadFile(file)
+ b, err := os.ReadFile(file)
if err == nil {
if string(b) == content {
logger.Info("network name resolution file unchanged", zap.String("file", file))
@@ -78,7 +77,7 @@ func runSetupRunE(c *cobra.Command, args []string) error {
}
logger.Info("writing network name resolution file", zap.String("file", file))
- if err := ioutil.WriteFile(file, []byte(content), 0o644); err != nil {
+ if err := os.WriteFile(file, []byte(content), 0o644); err != nil {
return err
}
diff --git a/dns/server.go b/dns/server.go
index fd1bee21..97c11f24 100644
--- a/dns/server.go
+++ b/dns/server.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"net"
- "sync"
"time"
"github.com/miekg/dns"
@@ -41,36 +40,26 @@ func (d *dnsServer) Run(ctx context.Context) error {
var g run.Group
{
- var wg sync.WaitGroup
- wg.Add(1)
udp := &dns.Server{
Handler: mux,
Addr: d.cfg.Addr,
Net: "udp",
}
g.Add(func() error {
- wg.Done()
return udp.ListenAndServe()
}, func(err error) {
- // Wait for udp server before shutting it down
- wg.Wait()
_ = udp.ShutdownContext(ctx)
})
}
{
- var wg sync.WaitGroup
- wg.Add(1)
tcp := &dns.Server{
Handler: mux,
Addr: d.cfg.Addr,
Net: "tcp",
}
g.Add(func() error {
- wg.Done()
return tcp.ListenAndServe()
}, func(err error) {
- // Wait for tcp server before shutting it down
- wg.Wait()
_ = tcp.ShutdownContext(ctx)
})
}
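
Note: removing the WaitGroup handshake assumes dns.Server.ShutdownContext is safe however far ListenAndServe has gotten — oklog/run gives no ordering guarantee between an actor's execute and interrupt functions, and miekg/dns answers shutdown of a never-started server with a "server not started" error, which the `_ =` discard absorbs. A self-contained sketch of the same actor pattern (hypothetical address):

    package main

    import (
        "context"
        "log"

        "github.com/miekg/dns"
        "github.com/oklog/run"
    )

    // serveUDP wires a dns.Server into a run.Group actor pair. The
    // interrupt function can fire before ListenAndServe has bound the
    // socket; ShutdownContext tolerates that, and its error is dropped
    // just as in the patch above.
    func serveUDP(ctx context.Context, addr string) error {
        var g run.Group
        srv := &dns.Server{Addr: addr, Net: "udp"}
        g.Add(func() error {
            return srv.ListenAndServe()
        }, func(err error) {
            _ = srv.ShutdownContext(ctx)
        })
        return g.Run()
    }

    func main() {
        if err := serveUDP(context.Background(), "127.0.0.1:5353"); err != nil {
            log.Fatal(err)
        }
    }
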
diff --git a/go.mod b/go.mod
index 2328eaa0..587b8279 100644
--- a/go.mod
+++ b/go.mod
@@ -1,116 +1,125 @@
module github.com/owenthereal/candy
-go 1.17
+go 1.21
require (
- github.com/caddyserver/caddy/v2 v2.4.5
- github.com/fsnotify/fsnotify v1.5.1
- github.com/google/go-cmp v0.5.6
- github.com/miekg/dns v1.1.43
+ github.com/caddyserver/caddy/v2 v2.7.4
+ github.com/fsnotify/fsnotify v1.6.0
+ github.com/google/go-cmp v0.5.9
+ github.com/miekg/dns v1.1.55
github.com/oklog/run v1.1.1-0.20200508094559-c7096881717e
- github.com/spf13/cobra v1.2.1
+ github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.9.0
- go.uber.org/zap v1.19.1
+ go.uber.org/zap v1.25.0
inet.af/tcpproxy v0.0.0-20210824174053-2e577fef49e2
)
require (
- cloud.google.com/go/kms v1.0.0 // indirect
+ filippo.io/edwards25519 v1.0.0 // indirect
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
- github.com/Masterminds/semver/v3 v3.1.1 // indirect
- github.com/Masterminds/sprig/v3 v3.2.2 // indirect
- github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f // indirect
+ github.com/Masterminds/semver/v3 v3.2.0 // indirect
+ github.com/Masterminds/sprig/v3 v3.2.3 // indirect
+ github.com/Microsoft/go-winio v0.6.0 // indirect
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
+ github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/caddyserver/certmagic v0.14.5 // indirect
+ github.com/caddyserver/certmagic v0.19.2 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
- github.com/cespare/xxhash/v2 v2.1.1 // indirect
- github.com/cheekybits/genny v1.0.0 // indirect
- github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/chzyer/readline v1.5.1 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/dgraph-io/badger v1.6.2 // indirect
github.com/dgraph-io/badger/v2 v2.2007.4 // indirect
- github.com/dgraph-io/ristretto v0.0.4-0.20200906165740-41ebdbffecfd // indirect
+ github.com/dgraph-io/ristretto v0.1.0 // indirect
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
- github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac // indirect
+ github.com/dustin/go-humanize v1.0.1 // indirect
github.com/go-kit/kit v0.10.0 // indirect
- github.com/go-logfmt/logfmt v0.5.0 // indirect
- github.com/go-sql-driver/mysql v1.6.0 // indirect
- github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/golang/snappy v0.0.3 // indirect
- github.com/google/cel-go v0.7.3 // indirect
+ github.com/go-logfmt/logfmt v0.5.1 // indirect
+ github.com/go-sql-driver/mysql v1.7.0 // indirect
+ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/golang/glog v1.1.0 // indirect
+ github.com/golang/mock v1.6.0 // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
+ github.com/golang/snappy v0.0.4 // indirect
+ github.com/google/cel-go v0.15.1 // indirect
+ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/huandu/xstrings v1.3.1 // indirect
- github.com/imdario/mergo v0.3.11 // indirect
- github.com/inconshreveable/mousetrap v1.0.0 // indirect
- github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a // indirect
- github.com/klauspost/compress v1.13.4 // indirect
- github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+ github.com/huandu/xstrings v1.3.3 // indirect
+ github.com/imdario/mergo v0.3.12 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jackc/chunkreader/v2 v2.0.1 // indirect
+ github.com/jackc/pgconn v1.14.0 // indirect
+ github.com/jackc/pgio v1.0.0 // indirect
+ github.com/jackc/pgpassfile v1.0.0 // indirect
+ github.com/jackc/pgproto3/v2 v2.3.2 // indirect
+ github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
+ github.com/jackc/pgtype v1.14.0 // indirect
+ github.com/jackc/pgx/v4 v4.18.0 // indirect
+ github.com/klauspost/compress v1.16.7 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/libdns/libdns v0.2.1 // indirect
- github.com/lucas-clemente/quic-go v0.23.0 // indirect
- github.com/lunixbochs/vtclean v1.0.0 // indirect
github.com/magiconair/properties v1.8.5 // indirect
- github.com/manifoldco/promptui v0.8.0 // indirect
- github.com/marten-seemann/qpack v0.2.1 // indirect
- github.com/marten-seemann/qtls-go1-16 v0.1.4 // indirect
- github.com/marten-seemann/qtls-go1-17 v0.1.0 // indirect
+ github.com/manifoldco/promptui v0.9.0 // indirect
+ github.com/mastercactapus/proxyprotocol v0.0.4 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
- github.com/mattn/go-isatty v0.0.13 // indirect
+ github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
- github.com/mholt/acmez v1.0.0 // indirect
- github.com/micromdm/scep/v2 v2.0.0 // indirect
- github.com/mitchellh/copystructure v1.0.0 // indirect
- github.com/mitchellh/mapstructure v1.4.2 // indirect
- github.com/mitchellh/reflectwalk v1.0.1 // indirect
- github.com/nxadm/tail v1.4.8 // indirect
- github.com/onsi/ginkgo v1.16.4 // indirect
+ github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
+ github.com/mholt/acmez v1.2.0 // indirect
+ github.com/micromdm/scep/v2 v2.1.0 // indirect
+ github.com/mitchellh/copystructure v1.2.0 // indirect
+ github.com/mitchellh/go-ps v1.0.0 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/mitchellh/reflectwalk v1.0.2 // indirect
+ github.com/onsi/ginkgo/v2 v2.9.5 // indirect
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/prometheus/client_golang v1.11.0 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/common v0.26.0 // indirect
- github.com/prometheus/procfs v0.6.0 // indirect
- github.com/rs/xid v1.2.1 // indirect
- github.com/russross/blackfriday/v2 v2.0.1 // indirect
- github.com/samfoo/ansi v0.0.0-20160124022901-b6bd2ded7189 // indirect
+ github.com/prometheus/client_golang v1.14.0 // indirect
+ github.com/prometheus/client_model v0.3.0 // indirect
+ github.com/prometheus/common v0.37.0 // indirect
+ github.com/prometheus/procfs v0.8.0 // indirect
+ github.com/quic-go/qpack v0.4.0 // indirect
+ github.com/quic-go/qtls-go1-20 v0.3.1 // indirect
+ github.com/quic-go/quic-go v0.37.5 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
- github.com/sirupsen/logrus v1.7.0 // indirect
- github.com/smallstep/certificates v0.16.4 // indirect
- github.com/smallstep/cli v0.16.1 // indirect
- github.com/smallstep/nosql v0.3.8 // indirect
- github.com/smallstep/truststore v0.9.6 // indirect
+ github.com/slackhq/nebula v1.6.1 // indirect
+ github.com/smallstep/certificates v0.24.3-rc.5 // indirect
+ github.com/smallstep/nosql v0.6.0 // indirect
+ github.com/smallstep/truststore v0.12.1 // indirect
github.com/spf13/afero v1.6.0 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
- github.com/urfave/cli v1.22.5 // indirect
- go.etcd.io/bbolt v1.3.6 // indirect
- go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
- go.step.sm/cli-utils v0.4.1 // indirect
- go.step.sm/crypto v0.9.0 // indirect
- go.step.sm/linkedca v0.0.0-20210611183751-27424aae8d25 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.6.0 // indirect
- golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
- golang.org/x/mod v0.4.2 // indirect
- golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect
- golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365 // indirect
- golang.org/x/term v0.0.0-20210503060354-a79de5458b56 // indirect
- golang.org/x/text v0.3.6 // indirect
- golang.org/x/tools v0.1.5 // indirect
- golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
- google.golang.org/api v0.57.0 // indirect
- google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6 // indirect
- google.golang.org/grpc v1.40.0 // indirect
- google.golang.org/protobuf v1.27.1 // indirect
+ github.com/tailscale/tscert v0.0.0-20230509043813-4e9cb4f2b4ad // indirect
+ github.com/urfave/cli v1.22.14 // indirect
+ github.com/zeebo/blake3 v0.2.3 // indirect
+ go.etcd.io/bbolt v1.3.7 // indirect
+ go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
+ go.step.sm/cli-utils v0.8.0 // indirect
+ go.step.sm/crypto v0.33.0 // indirect
+ go.step.sm/linkedca v0.20.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/crypto v0.12.0 // indirect
+ golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 // indirect
+ golang.org/x/mod v0.11.0 // indirect
+ golang.org/x/net v0.14.0 // indirect
+ golang.org/x/sys v0.11.0 // indirect
+ golang.org/x/term v0.11.0 // indirect
+ golang.org/x/text v0.12.0 // indirect
+ golang.org/x/tools v0.10.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
+ google.golang.org/grpc v1.56.2 // indirect
+ google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/ini.v1 v1.63.2 // indirect
- gopkg.in/square/go-jose.v2 v2.5.1 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+ gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ howett.net/plist v1.0.0 // indirect
)
diff --git a/go.sum b/go.sum
index db6c9da2..5753e41b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,14 +1,8 @@
-bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -21,7 +15,6 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
@@ -30,246 +23,158 @@ cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSU=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.94.1 h1:DwuSvDZ1pTYGbXo8yOJevCTr3BoBlE+OVkHAKiYQUXc=
-cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg=
+cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
-cloud.google.com/go/kms v1.0.0 h1:YkIeqPXqTAlwXk3Z2/WG0d6h1tqJQjU354WftjEoP9E=
-cloud.google.com/go/kms v1.0.0/go.mod h1:nhUehi+w7zht2XrUfvTRNpxrfayBHqP4lu2NSywui/0=
+cloud.google.com/go/iam v1.1.0 h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94=
+cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk=
+cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs=
+cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/spanner v1.17.0/go.mod h1:+17t2ixFwRG4lWRwE+5kipDR9Ef07Jkmc8z0IbMDKUs=
-cloud.google.com/go/spanner v1.18.0/go.mod h1:LvAjUXPeJRGNuGpikMULjhLj/t9cRvdc+fxRoLiugXA=
-cloud.google.com/go/spanner v1.20.0/go.mod h1:ajR/W06cMHQu7nqQ4irRGplPNoWgejGJlEhlB8xBTKk=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-code.gitea.io/sdk/gitea v0.11.3/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY=
-contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
-contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
-contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw=
-contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
-contrib.go.opencensus.io/exporter/stackdriver v0.13.7/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ=
-contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
-contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
-dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
-dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
-dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
-git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek=
+filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU=
-github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
-github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
-github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
-github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
-github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
-github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic=
-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
-github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
-github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
-github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
-github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA=
-github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=
-github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
-github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
+github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
+github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/ThalesIgnite/crypto11 v1.2.4/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
-github.com/ThomasRooney/gexpect v0.0.0-20161231170123-5482f0350944/go.mod h1:sPML5WwI6oxLRLPuuqbtoOKhtmpVDCYtwsps+I+vjIY=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
-github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38/go.mod h1:r7bzyVFMNntcxPZXK3/+KdruV1H5KSlyVY0gc+NgInI=
-github.com/alecthomas/chroma v0.7.2-0.20200305040604-4f3623dce67a/go.mod h1:fv5SzZPFJbwp2NXJWpFIX7DZS4HgV1K4ew4Pc2OZD9s=
-github.com/alecthomas/chroma v0.9.2/go.mod h1:eMuEnpA18XbG/WhOWtCzJHS7WqEtDAI+HxdwoW0nVSk=
-github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721/go.mod h1:QO9JBoKquHd+jz9nshCh40fOfO+JzsoXy8qTHF68zU0=
-github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
-github.com/alecthomas/kong v0.2.1-0.20190708041108-0548c6b1afae/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI=
-github.com/alecthomas/kong v0.2.4/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE=
-github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f h1:0cEys61Sr2hUBEXfNV8eyQP01oZuBgoMeHunebPirK8=
-github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y=
-github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
-github.com/apache/beam v2.28.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o=
-github.com/apache/beam v2.30.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ=
-github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
-github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
-github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b h1:uUXgbcPDK3KpW29o4iy7GtuappbWT0l5NaMo9H9pJDw=
github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
-github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.30.29/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk=
-github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.44.307 h1:2R0/EPgpZcFSUwZhYImq/srjaOrOfLv5MNRzrFyAM38=
+github.com/aws/aws-sdk-go v1.44.307/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
-github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
+github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
-github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
-github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
-github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
-github.com/caddyserver/caddy/v2 v2.4.5 h1:P1mRs6V2cMcagSPn+NWpD+OEYUYLIf6ecOa48cFGeUg=
-github.com/caddyserver/caddy/v2 v2.4.5/go.mod h1:YhfZAAh3jWSbG6rEEOM49FwxmcbLY2fZQVlo59Sc/80=
-github.com/caddyserver/certmagic v0.14.5 h1:y4HcFzLLBMsTv8sSlAPj5K55mvntX8e8ExcmB/lhO6w=
-github.com/caddyserver/certmagic v0.14.5/go.mod h1:/0VQ5og2Jxa5yBQ8eT80wWS7fi/DgNy1uXeXRUJ1Wj0=
-github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
+github.com/caddyserver/caddy/v2 v2.7.4 h1:J8nisjdOxnYHXlorUKXY75Gr6iBfudfoGhrJ8t7/flI=
+github.com/caddyserver/caddy/v2 v2.7.4/go.mod h1:/OH2g/56QCSCajEWsFa8kjwacziG/YFxeWgKacnK6KE=
+github.com/caddyserver/certmagic v0.19.2 h1:HZd1AKLx4592MalEGQS39DKs2ZOAJCEM/xYPMQ2/ui0=
+github.com/caddyserver/certmagic v0.19.2/go.mod h1:fsL01NomQ6N+kE2j37ZCnig2MFosG+MIO4ztnmG/zz8=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
-github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
-github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
-github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
+github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
+github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU=
-github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964/go.mod h1:Xd9hchkHSWYkEqJwUGisez3G1QY8Ryz0sdWrLPMGjLk=
-github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8=
github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE=
-github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20201003150343-5d1bab4fc658/go.mod h1:2uGEvGm+JSDLd5UAaKIFSbXDcYyeH0fWJP4N2HMMYMI=
github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o=
github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk=
github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
-github.com/dgraph-io/ristretto v0.0.4-0.20200906165740-41ebdbffecfd h1:KoJOtZf+6wpQaDTuOWGuo61GxcPBIfhwRxRTaTWGCTc=
-github.com/dgraph-io/ristretto v0.0.4-0.20200906165740-41ebdbffecfd/go.mod h1:YylP9MpCYGVZQrly/j/diqcdUetCRRePeBB0c2VGXsA=
+github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
-github.com/dlclark/regexp2 v1.1.6/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac h1:opbrjaN/L8gg6Xh5D04Tem+8xVcz6ajZlGCs49mQgyg=
-github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -279,75 +184,57 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.3.0-java/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
-github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZUCkZIqFxsQf1o=
-github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
-github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
-github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.4.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-piv/piv-go v1.7.0/go.mod h1:ON2WvQncm7dIkCQ7kYJs+nc3V4jHGfrrJnSF8HKy7Gk=
-github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -374,23 +261,21 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cel-go v0.7.3 h1:8v9BSN0avuGwrHFKNCjfiQ/CE6+D6sW+BDyOVoEeP6o=
-github.com/google/cel-go v0.7.3/go.mod h1:4EtyFAHT5xNr0Msu0MJjyGxPUgdr9DlcaPyzLt/kkt8=
-github.com/google/cel-spec v0.5.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
-github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
-github.com/google/certificate-transparency-go v1.1.2-0.20210422104406-9f33727a7a18/go.mod h1:6CKh9dscIRoqc2kC6YUFICHZMT9NrClyPrRVFrdw1QQ=
-github.com/google/certificate-transparency-go v1.1.2-0.20210512142713-bed466244fa6/go.mod h1:aF2dp7Dh81mY8Y/zpzyXps4fQW5zQbDu2CxfpJB6NkI=
-github.com/google/certificate-transparency-go v1.1.2-0.20210623111010-a50f74f4ce95/go.mod h1:Qj+RD7dL44/KQVYkRk4wDVszkPOzxNcHmuX4HCMEqKg=
+github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.15.1 h1:iTgVZor2x9okXtmTrqO8cg4uvqIeaBcWhXtruaWFMYQ=
+github.com/google/cel-go v0.15.1/go.mod h1:YzWEoI07MC/a/wj9in8GeVatqfypkldgBlwXh9bCwqY=
+github.com/google/certificate-transparency-go v1.1.4 h1:hCyXHDbtqlr/lMXU0D4WgbalXL0Zk4dSWWMbPV8VrqY=
+github.com/google/certificate-transparency-go v1.1.4/go.mod h1:D6lvbfwckhNrbM9WVl1EVeMOyzC19mpIjMOI4nxBHtQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -402,18 +287,17 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
-github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
-github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
-github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-tpm v0.3.3 h1:P/ZFNBZYXRxc+z7i5uyd8VP7MaDteuLZInzrH2idRGo=
+github.com/google/go-tpm v0.3.3/go.mod h1:9Hyn3rgnzWF9XBWVk6ml6A6hNkbWjNFlDQL51BeghL4=
+github.com/google/go-tpm-tools v0.3.12 h1:hpWglH4RaZnGVbgOK3IThI5K++jnFvjQ94EIN34xrUU=
+github.com/google/go-tpm-tools v0.3.12/go.mod h1:2OtmyPGPuaWWIOjr+IDhNQb6t5njjbSmZtzc350Q6Ro=
+github.com/google/go-tspi v0.3.0 h1:ADtq8RKfP+jrTyIWIZDIYcKOMecRqNJFOew2IT0Inus=
+github.com/google/go-tspi v0.3.0/go.mod h1:xfMGI3G0PhxCdNVcYr1C4C+EizojDg/TXuX5by8CiHI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/licenseclassifier v0.0.0-20210325184830-bb04aff29e72/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
@@ -426,74 +310,50 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
-github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
-github.com/google/trillian v1.3.14-0.20210409160123-c5ea3abd4a41/go.mod h1:1dPv0CUjNQVFEDuAUFhZql16pw/VlPgaX8qj+g5pVzQ=
-github.com/google/trillian v1.3.14-0.20210511103300-67b5f349eefa/go.mod h1:s4jO3Ai4NSvxucdvqUHON0bCqJyoya32eNw6XJwsmNc=
-github.com/google/trillian v1.3.14-0.20210622121126-870e0cdde059/go.mod h1:77nhQ5M0g7nqL2S6sjQWUyqQ90h0X26T8cr0pQqqxec=
-github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s=
-github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
-github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=
-github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
-github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
+github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
+github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
-github.com/goreleaser/goreleaser v0.134.0/go.mod h1:ZT6Y2rSYa6NxQzIsdfWWNWAlYGXGbreo66NmE+3X3WQ=
-github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w=
github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.4.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/groob/finalizer v0.0.0-20170707115354-4c2ed49aabda/go.mod h1:MyndkAZd5rUMdNogn35MWXBX1UiBigrU8eTj8DoAC2c=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@@ -514,65 +374,92 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
-github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
-github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=
-github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo=
-github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
+github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.14.0 h1:vrbA9Ud87g6JdFWkHTJXppVce58qPIdP7N8y0Ml/A7Q=
+github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=
+github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0=
+github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
+github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw=
+github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
+github.com/jackc/pgx/v4 v4.18.0 h1:Ltaa1ePvc7msFGALnCrqKJVEByu/qYh5jJBYcDtAno4=
+github.com/jackc/pgx/v4 v4.18.0/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
-github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU=
-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
-github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s=
-github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
-github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
+github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
+github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -580,108 +467,86 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
-github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
+github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis=
github.com/libdns/libdns v0.2.1/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/lucas-clemente/quic-go v0.23.0 h1:5vFnKtZ6nHDFsc/F3uuiF4T3y/AXaQdxjUqiVw26GZE=
-github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0=
-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
-github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8=
-github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/manifoldco/promptui v0.8.0 h1:R95mMF+McvXZQ7j1g8ucVZE1gLP3Sv6j9vlF9kyRqQo=
-github.com/manifoldco/promptui v0.8.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ=
-github.com/marten-seemann/qpack v0.2.1 h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs=
-github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
-github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
-github.com/marten-seemann/qtls-go1-16 v0.1.4 h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco=
-github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
-github.com/marten-seemann/qtls-go1-17 v0.1.0 h1:P9ggrs5xtwiqXv/FHNwntmuLMNq3KaSIG93AtAZ48xk=
-github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8=
+github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
+github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
+github.com/mastercactapus/proxyprotocol v0.0.4 h1:qSY75IZF30ZqIU9iW1ip3I7gTnm8wRAnGWqPxCBVgq0=
+github.com/mastercactapus/proxyprotocol v0.0.4/go.mod h1:X8FRVEDZz9FkrIoL4QYTBF4Ka4ELwTv0sah0/5NxCPw=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
-github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
-github.com/mholt/acmez v1.0.0 h1:ZAdWrilnq41HTlUO0vMJ6C+z8ZvzQ9I2LR1/Bo+137U=
-github.com/mholt/acmez v1.0.0/go.mod h1:8qnn8QA/Ewx8E3ZSsmscqsIjhhpxuy9vqdgbX2ceceM=
-github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
-github.com/micromdm/scep/v2 v2.0.0 h1:cRzcY0S5QX+0+J+7YC4P2uZSnfMup8S8zJu/bLFgOkA=
-github.com/micromdm/scep/v2 v2.0.0/go.mod h1:ouaDs5tcjOjdHD/h8BGaQsWE87MUnQ/wMTMgfMMIpPc=
+github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
+github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/mholt/acmez v1.2.0 h1:1hhLxSgY5FvH5HCnGUuwbKY2VQVo8IU7rxXKSnZ7F30=
+github.com/mholt/acmez v1.2.0/go.mod h1:VT9YwH1xgNX1kmYY89gY8xPJC84BFAisjo8Egigt4kE=
+github.com/micromdm/scep/v2 v2.1.0 h1:2fS9Rla7qRR266hvUoEauBJ7J6FhgssEiq2OkSKXmaU=
+github.com/micromdm/scep/v2 v2.1.0/go.mod h1:BkF7TkPPhmgJAMtHfP+sFTKXmgzNJgLQlvvGoOExBcc=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.42/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
-github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
-github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
-github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
+github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
-github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
+github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
-github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
-github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
-github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc=
-github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
-github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
@@ -689,60 +554,36 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
-github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/newrelic/go-agent v2.15.0+incompatible/go.mod h1:a8Fv1b/fYhFSReoTU6HDkTYIMZeSVNffmoS726Y0LzQ=
-github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM=
-github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/run v1.1.1-0.20200508094559-c7096881717e h1:bxQ+jj+8fdl9112bovUjD/14jj/uboMqjyVoFkqrdGg=
github.com/oklog/run v1.1.1-0.20200508094559-c7096881717e/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
-github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
-github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
+github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
-github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
+github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
-github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
-github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
-github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
-github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
-github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/peterbourgon/diskv/v3 v3.0.1 h1:x06SQA46+PKIUftmEujdwSEpIx8kR+M9eLYsUxeYveU=
+github.com/peterbourgon/diskv/v3 v3.0.1/go.mod h1:kJ5Ny7vLdARGU3WUuy6uzO6T0nb/2gWcT1JiBvRmb5o=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -751,136 +592,100 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/pquerna/otp v1.0.0/go.mod h1:Zad1CMQfSQZI5KLpahDiSUX4tMMREnXw98IvL1nhgMk=
-github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
-github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
-github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/pseudomuto/protoc-gen-doc v1.4.1/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg=
-github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
+github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
+github.com/quic-go/qtls-go1-20 v0.3.1 h1:O4BLOM3hwfVF3AcktIylQXyl7Yi2iBNVy5QsV+ySxbg=
+github.com/quic-go/qtls-go1-20 v0.3.1/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
+github.com/quic-go/quic-go v0.37.5 h1:pzkYe8AgaxHi+7KJrYBMF+u2rLO5a9kwyCp2dAsljzk=
+github.com/quic-go/quic-go v0.37.5/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
-github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
+github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
-github.com/samfoo/ansi v0.0.0-20160124022901-b6bd2ded7189 h1:CmSpbxmewNQbzqztaY0bke1qzHhyNyC29wYgh17Gxfo=
-github.com/samfoo/ansi v0.0.0-20160124022901-b6bd2ded7189/go.mod h1:UUwuHEJ9zkkPDxspIHOa59PUeSkGFljESGzbxntLmIg=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/schollz/jsonstore v1.1.0 h1:WZBDjgezFS34CHI+myb4s8GGpir3UMpy7vWoCeO0n6E=
+github.com/schollz/jsonstore v1.1.0/go.mod h1:15c6+9guw8vDRyozGjN3FoILt0wpruJk9Pi66vjaZfg=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
-github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
-github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
-github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
-github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
-github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
-github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
-github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
-github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
-github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
-github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
-github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
-github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
-github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
-github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
-github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
-github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
-github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
-github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smallstep/assert v0.0.0-20180720014142-de77670473b5/go.mod h1:TC9A4+RjIOS+HyTH7wG17/gSqVv95uDw2J64dQZx7RE=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/slackhq/nebula v1.6.1 h1:/OCTR3abj0Sbf2nGoLUrdDXImrCv0ZVFpVPP5qa0DsM=
+github.com/slackhq/nebula v1.6.1/go.mod h1:UmkqnXe4O53QwToSl/gG7sM4BroQwAB7dd4hUaT6MlI=
github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY=
github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc=
-github.com/smallstep/certificates v0.16.0/go.mod h1:oht6bnzBapjumPGXTZK/rBJYLO+O8/TTWt5/VlE9Wd4=
-github.com/smallstep/certificates v0.16.4 h1:/dhaR+6reeTHd2etVIjgpZI0CTn6USrhVqakoV0HZ0w=
-github.com/smallstep/certificates v0.16.4/go.mod h1:U3Dkt4ttxRxC4yPedzzAQokC121/7d3Sfnj6mNgpw7Q=
-github.com/smallstep/certinfo v1.5.1/go.mod h1:gA7HBbue0Wwr3kD60P2UtgTIFfMAOC66D3rzYhI0GZ4=
-github.com/smallstep/cli v0.16.1 h1:zFN/B5XF+WbvwfRya11SPfiT7g7LIMRSCaoeQNce3Hw=
-github.com/smallstep/cli v0.16.1/go.mod h1:C8IES4TcHN3/Va6x9B+ugJM1t0pwzICHAg+RB2FASg4=
-github.com/smallstep/nosql v0.3.6/go.mod h1:h1zC/Z54uNHc8euquLED4qJNCrMHd3nytA141ZZh4qQ=
-github.com/smallstep/nosql v0.3.8 h1:1/EWUbbEdz9ai0g9Fd09VekVjtxp+5+gIHpV2PdwW3o=
-github.com/smallstep/nosql v0.3.8/go.mod h1:X2qkYpNcW3yjLUvhEHfgGfClpKbFPapewvx7zo4TOFs=
-github.com/smallstep/truststore v0.9.6 h1:vNzEJmaJL0XOZD8uouXLmYu4/aP1UQ/wHUopH3qKeYA=
-github.com/smallstep/truststore v0.9.6/go.mod h1:HwHKRcBi0RUxxw1LYDpTRhYC4jZUuxPpkHdVonlkoDM=
-github.com/smallstep/zcrypto v0.0.0-20200203191936-fbc32cf76bce/go.mod h1:+F24VU3UCxfVFvvqgm5jNUFQOm/L6ed13ImwWGFgg/g=
-github.com/smallstep/zlint v0.0.0-20180727184541-d84eaafe274f/go.mod h1:GeHHT7sJDI9ti3oEaFnvx1F4N8n3ZSw2YM1+sbEoxc4=
+github.com/smallstep/certificates v0.24.3-rc.5 h1:l9N7NmFqW5it5UcDtbyZ4CrrvYYiHRM7Dj7Mk+YC1Io=
+github.com/smallstep/certificates v0.24.3-rc.5/go.mod h1:buhMLsuk9tp7JC1uHeN2sYQTXH1OzGn55erpzUIiOGo=
+github.com/smallstep/go-attestation v0.4.4-0.20230509120429-e17291421738 h1:h+cZgVniTaE0uuRMdxTThLaJeuxsv4aas6oStz6f5VQ=
+github.com/smallstep/go-attestation v0.4.4-0.20230509120429-e17291421738/go.mod h1:mk2hyNbyai1oon+ilW9t42BuBVw7ee8elDdgrPq4394=
+github.com/smallstep/nosql v0.6.0 h1:ur7ysI8s9st0cMXnTvB8tA3+x5Eifmkb6hl4uqNV5jc=
+github.com/smallstep/nosql v0.6.0/go.mod h1:jOXwLtockXORUPPZ2MCUcIkGR6w0cN1QGZniY9DITQA=
+github.com/smallstep/truststore v0.12.1 h1:guLUKkc1UlsXeS3t6BuVMa4leOOpdiv02PCRTiy1WdY=
+github.com/smallstep/truststore v0.12.1/go.mod h1:M4mebeNy28KusGX3lJxpLARIktLcyqBOrj3ZiZ46pqw=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
-github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
-github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
-github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -893,11 +698,8 @@ github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
-github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -906,12 +708,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk=
github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
-github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
@@ -920,43 +718,29 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
-github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU=
-github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
-github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
-github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
-github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
+github.com/tailscale/tscert v0.0.0-20230509043813-4e9cb4f2b4ad h1:JEOo9j4RzDPBJFTU9YZ/QPkLtfV8+6PbZFFOSUx5VP4=
+github.com/tailscale/tscert v0.0.0-20230509043813-4e9cb4f2b4ad/go.mod h1:kNGUQ3VESx3VZwRwA9MSCUegIl6+saPL8Noq82ozCaU=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
-github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
-github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
-github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
-github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
-github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
-github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
-github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
+github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=
+github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -964,45 +748,24 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.3.6/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark-highlighting v0.0.0-20210516132338-9216f9c5aa01/go.mod h1:TwKQPa5XkCCRC2GRZ5wtfNUTQ2+9/i19mGRijFeJ4BE=
-github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
-github.com/zmap/rc2 v0.0.0-20190804163417-abaa70531248/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
-github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is=
-github.com/zmap/zcertificate v0.0.0-20190521191901-30e388164f71/go.mod h1:gIZi1KPgkZNUQzPZXsZrNnUnxy05nTc0+tmlqvIkhRw=
-github.com/zmap/zcrypto v0.0.0-20190329181646-dff83107394d/go.mod h1:ix3q2kpLy0ibAuFXlr7qOhPKwFRRSjuynGuTR8EUPCk=
-github.com/zmap/zlint v0.0.0-20190516161541-9047d02cf65a/go.mod h1:xwLbce0UzBXp44sIAL1cii+hoK8j4AxRKlymZA2AIcY=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
+github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg=
+github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ=
+github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
+github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
+go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8=
-go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.etcd.io/etcd/etcdctl/v3 v3.5.0-alpha.0/go.mod h1:YPwSaBciV5G6Gpt435AasAG3ROetZsKNUzibRa/++oo=
-go.etcd.io/etcd/etcdctl/v3 v3.5.0/go.mod h1:vGTfKdsh87RI7kA2JHFBEGxjQEYx+pi299wqEOdi34M=
-go.etcd.io/etcd/etcdutl/v3 v3.5.0/go.mod h1:o98rKMCibbFAG8QS9KmvlYDGDShmmIbmRE8vSofzYNg=
-go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY=
-go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
-go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0/go.mod h1:FAwse6Zlm5v4tEWZaTjmNhe17Int4Oxbu7+2r0DiD3w=
-go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
-go.etcd.io/etcd/server/v3 v3.5.0-alpha.0/go.mod h1:tsKetYpt980ZTpzl/gb+UOJj9RkIyCb1u4wjzMg90BQ=
-go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
-go.etcd.io/etcd/tests/v3 v3.5.0-alpha.0/go.mod h1:HnrHxjyCuZ8YDt8PYVyQQ5d1ZQfzJVEtQWllr5Vp/30=
-go.etcd.io/etcd/tests/v3 v3.5.0/go.mod h1:f+mtZ1bE1YPvgKdOJV2BKy4JQW0nAFnQehgOE7+WyJE=
-go.etcd.io/etcd/v3 v3.5.0-alpha.0/go.mod h1:JZ79d3LV6NUfPjUxXrpiFAYcjhT+06qqw+i28snx8To=
-go.etcd.io/etcd/v3 v3.5.0/go.mod h1:FldM0/VzcxYWLvWx1sdA7ghKw7C3L2DvUTzGrcEtsC4=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
-go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
-go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go.mozilla.org/pkcs7 v0.0.0-20210730143726-725912489c62/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
+go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -1011,79 +774,57 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.step.sm/cli-utils v0.4.1 h1:QztRUhGYjOPM1I2Nmi7V6XejQyVtcESmo+sbegxvX7Q=
-go.step.sm/cli-utils v0.4.1/go.mod h1:hWYVOSlw8W9Pd+BwIbs/aftVVMRms3EG7Q2qLRwc0WA=
-go.step.sm/crypto v0.9.0 h1:q2AllTSnVj4NRtyEPkGW2ohArLmbGbe6ZAL/VIOKDzA=
-go.step.sm/crypto v0.9.0/go.mod h1:+CYG05Mek1YDqi5WK0ERc6cOpKly2i/a5aZmU1sfGj0=
-go.step.sm/linkedca v0.0.0-20210611183751-27424aae8d25 h1:ncJqviWswJT19IdnfOYQGKG1zL7IDy4lAJz1PuM3fgw=
-go.step.sm/linkedca v0.0.0-20210611183751-27424aae8d25/go.mod h1:5uTRjozEGSPAZal9xJqlaD38cvJcLe3o1VAFVjqcORo=
+go.step.sm/cli-utils v0.8.0 h1:b/Tc1/m3YuQq+u3ghTFP7Dz5zUekZj6GUmd5pCvkEXQ=
+go.step.sm/cli-utils v0.8.0/go.mod h1:S77aISrC0pKuflqiDfxxJlUbiXcAanyJ4POOnzFSxD4=
+go.step.sm/crypto v0.33.0 h1:fP8awo6YkZ0/rrLhzbHYA3U8g24VnWEebZRnGwUobRo=
+go.step.sm/crypto v0.33.0/go.mod h1:rMETKeIA1ZsLBiKT6phQ2IIeBH3GL+XqimeobcqUw1g=
+go.step.sm/linkedca v0.20.0 h1:bH41rvyDm3nSSJ5xgGsKUZOpzJcq5x2zacMIeqtq9oI=
+go.step.sm/linkedca v0.20.0/go.mod h1:eybHw6ZTpuFmkUQnTBRWM2SPIGaP0VbYeo1bupfPT70=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
-go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
-go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
-go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
-go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
-gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI=
-golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
-golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
+go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
-golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1094,10 +835,10 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
+golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 h1:LGJsf5LRplCck6jUCH3dBL2dmycNruWNF5xugkSlfXw=
+golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1120,39 +861,32 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
+golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20170726083632-f5079bd7f6f7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190301231341-16b79f2e4e95/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1160,11 +894,9 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -1172,92 +904,81 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
+golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20170728174421-0f826bdd13b5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181128092732-4ed8d59d0b35/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1272,38 +993,27 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1313,11 +1023,24 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365 h1:6wSTsvPddg9gc/mVEEyk9oOAoxn+bT4Z9q1zx+4RwA4=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210503060354-a79de5458b56 h1:b8jxX3zqjpqb2LklXPzKSGJhzyxCOZSz8ncv8Nv+y7w=
-golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1325,45 +1048,41 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191118222007-07fc4c7f2b98/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1381,49 +1100,40 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201014170642-d1624618ad65/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg=
+golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
@@ -1438,47 +1148,32 @@ google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSr
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.37.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
-google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA=
-google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.57.0 h1:4t9zuDlHLcIx0ZEhmXEeFVCRsiOgpgn2QOH9N0MNjPI=
-google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc=
+google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
-google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@@ -1495,7 +1190,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -1506,22 +1200,15 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210331142528-b7513248f0ba/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210427215850-f767ed18ee4d/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
@@ -1534,13 +1221,12 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6 h1:2ncG/LajxmrclaZH+ppVi02rQxz4eXYJzGHdFN4Y9UA=
-google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8=
+google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y=
+google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU=
+google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1559,7 +1245,6 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
@@ -1571,8 +1256,9 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
+google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1584,52 +1270,45 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c=
gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
-gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
-gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
+gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1638,17 +1317,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
-howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
+howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
+howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
inet.af/tcpproxy v0.0.0-20210824174053-2e577fef49e2 h1:5dsmluHaw3te6yeluBq4oe2VcZq3tljF8l661Chwzwc=
inet.af/tcpproxy v0.0.0-20210824174053-2e577fef49e2/go.mod h1:Tojt5kmHpDIR2jMojxzZK2w2ZR7OILODmUo2gaSwjrk=
-pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-software.sslmate.com/src/go-pkcs12 v0.0.0-20201103104416-57fc603b7f52/go.mod h1:/xvNRWUqm0+/ZMiF4EX00vrSCMsE4/NHb+Pt3freEeQ=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
-sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
-sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/server/server_test.go b/server/server_test.go
index 2bcdf769..1e12b6fc 100644
--- a/server/server_test.go
+++ b/server/server_test.go
@@ -3,7 +3,7 @@ package server
import (
"context"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"os"
@@ -14,6 +14,8 @@ import (
"time"
"github.com/google/go-cmp/cmp"
+ "github.com/owenthereal/candy"
+ "go.uber.org/zap"
)
func Test_Server(t *testing.T) {
@@ -26,7 +28,7 @@ func Test_Server(t *testing.T) {
tlds = []string{"go-test"}
)
- if err := ioutil.WriteFile(filepath.Join(hostRoot, "app"), []byte(adminAddr), 0o644); err != nil {
+ if err := os.WriteFile(filepath.Join(hostRoot, "app"), []byte(adminAddr), 0o644); err != nil {
t.Fatal(err)
}
@@ -39,8 +41,17 @@ func Test_Server(t *testing.T) {
DnsAddr: dnsAddr,
})
errch := make(chan error)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
go func() {
- errch <- svr.Run(context.Background())
+ err := svr.Run(ctx)
+ if err != nil {
+ candy.Log().Error("error running server", zap.Error(err))
+ }
+
+ errch <- err
}()
t.Run("http addr", func(t *testing.T) {
@@ -50,7 +61,7 @@ func Test_Server(t *testing.T) {
return err
}
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
@@ -72,7 +83,7 @@ func Test_Server(t *testing.T) {
return err
}
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
@@ -94,7 +105,7 @@ func Test_Server(t *testing.T) {
return nil
}
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
if err != nil {
return nil
}
@@ -129,33 +140,33 @@ func Test_Server(t *testing.T) {
})
t.Run("add new domain", func(t *testing.T) {
- if err := ioutil.WriteFile(filepath.Join(hostRoot, "app2"), []byte(adminAddr), 0o644); err != nil {
+ if err := os.WriteFile(filepath.Join(hostRoot, "app2"), []byte(adminAddr), 0o644); err != nil {
t.Fatal(err)
}
- r := &net.Resolver{
- PreferGo: true,
- Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
- return net.Dial("udp", dnsAddr)
- },
- }
+ waitUntil(t, 3, func() error {
+ r := &net.Resolver{
+ PreferGo: true,
+ Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
+ return net.Dial("udp", dnsAddr)
+ },
+ }
- ips, err := r.LookupHost(context.Background(), "app2.go-test")
- if err != nil {
- t.Fatal(err)
- }
+ ips, err := r.LookupHost(context.Background(), "app2.go-test")
+ if err != nil {
+ t.Fatal(err)
+ }
- if diff := cmp.Diff([]string{"127.0.0.1"}, ips); diff != "" {
- t.Fatalf("Unexpected IPs (-want +got): %s", diff)
- }
+ if diff := cmp.Diff([]string{"127.0.0.1"}, ips); diff != "" {
+ t.Fatalf("Unexpected IPs (-want +got): %s", diff)
+ }
- waitUntil(t, 3, func() error {
resp, err := http.Get(fmt.Sprintf("http://%s/config/apps/tls/automation/policies/0/subjects", adminAddr))
if err != nil {
return nil
}
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
if err != nil {
return nil
}
@@ -214,12 +225,12 @@ func Test_Server_Shutdown(t *testing.T) {
Config: Config{
HostRoot: hostRoot,
Domain: tlds,
- HttpAddr: "invalid-addr",
+ HttpAddr: "",
HttpsAddr: randomAddr(t),
- AdminAddr: "", // TODO: running into caddy race issue with `go test -race` when replacing admin server. Disabling admin server for this and report upstream.
+ AdminAddr: randomAddr(t),
DnsAddr: randomAddr(t),
},
- WantErrMsg: "address invalid-addr: missing port in address",
+ WantErrMsg: "loading new config: loading http app module: http: invalid configuration: invalid listener address '': missing port in address",
},
{
Name: "invalid admin addr",
@@ -231,31 +242,38 @@ func Test_Server_Shutdown(t *testing.T) {
AdminAddr: "invalid-addr",
DnsAddr: randomAddr(t),
},
- WantErrMsg: "address invalid-addr: missing port in address",
+ WantErrMsg: "loading new config: starting caddy administration endpoint: listen tcp: lookup invalid-addr",
},
{
Name: "invalid host root",
Config: Config{
- HostRoot: "invalid-host-root",
+ HostRoot: "/tmp/invalid-host-root",
Domain: tlds,
HttpAddr: randomAddr(t),
HttpsAddr: randomAddr(t),
AdminAddr: randomAddr(t),
DnsAddr: randomAddr(t),
},
- WantErrMsg: "invalid-host-root: no such file or directory",
+ WantErrMsg: "no such file or directory",
},
}
for _, c := range cases {
c := c
t.Run(c.Name, func(t *testing.T) {
- //t.Parallel()
-
errch := make(chan error)
srv := New(c.Config)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
go func() {
- errch <- srv.Run(context.Background())
+ err := srv.Run(ctx)
+ if err != nil {
+ candy.Log().Error("error running server", zap.Error(err))
+ }
+
+ errch <- err
}()
select {
@@ -275,7 +293,7 @@ func randomAddr(t *testing.T) string {
}
func randomPort(t *testing.T) string {
- listener, err := net.Listen("tcp", ":0")
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/github.com/AndreasBriese/bbloom/.travis.yml b/vendor/github.com/AndreasBriese/bbloom/.travis.yml
deleted file mode 100644
index 4f2ee4d9..00000000
--- a/vendor/github.com/AndreasBriese/bbloom/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: go
diff --git a/vendor/github.com/AndreasBriese/bbloom/LICENSE b/vendor/github.com/AndreasBriese/bbloom/LICENSE
deleted file mode 100644
index 4b20050e..00000000
--- a/vendor/github.com/AndreasBriese/bbloom/LICENSE
+++ /dev/null
@@ -1,35 +0,0 @@
-bbloom.go
-
-// The MIT License (MIT)
-// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-siphash.go
-
-// https://github.com/dchest/siphash
-//
-// Written in 2012 by Dmitry Chestnykh.
-//
-// To the extent possible under law, the author have dedicated all copyright
-// and related and neighboring rights to this software to the public domain
-// worldwide. This software is distributed without any warranty.
-// http://creativecommons.org/publicdomain/zero/1.0/
-//
-// Package siphash implements SipHash-2-4, a fast short-input PRF
-// created by Jean-Philippe Aumasson and Daniel J. Bernstein.
diff --git a/vendor/github.com/AndreasBriese/bbloom/README.md b/vendor/github.com/AndreasBriese/bbloom/README.md
deleted file mode 100644
index d7413c33..00000000
--- a/vendor/github.com/AndreasBriese/bbloom/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-## bbloom: a bitset Bloom filter for go/golang
-===
-
-[![Build Status](https://travis-ci.org/AndreasBriese/bbloom.png?branch=master)](http://travis-ci.org/AndreasBriese/bbloom)
-
-This package implements a fast bloom filter with a real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter.
-
-NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom
-
-===
-
-changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache.
-
-This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html".
-Nonetheless bbloom should work with any other form of entries.
-
-~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~
-
-Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein) to be about as fast. sipHash was ported to Go by Dmitry Chestnykh (github.com/dchest/siphash)
-
-Minimum hashset size is: 512 ([4]uint64; will be set automatically).
-
-### install
-
-```sh
-go get github.com/AndreasBriese/bbloom
-```
-
-### test
-+ change to folder ../bbloom
-+ create wordlist in file "words.txt" (you might use `python permut.py`)
-+ run 'go test -bench=.' within the folder
-
-```go
-go test -bench=.
-```
-
-~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~
-
-using Go's testing framework now (bear in mind that the op timing covers 65536 operations of Add, Has, and AddIfNotHas respectively)
-
-### usage
-
-after installation add
-
-```go
-import (
- ...
- "github.com/AndreasBriese/bbloom"
- ...
- )
-```
-
-at your header. In the program use
-
-```go
-// create a bloom filter for 65536 items and 1 % wrong-positive ratio
-bf := bbloom.New(float64(1<<16), float64(0.01))
-
-// or
-// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly
-// bf = bbloom.New(float64(650000), float64(7))
-// or
-bf = bbloom.New(650000.0, 7.0)
-
-// add one item
-bf.Add([]byte("butter"))
-
-// Number of elements added is exposed now
-// Note: ElemNum will not be included in JSON export (for compatibility with older versions)
-nOfElementsInFilter := bf.ElemNum
-
-// check if item is in the filter
-isIn := bf.Has([]byte("butter")) // should be true
-isNotIn := bf.Has([]byte("Butter")) // should be false
-
-// 'add only if item is new' to the bloomfilter
-added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set
-added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new
-
-// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS
-// add one item
-bf.AddTS([]byte("peanutbutter"))
-// check if item is in the filter
-isIn = bf.HasTS([]byte("peanutbutter")) // should be true
-isNotIn = bf.HasTS([]byte("peanutButter")) // should be false
-// 'add only if item is new' to the bloomfilter
-added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'butter' is already in the set
-added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'peanutbuTTer' is new
-
-// convert to JSON ([]byte)
-Json := bf.JSONMarshal()
-
-// bloomfilters Mutex is exposed for external un-/locking
-// i.e. mutex lock while doing JSON conversion
-bf.Mtx.Lock()
-Json = bf.JSONMarshal()
-bf.Mtx.Unlock()
-
-// restore a bloom filter from storage
-bfNew := bbloom.JSONUnmarshal(Json)
-
-isInNew := bfNew.Has([]byte("butter")) // should be true
-isNotInNew := bfNew.Has([]byte("Butter")) // should be false
-
-```
-
-to work with the bloom filter.
-
-### why 'fast'?
-
-It's about 3 times faster than William Fitzgerald's bitset bloom filter (https://github.com/willf/bloom). And it is about as fast as my []bool set variant for Bloom filters (see https://github.com/AndreasBriese/bloom) but has an 8-times smaller memory footprint:
-
-
- Bloom filter (filter size 524288, 7 hashlocs)
- github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op)
- github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op)
- github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op)
- github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op)
-
- github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op)
- github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op)
- github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op)
- github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op)
- github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op)
- github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op)
-
-(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz)
-
-
-With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one subtraction per byte. sdbm is about as fast as fnv64a but gives fewer collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions.
diff --git a/vendor/github.com/AndreasBriese/bbloom/bbloom.go b/vendor/github.com/AndreasBriese/bbloom/bbloom.go
deleted file mode 100644
index c36948fc..00000000
--- a/vendor/github.com/AndreasBriese/bbloom/bbloom.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// The MIT License (MIT)
-// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-// 2019/08/25 code revision to reduce unsafe use
-// Parts are adopted from the fork at ipfs/bbloom after performance rev by
-// Steve Allen (https://github.com/Stebalien)
-// (see https://github.com/ipfs/bbloom/blob/master/bbloom.go)
-// -> func Has
-// -> func set
-// -> func add
-
-package bbloom
-
-import (
- "bytes"
- "encoding/json"
- "log"
- "math"
- "sync"
- "unsafe"
-)
-
-// helper
-// not needed anymore by Set
-// var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128}
-
-func getSize(ui64 uint64) (size uint64, exponent uint64) {
- if ui64 < uint64(512) {
- ui64 = uint64(512)
- }
- size = uint64(1)
- for size < ui64 {
- size <<= 1
- exponent++
- }
- return size, exponent
-}
-
-func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) {
- size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2)
- locs := math.Ceil(float64(0.69314718056) * size / numEntries)
- return uint64(size), uint64(locs)
-}
-
-// New
-// returns a new bloomfilter
-func New(params ...float64) (bloomfilter Bloom) {
- var entries, locs uint64
- if len(params) == 2 {
- if params[1] < 1 {
- entries, locs = calcSizeByWrongPositives(params[0], params[1])
- } else {
- entries, locs = uint64(params[0]), uint64(params[1])
- }
- } else {
- log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))")
- }
- size, exponent := getSize(uint64(entries))
- bloomfilter = Bloom{
- Mtx: &sync.Mutex{},
- sizeExp: exponent,
- size: size - 1,
- setLocs: locs,
- shift: 64 - exponent,
- }
- bloomfilter.Size(size)
- return bloomfilter
-}
-
-// NewWithBoolset
-// takes a []byte slice and number of locs per entry
-// returns the bloomfilter with a bitset populated according to the input []byte
-func NewWithBoolset(bs *[]byte, locs uint64) (bloomfilter Bloom) {
- bloomfilter = New(float64(len(*bs)<<3), float64(locs))
- for i, b := range *bs {
- *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + uintptr(i))) = b
- }
- return bloomfilter
-}
-
-// bloomJSONImExport
-// Im/Export structure used by JSONMarshal / JSONUnmarshal
-type bloomJSONImExport struct {
- FilterSet []byte
- SetLocs uint64
-}
-
-// JSONUnmarshal
-// takes JSON-Object (type bloomJSONImExport) as []bytes
-// returns Bloom object
-func JSONUnmarshal(dbData []byte) Bloom {
- bloomImEx := bloomJSONImExport{}
- json.Unmarshal(dbData, &bloomImEx)
- buf := bytes.NewBuffer(bloomImEx.FilterSet)
- bs := buf.Bytes()
- bf := NewWithBoolset(&bs, bloomImEx.SetLocs)
- return bf
-}
-
-//
-// Bloom filter
-type Bloom struct {
- Mtx *sync.Mutex
- ElemNum uint64
- bitset []uint64
- sizeExp uint64
- size uint64
- setLocs uint64
- shift uint64
-}
-
-// <--- http://www.cse.yorku.ca/~oz/hash.html
-// modified Berkeley DB Hash (32bit)
-// hash is casted to l, h = 16bit fragments
-// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) {
-// hash := uint64(len(*b))
-// for _, c := range *b {
-// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash
-// }
-// h = hash >> bl.shift
-// l = hash << bl.shift >> bl.shift
-// return l, h
-// }
-
-// Update: found sipHash of Jean-Philippe Aumasson & Daniel J. Bernstein to be even faster than absdbm()
-// https://131002.net/siphash/
-// siphash was implemented for Go by Dmitry Chestnykh https://github.com/dchest/siphash
-
-// Add
-// set the bit(s) for entry; Adds an entry to the Bloom filter
-func (bl *Bloom) Add(entry []byte) {
- l, h := bl.sipHash(entry)
- for i := uint64(0); i < bl.setLocs; i++ {
- bl.set((h + i*l) & bl.size)
- bl.ElemNum++
- }
-}
-
-// AddTS
-// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
-func (bl *Bloom) AddTS(entry []byte) {
- bl.Mtx.Lock()
- defer bl.Mtx.Unlock()
- bl.Add(entry)
-}
-
-// Has
-// check if bit(s) for entry is/are set
-// returns true if the entry was added to the Bloom Filter
-func (bl Bloom) Has(entry []byte) bool {
- l, h := bl.sipHash(entry)
- res := true
- for i := uint64(0); i < bl.setLocs; i++ {
- res = res && bl.isSet((h+i*l)&bl.size)
- // https://github.com/ipfs/bbloom/commit/84e8303a9bfb37b2658b85982921d15bbb0fecff
- // // Branching here (early escape) is not worth it
- // // This is my conclusion from benchmarks
- // // (prevents loop unrolling)
- // switch bl.IsSet((h + i*l) & bl.size) {
- // case false:
- // return false
- // }
- }
- return res
-}
-
-// HasTS
-// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
-func (bl *Bloom) HasTS(entry []byte) bool {
- bl.Mtx.Lock()
- defer bl.Mtx.Unlock()
- return bl.Has(entry)
-}
-
-// AddIfNotHas
-// Only Add entry if it's not present in the bloomfilter
-// returns true if entry was added
-// returns false if entry was already registered in the bloomfilter
-func (bl Bloom) AddIfNotHas(entry []byte) (added bool) {
- if bl.Has(entry) {
- return added
- }
- bl.Add(entry)
- return true
-}
-
-// AddIfNotHasTS
-// Thread safe: Only Add entry if it's not present in the bloomfilter
-// returns true if entry was added
-// returns false if entry was already registered in the bloomfilter
-func (bl *Bloom) AddIfNotHasTS(entry []byte) (added bool) {
- bl.Mtx.Lock()
- defer bl.Mtx.Unlock()
- return bl.AddIfNotHas(entry)
-}
-
-// Size
-// make Bloom filter with a bitset of size sz
-func (bl *Bloom) Size(sz uint64) {
- bl.bitset = make([]uint64, sz>>6)
-}
-
-// Clear
-// resets the Bloom filter
-func (bl *Bloom) Clear() {
- bs := bl.bitset
- for i := range bs {
- bs[i] = 0
- }
-}
-
-// Set
-// set the bit[idx] of bitset
-func (bl *Bloom) set(idx uint64) {
- // omit unsafe
- // *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))) |= mask[idx%8]
- bl.bitset[idx>>6] |= 1 << (idx % 64)
-}
-
-// IsSet
-// check if bit[idx] of bitset is set
-// returns true/false
-func (bl *Bloom) isSet(idx uint64) bool {
- // omit unsafe
- // return (((*(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)))) >> (idx % 8)) & 1) == 1
- return bl.bitset[idx>>6]&(1<<(idx%64)) != 0
-}
-
-// JSONMarshal
-// returns JSON-object (type bloomJSONImExport) as []byte
-func (bl Bloom) JSONMarshal() []byte {
- bloomImEx := bloomJSONImExport{}
- bloomImEx.SetLocs = uint64(bl.setLocs)
- bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3)
- for i := range bloomImEx.FilterSet {
- bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[0])) + uintptr(i)))
- }
- data, err := json.Marshal(bloomImEx)
- if err != nil {
- log.Fatal("json.Marshal failed: ", err)
- }
- return data
-}
-
-// // alternative hashFn
-// func (bl Bloom) fnv64a(b *[]byte) (l, h uint64) {
-// h64 := fnv.New64a()
-// h64.Write(*b)
-// hash := h64.Sum64()
-// h = hash >> 32
-// l = hash << 32 >> 32
-// return l, h
-// }
-//
-// // <-- http://partow.net/programming/hashfunctions/index.html
-// // citation: An algorithm proposed by Donald E. Knuth in The Art Of Computer Programming Volume 3,
-// // under the topic of sorting and search chapter 6.4.
-// // modified to fit with boolset-length
-// func (bl Bloom) DEKHash(b *[]byte) (l, h uint64) {
-// hash := uint64(len(*b))
-// for _, c := range *b {
-// hash = ((hash << 5) ^ (hash >> bl.shift)) ^ uint64(c)
-// }
-// h = hash >> bl.shift
-// l = hash << bl.sizeExp >> bl.sizeExp
-// return l, h
-// }
diff --git a/vendor/github.com/AndreasBriese/bbloom/sipHash.go b/vendor/github.com/AndreasBriese/bbloom/sipHash.go
deleted file mode 100644
index a91d8199..00000000
--- a/vendor/github.com/AndreasBriese/bbloom/sipHash.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Written in 2012 by Dmitry Chestnykh.
-//
-// To the extent possible under law, the author have dedicated all copyright
-// and related and neighboring rights to this software to the public domain
-// worldwide. This software is distributed without any warranty.
-// http://creativecommons.org/publicdomain/zero/1.0/
-//
-// Package siphash implements SipHash-2-4, a fast short-input PRF
-// created by Jean-Philippe Aumasson and Daniel J. Bernstein.
-
-package bbloom
-
-// Hash returns the 64-bit SipHash-2-4 of the given byte slice with two 64-bit
-// parts of 128-bit key: k0 and k1.
-func (bl Bloom) sipHash(p []byte) (l, h uint64) {
- // Initialization.
- v0 := uint64(8317987320269560794) // k0 ^ 0x736f6d6570736575
- v1 := uint64(7237128889637516672) // k1 ^ 0x646f72616e646f6d
- v2 := uint64(7816392314733513934) // k0 ^ 0x6c7967656e657261
- v3 := uint64(8387220255325274014) // k1 ^ 0x7465646279746573
- t := uint64(len(p)) << 56
-
- // Compression.
- for len(p) >= 8 {
-
- m := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 |
- uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
-
- v3 ^= m
-
- // Round 1.
- v0 += v1
- v1 = v1<<13 | v1>>51
- v1 ^= v0
- v0 = v0<<32 | v0>>32
-
- v2 += v3
- v3 = v3<<16 | v3>>48
- v3 ^= v2
-
- v0 += v3
- v3 = v3<<21 | v3>>43
- v3 ^= v0
-
- v2 += v1
- v1 = v1<<17 | v1>>47
- v1 ^= v2
- v2 = v2<<32 | v2>>32
-
- // Round 2.
- v0 += v1
- v1 = v1<<13 | v1>>51
- v1 ^= v0
- v0 = v0<<32 | v0>>32
-
- v2 += v3
- v3 = v3<<16 | v3>>48
- v3 ^= v2
-
- v0 += v3
- v3 = v3<<21 | v3>>43
- v3 ^= v0
-
- v2 += v1
- v1 = v1<<17 | v1>>47
- v1 ^= v2
- v2 = v2<<32 | v2>>32
-
- v0 ^= m
- p = p[8:]
- }
-
- // Compress last block.
- switch len(p) {
- case 7:
- t |= uint64(p[6]) << 48
- fallthrough
- case 6:
- t |= uint64(p[5]) << 40
- fallthrough
- case 5:
- t |= uint64(p[4]) << 32
- fallthrough
- case 4:
- t |= uint64(p[3]) << 24
- fallthrough
- case 3:
- t |= uint64(p[2]) << 16
- fallthrough
- case 2:
- t |= uint64(p[1]) << 8
- fallthrough
- case 1:
- t |= uint64(p[0])
- }
-
- v3 ^= t
-
- // Round 1.
- v0 += v1
- v1 = v1<<13 | v1>>51
- v1 ^= v0
- v0 = v0<<32 | v0>>32
-
- v2 += v3
- v3 = v3<<16 | v3>>48
- v3 ^= v2
-
- v0 += v3
- v3 = v3<<21 | v3>>43
- v3 ^= v0
-
- v2 += v1
- v1 = v1<<17 | v1>>47
- v1 ^= v2
- v2 = v2<<32 | v2>>32
-
- // Round 2.
- v0 += v1
- v1 = v1<<13 | v1>>51
- v1 ^= v0
- v0 = v0<<32 | v0>>32
-
- v2 += v3
- v3 = v3<<16 | v3>>48
- v3 ^= v2
-
- v0 += v3
- v3 = v3<<21 | v3>>43
- v3 ^= v0
-
- v2 += v1
- v1 = v1<<17 | v1>>47
- v1 ^= v2
- v2 = v2<<32 | v2>>32
-
- v0 ^= t
-
- // Finalization.
- v2 ^= 0xff
-
- // Round 1.
- v0 += v1
- v1 = v1<<13 | v1>>51
- v1 ^= v0
- v0 = v0<<32 | v0>>32
-
- v2 += v3
- v3 = v3<<16 | v3>>48
- v3 ^= v2
-
- v0 += v3
- v3 = v3<<21 | v3>>43
- v3 ^= v0
-
- v2 += v1
- v1 = v1<<17 | v1>>47
- v1 ^= v2
- v2 = v2<<32 | v2>>32
-
- // Round 2.
- v0 += v1
- v1 = v1<<13 | v1>>51
- v1 ^= v0
- v0 = v0<<32 | v0>>32
-
- v2 += v3
- v3 = v3<<16 | v3>>48
- v3 ^= v2
-
- v0 += v3
- v3 = v3<<21 | v3>>43
- v3 ^= v0
-
- v2 += v1
- v1 = v1<<17 | v1>>47
- v1 ^= v2
- v2 = v2<<32 | v2>>32
-
- // Round 3.
- v0 += v1
- v1 = v1<<13 | v1>>51
- v1 ^= v0
- v0 = v0<<32 | v0>>32
-
- v2 += v3
- v3 = v3<<16 | v3>>48
- v3 ^= v2
-
- v0 += v3
- v3 = v3<<21 | v3>>43
- v3 ^= v0
-
- v2 += v1
- v1 = v1<<17 | v1>>47
- v1 ^= v2
- v2 = v2<<32 | v2>>32
-
- // Round 4.
- v0 += v1
- v1 = v1<<13 | v1>>51
- v1 ^= v0
- v0 = v0<<32 | v0>>32
-
- v2 += v3
- v3 = v3<<16 | v3>>48
- v3 ^= v2
-
- v0 += v3
- v3 = v3<<21 | v3>>43
- v3 ^= v0
-
- v2 += v1
- v1 = v1<<17 | v1>>47
- v1 ^= v2
- v2 = v2<<32 | v2>>32
-
- // return v0 ^ v1 ^ v2 ^ v3
-
- hash := v0 ^ v1 ^ v2 ^ v3
- h = hash >> bl.shift
- l = hash << bl.shift >> bl.shift
- return l, h
-
-}
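
The Round 1/Round 2 blocks repeated throughout the deleted sipHash are all the same SipRound ARX (add-rotate-xor) permutation: two rounds per 8-byte block and four at finalization give the "2-4" in SipHash-2-4. A minimal standalone sketch of that round (illustrative only, not part of this change), using math/bits in place of the manual shift-or rotations:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // sipRound applies one SipRound permutation to the four state lanes.
    // bits.RotateLeft64 is equivalent to the v<<k | v>>(64-k) rotations in
    // the vendored code; 13/16/21/17 plus the two 32-bit lane rotations are
    // the standard SipHash rotation schedule.
    func sipRound(v0, v1, v2, v3 uint64) (uint64, uint64, uint64, uint64) {
        v0 += v1
        v1 = bits.RotateLeft64(v1, 13)
        v1 ^= v0
        v0 = bits.RotateLeft64(v0, 32)
        v2 += v3
        v3 = bits.RotateLeft64(v3, 16)
        v3 ^= v2
        v0 += v3
        v3 = bits.RotateLeft64(v3, 21)
        v3 ^= v0
        v2 += v1
        v1 = bits.RotateLeft64(v1, 17)
        v1 ^= v2
        v2 = bits.RotateLeft64(v2, 32)
        return v0, v1, v2, v3
    }

    func main() {
        v0, v1, v2, v3 := uint64(1), uint64(2), uint64(3), uint64(4)
        for i := 0; i < 2; i++ { // two compression rounds, as in the loop body
            v0, v1, v2, v3 = sipRound(v0, v1, v2, v3)
        }
        fmt.Printf("%#x %#x %#x %#x\n", v0, v1, v2, v3)
    }
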
diff --git a/vendor/github.com/AndreasBriese/bbloom/words.txt b/vendor/github.com/AndreasBriese/bbloom/words.txt
deleted file mode 100644
index ad86a31a..00000000
--- a/vendor/github.com/AndreasBriese/bbloom/words.txt
+++ /dev/null
@@ -1,140 +0,0 @@
-2014/01/01 00:00:00 /info.html
-2014/01/01 00:00:00 /info.html
-2014/01/01 00:00:01 /info.html
-2014/01/01 00:00:02 /info.html
-2014/01/01 00:00:03 /info.html
-2014/01/01 00:00:04 /info.html
-2014/01/01 00:00:05 /info.html
-2014/01/01 00:00:06 /info.html
-2014/01/01 00:00:07 /info.html
-2014/01/01 00:00:08 /info.html
-2014/01/01 00:00:09 /info.html
-2014/01/01 00:00:10 /info.html
-2014/01/01 00:00:11 /info.html
-2014/01/01 00:00:12 /info.html
-2014/01/01 00:00:13 /info.html
-2014/01/01 00:00:14 /info.html
-2014/01/01 00:00:15 /info.html
-2014/01/01 00:00:16 /info.html
-2014/01/01 00:00:17 /info.html
-2014/01/01 00:00:18 /info.html
-2014/01/01 00:00:19 /info.html
-2014/01/01 00:00:20 /info.html
-2014/01/01 00:00:21 /info.html
-2014/01/01 00:00:22 /info.html
-2014/01/01 00:00:23 /info.html
-2014/01/01 00:00:24 /info.html
-2014/01/01 00:00:25 /info.html
-2014/01/01 00:00:26 /info.html
-2014/01/01 00:00:27 /info.html
-2014/01/01 00:00:28 /info.html
-2014/01/01 00:00:29 /info.html
-2014/01/01 00:00:30 /info.html
-2014/01/01 00:00:31 /info.html
-2014/01/01 00:00:32 /info.html
-2014/01/01 00:00:33 /info.html
-2014/01/01 00:00:34 /info.html
-2014/01/01 00:00:35 /info.html
-2014/01/01 00:00:36 /info.html
-2014/01/01 00:00:37 /info.html
-2014/01/01 00:00:38 /info.html
-2014/01/01 00:00:39 /info.html
-2014/01/01 00:00:40 /info.html
-2014/01/01 00:00:41 /info.html
-2014/01/01 00:00:42 /info.html
-2014/01/01 00:00:43 /info.html
-2014/01/01 00:00:44 /info.html
-2014/01/01 00:00:45 /info.html
-2014/01/01 00:00:46 /info.html
-2014/01/01 00:00:47 /info.html
-2014/01/01 00:00:48 /info.html
-2014/01/01 00:00:49 /info.html
-2014/01/01 00:00:50 /info.html
-2014/01/01 00:00:51 /info.html
-2014/01/01 00:00:52 /info.html
-2014/01/01 00:00:53 /info.html
-2014/01/01 00:00:54 /info.html
-2014/01/01 00:00:55 /info.html
-2014/01/01 00:00:56 /info.html
-2014/01/01 00:00:57 /info.html
-2014/01/01 00:00:58 /info.html
-2014/01/01 00:00:59 /info.html
-2014/01/01 00:01:00 /info.html
-2014/01/01 00:01:01 /info.html
-2014/01/01 00:01:02 /info.html
-2014/01/01 00:01:03 /info.html
-2014/01/01 00:01:04 /info.html
-2014/01/01 00:01:05 /info.html
-2014/01/01 00:01:06 /info.html
-2014/01/01 00:01:07 /info.html
-2014/01/01 00:01:08 /info.html
-2014/01/01 00:01:09 /info.html
-2014/01/01 00:01:10 /info.html
-2014/01/01 00:01:11 /info.html
-2014/01/01 00:01:12 /info.html
-2014/01/01 00:01:13 /info.html
-2014/01/01 00:01:14 /info.html
-2014/01/01 00:01:15 /info.html
-2014/01/01 00:01:16 /info.html
-2014/01/01 00:01:17 /info.html
-2014/01/01 00:01:18 /info.html
-2014/01/01 00:01:19 /info.html
-2014/01/01 00:01:20 /info.html
-2014/01/01 00:01:21 /info.html
-2014/01/01 00:01:22 /info.html
-2014/01/01 00:01:23 /info.html
-2014/01/01 00:01:24 /info.html
-2014/01/01 00:01:25 /info.html
-2014/01/01 00:01:26 /info.html
-2014/01/01 00:01:27 /info.html
-2014/01/01 00:01:28 /info.html
-2014/01/01 00:01:29 /info.html
-2014/01/01 00:01:30 /info.html
-2014/01/01 00:01:31 /info.html
-2014/01/01 00:01:32 /info.html
-2014/01/01 00:01:33 /info.html
-2014/01/01 00:01:34 /info.html
-2014/01/01 00:01:35 /info.html
-2014/01/01 00:01:36 /info.html
-2014/01/01 00:01:37 /info.html
-2014/01/01 00:01:38 /info.html
-2014/01/01 00:01:39 /info.html
-2014/01/01 00:01:40 /info.html
-2014/01/01 00:01:41 /info.html
-2014/01/01 00:01:42 /info.html
-2014/01/01 00:01:43 /info.html
-2014/01/01 00:01:44 /info.html
-2014/01/01 00:01:45 /info.html
-2014/01/01 00:01:46 /info.html
-2014/01/01 00:01:47 /info.html
-2014/01/01 00:01:48 /info.html
-2014/01/01 00:01:49 /info.html
-2014/01/01 00:01:50 /info.html
-2014/01/01 00:01:51 /info.html
-2014/01/01 00:01:52 /info.html
-2014/01/01 00:01:53 /info.html
-2014/01/01 00:01:54 /info.html
-2014/01/01 00:01:55 /info.html
-2014/01/01 00:01:56 /info.html
-2014/01/01 00:01:57 /info.html
-2014/01/01 00:01:58 /info.html
-2014/01/01 00:01:59 /info.html
-2014/01/01 00:02:00 /info.html
-2014/01/01 00:02:01 /info.html
-2014/01/01 00:02:02 /info.html
-2014/01/01 00:02:03 /info.html
-2014/01/01 00:02:04 /info.html
-2014/01/01 00:02:05 /info.html
-2014/01/01 00:02:06 /info.html
-2014/01/01 00:02:07 /info.html
-2014/01/01 00:02:08 /info.html
-2014/01/01 00:02:09 /info.html
-2014/01/01 00:02:10 /info.html
-2014/01/01 00:02:11 /info.html
-2014/01/01 00:02:12 /info.html
-2014/01/01 00:02:13 /info.html
-2014/01/01 00:02:14 /info.html
-2014/01/01 00:02:15 /info.html
-2014/01/01 00:02:16 /info.html
-2014/01/01 00:02:17 /info.html
-2014/01/01 00:02:18 /info.html
diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml
deleted file mode 100644
index 4025e01e..00000000
--- a/vendor/github.com/Masterminds/goutils/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-
-go:
- - 1.6
- - 1.7
- - 1.8
- - tip
-
-script:
- - go test -v
-
-notifications:
- webhooks:
- urls:
- - https://webhooks.gitter.im/e/06e3328629952dabe3e0
- on_success: change # options: [always|never|change] default: always
- on_failure: always # options: [always|never|change] default: always
- on_start: never # options: [always|never|change] default: always
diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md
deleted file mode 100644
index d700ec47..00000000
--- a/vendor/github.com/Masterminds/goutils/CHANGELOG.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# 1.0.1 (2017-05-31)
-
-## Fixed
-- #21: Fix generation of alphanumeric strings (thanks @dbarranco)
-
-# 1.0.0 (2014-04-30)
-
-- Initial release.
diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt
deleted file mode 100644
index d6456956..00000000
--- a/vendor/github.com/Masterminds/goutils/LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md
deleted file mode 100644
index 163ffe72..00000000
--- a/vendor/github.com/Masterminds/goutils/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-GoUtils
-===========
-[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html)
-[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils)
-
-
-GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some
-string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes:
-* WordUtils
-* RandomStringUtils
-* StringUtils (partial implementation)
-
-## Installation
-If you have Go set up on your system, run the following from the command line/terminal in your GOPATH directory:
-
- go get github.com/Masterminds/goutils
-
-If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils.
-
-
-## Documentation
-GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils)
-
-
-## Usage
-The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file).
-
- package main
-
- import (
- "fmt"
- "github.com/Masterminds/goutils"
- )
-
- func main() {
-
- // EXAMPLE 1: A goutils function which returns no errors
- fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF"
-
- }
-Some functions return errors mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file).
-
- package main
-
- import (
- "fmt"
- "github.com/Masterminds/goutils"
- )
-
- func main() {
-
- // EXAMPLE 2: A goutils function which returns an error
- rand1, err1 := goutils.Random (-1, 0, 0, true, true)
-
- if err1 != nil {
- fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
- } else {
- fmt.Println(rand1)
- }
-
- }
-
-## License
-GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license.
-
-## Issue Reporting
-Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues
-
-## Website
-* [GoUtils webpage](http://Masterminds.github.io/goutils/)
diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml
deleted file mode 100644
index 657564a8..00000000
--- a/vendor/github.com/Masterminds/goutils/appveyor.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-version: build-{build}.{branch}
-
-clone_folder: C:\gopath\src\github.com\Masterminds\goutils
-shallow_clone: true
-
-environment:
- GOPATH: C:\gopath
-
-platform:
- - x64
-
-build: off
-
-install:
- - go version
- - go env
-
-test_script:
- - go test -v
-
-deploy: off
diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
deleted file mode 100644
index 8dbd9248..00000000
--- a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
-Copyright 2014 Alexander Okoli
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goutils
-
-import (
- "crypto/rand"
- "fmt"
- "math"
- "math/big"
- "unicode"
-)
-
-/*
-CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)).
-
-Parameter:
- count - the length of random string to create
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
-*/
-func CryptoRandomNonAlphaNumeric(count int) (string, error) {
- return CryptoRandomAlphaNumericCustom(count, false, false)
-}
-
-/*
-CryptoRandomAscii creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
-
-Parameter:
- count - the length of random string to create
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
-*/
-func CryptoRandomAscii(count int) (string, error) {
- return CryptoRandom(count, 32, 127, false, false)
-}
-
-/*
-CryptoRandomNumeric creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of numeric characters.
-
-Parameter:
- count - the length of random string to create
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
-*/
-func CryptoRandomNumeric(count int) (string, error) {
- return CryptoRandom(count, 0, 0, false, true)
-}
-
-/*
-CryptoRandomAlphabetic creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of alphabetic characters.
-
-Parameter:
- count - the length of random string to create
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
-*/
-func CryptoRandomAlphabetic(count int) (string, error) {
- return CryptoRandom(count, 0, 0, true, false)
-}
-
-/*
-CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of alpha-numeric characters.
-
-Parameter:
- count - the length of random string to create
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
-*/
-func CryptoRandomAlphaNumeric(count int) (string, error) {
- return CryptoRandom(count, 0, 0, true, true)
-}
-
-/*
-CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
-
-Parameters:
- count - the length of random string to create
- letters - if true, generated string may include alphabetic characters
- numbers - if true, generated string may include numeric characters
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
-*/
-func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
- return CryptoRandom(count, 0, 0, letters, numbers)
-}
-
-/*
-CryptoRandom creates a random string based on a variety of options, using golang's crypto/rand source of randomness.
-If the parameters start and end are both 0, they are set to ' ' and 'z' respectively, so the ASCII printable characters
-will be used, unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively.
-If chars is not nil, characters stored in chars that are between start and end are chosen.
-
-Parameters:
- count - the length of random string to create
- start - the position in set of chars (ASCII/Unicode int) to start at
- end - the position in set of chars (ASCII/Unicode int) to end before
- letters - if true, generated string may include alphabetic characters
- numbers - if true, generated string may include numeric characters
- chars - the set of chars to choose from. If nil, then the set of all chars is used.
-
-Returns:
- string - the random string
- error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars)
-*/
-func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
- if count == 0 {
- return "", nil
- } else if count < 0 {
- err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...")
- return "", err
- }
- if chars != nil && len(chars) == 0 {
- err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty")
- return "", err
- }
-
- if start == 0 && end == 0 {
- if chars != nil {
- end = len(chars)
- } else {
- if !letters && !numbers {
- end = math.MaxInt32
- } else {
- end = 'z' + 1
- start = ' '
- }
- }
- } else {
- if end <= start {
- err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start)
- return "", err
- }
-
- if chars != nil && end > len(chars) {
- err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars))
- return "", err
- }
- }
-
- buffer := make([]rune, count)
- gap := end - start
-
- // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319
- // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343
-
- for count != 0 {
- count--
- var ch rune
- if chars == nil {
- ch = rune(getCryptoRandomInt(gap) + int64(start))
- } else {
- ch = chars[getCryptoRandomInt(gap)+int64(start)]
- }
-
- if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers {
- if ch >= 56320 && ch <= 57343 { // low surrogate range
- if count == 0 {
- count++
- } else {
- // Insert low surrogate
- buffer[count] = ch
- count--
- // Insert high surrogate
- buffer[count] = rune(55296 + getCryptoRandomInt(128))
- }
- } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial)
- if count == 0 {
- count++
- } else {
- // Insert low surrogate
- buffer[count] = rune(56320 + getCryptoRandomInt(128))
- count--
- // Insert high surrogate
- buffer[count] = ch
- }
- } else if ch >= 56192 && ch <= 56319 {
- // private high surrogate, skip it
- count++
- } else {
- // not one of the surrogates*
- buffer[count] = ch
- }
- } else {
- count++
- }
- }
- return string(buffer), nil
-}
-
-func getCryptoRandomInt(count int) int64 {
- nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count)))
- if err != nil {
- panic(err)
- }
- return nBig.Int64()
-}
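
The only cryptographic primitive in the deleted file is getCryptoRandomInt, which draws a uniform value in [0, n) from crypto/rand; everything else is character-set filtering. A self-contained sketch of the same idea under hypothetical names (not this repository's API), building an alphanumeric token directly from a restricted alphabet:

    package main

    import (
        "crypto/rand"
        "fmt"
        "math/big"
    )

    // cryptoRandomInt mirrors the deleted getCryptoRandomInt helper: draw a
    // uniform int64 in [0, max) from crypto/rand, panicking only if the
    // system randomness source fails.
    func cryptoRandomInt(max int64) int64 {
        n, err := rand.Int(rand.Reader, big.NewInt(max))
        if err != nil {
            panic(err)
        }
        return n.Int64()
    }

    func main() {
        // Restricting the alphabet up front replaces the vendored code's
        // rejection loop over unicode.IsLetter/unicode.IsDigit.
        const alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        buf := make([]byte, 16)
        for i := range buf {
            buf[i] = alphabet[cryptoRandomInt(int64(len(alphabet)))]
        }
        fmt.Println(string(buf)) // a 16-character alphanumeric token
    }
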
diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go
deleted file mode 100644
index 27267023..00000000
--- a/vendor/github.com/Masterminds/goutils/randomstringutils.go
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
-Copyright 2014 Alexander Okoli
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goutils
-
-import (
- "fmt"
- "math"
- "math/rand"
- "time"
- "unicode"
-)
-
-// RANDOM provides the time-based seed used to generate random numbers
-var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano()))
-
-/*
-RandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)).
-
-Parameter:
- count - the length of random string to create
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
-*/
-func RandomNonAlphaNumeric(count int) (string, error) {
- return RandomAlphaNumericCustom(count, false, false)
-}
-
-/*
-RandomAscii creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
-
-Parameter:
- count - the length of random string to create
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
-*/
-func RandomAscii(count int) (string, error) {
- return Random(count, 32, 127, false, false)
-}
-
-/*
-RandomNumeric creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of numeric characters.
-
-Parameter:
- count - the length of random string to create
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
-*/
-func RandomNumeric(count int) (string, error) {
- return Random(count, 0, 0, false, true)
-}
-
-/*
-RandomAlphabetic creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of alphabetic characters.
-
-Parameters:
- count - the length of random string to create
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
-*/
-func RandomAlphabetic(count int) (string, error) {
- return Random(count, 0, 0, true, false)
-}
-
-/*
-RandomAlphaNumeric creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of alpha-numeric characters.
-
-Parameter:
- count - the length of random string to create
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
-*/
-func RandomAlphaNumeric(count int) (string, error) {
- return Random(count, 0, 0, true, true)
-}
-
-/*
-RandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
-Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
-
-Parameters:
- count - the length of random string to create
- letters - if true, generated string may include alphabetic characters
- numbers - if true, generated string may include numeric characters
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
-*/
-func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
- return Random(count, 0, 0, letters, numbers)
-}
-
-/*
-Random creates a random string based on a variety of options, using default source of randomness.
-This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []rune, *rand.Rand), but
-instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance.
-
-Parameters:
- count - the length of random string to create
- start - the position in set of chars (ASCII/Unicode int) to start at
- end - the position in set of chars (ASCII/Unicode int) to end before
- letters - if true, generated string may include alphabetic characters
- numbers - if true, generated string may include numeric characters
- chars - the set of chars to choose from. If nil, then the set of all chars is used.
-
-Returns:
- string - the random string
- error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
-*/
-func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
- return RandomSeed(count, start, end, letters, numbers, chars, RANDOM)
-}
-
-/*
-RandomSeed creates a random string based on a variety of options, using a supplied source of randomness.
-If the parameters start and end are both 0, they are set to ' ' and 'z' respectively, so the ASCII printable characters
-will be used, unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively.
-If chars is not nil, characters stored in chars that are between start and end are chosen.
-This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance
-with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably.
-
-Parameters:
- count - the length of random string to create
- start - the position in set of chars (ASCII/Unicode decimals) to start at
- end - the position in set of chars (ASCII/Unicode decimals) to end before
- letters - if true, generated string may include alphabetic characters
- numbers - if true, generated string may include numeric characters
- chars - the set of chars to choose from. If nil, then the set of all chars is used.
- random - a source of randomness.
-
-Returns:
- string - the random string
- error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars)
-*/
-func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) {
-
- if count == 0 {
- return "", nil
- } else if count < 0 {
- err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...")
- return "", err
- }
- if chars != nil && len(chars) == 0 {
- err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty")
- return "", err
- }
-
- if start == 0 && end == 0 {
- if chars != nil {
- end = len(chars)
- } else {
- if !letters && !numbers {
- end = math.MaxInt32
- } else {
- end = 'z' + 1
- start = ' '
- }
- }
- } else {
- if end <= start {
- err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start)
- return "", err
- }
-
- if chars != nil && end > len(chars) {
- err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars))
- return "", err
- }
- }
-
- buffer := make([]rune, count)
- gap := end - start
-
- // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319
- // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343
-
- for count != 0 {
- count--
- var ch rune
- if chars == nil {
- ch = rune(random.Intn(gap) + start)
- } else {
- ch = chars[random.Intn(gap)+start]
- }
-
- if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers {
- if ch >= 56320 && ch <= 57343 { // low surrogate range
- if count == 0 {
- count++
- } else {
- // Insert low surrogate
- buffer[count] = ch
- count--
- // Insert high surrogate
- buffer[count] = rune(55296 + random.Intn(128))
- }
- } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial)
- if count == 0 {
- count++
- } else {
- // Insert low surrogate
- buffer[count] = rune(56320 + random.Intn(128))
- count--
- // Insert high surrogate
- buffer[count] = ch
- }
- } else if ch >= 56192 && ch <= 56319 {
- // private high surrogate, skip it
- count++
- } else {
- // not one of the surrogates*
- buffer[count] = ch
- }
- } else {
- count++
- }
- }
- return string(buffer), nil
-}
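
The practical difference from the crypto variant is the seeding described in RandomSeed's doc comment: a caller-supplied *rand.Rand with a fixed seed reproduces the same strings on every run. A tiny stdlib-only sketch of that property (illustrative, not the removed API):

    package main

    import (
        "fmt"
        "math/rand"
    )

    func main() {
        // Two generators seeded identically stay in lockstep, which is the
        // reproducibility property RandomSeed documents.
        a := rand.New(rand.NewSource(42))
        b := rand.New(rand.NewSource(42))
        for i := 0; i < 3; i++ {
            fmt.Println(a.Intn(100) == b.Intn(100)) // always true
        }
    }
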
diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go
deleted file mode 100644
index 741bb530..00000000
--- a/vendor/github.com/Masterminds/goutils/stringutils.go
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
-Copyright 2014 Alexander Okoli
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goutils
-
-import (
- "bytes"
- "fmt"
- "strings"
- "unicode"
-)
-
-// Typically returned by functions where a searched item cannot be found
-const INDEX_NOT_FOUND = -1
-
-/*
-Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..."
-
-Specifically, the algorithm is as follows:
-
- - If str is less than maxWidth characters long, return it.
- - Else abbreviate it to (str[0:maxWidth - 3] + "...").
- - If maxWidth is less than 4, return an illegal argument error.
- - In no case will it return a string of length greater than maxWidth.
-
-Parameters:
- str - the string to check
- maxWidth - maximum length of result string, must be at least 4
-
-Returns:
- string - abbreviated string
- error - if the width is too small
-*/
-func Abbreviate(str string, maxWidth int) (string, error) {
- return AbbreviateFull(str, 0, maxWidth)
-}
-
-/*
-AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..."
-This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not
-necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear
-somewhere in the result.
-In no case will it return a string of length greater than maxWidth.
-
-Parameters:
- str - the string to check
- offset - left edge of source string
- maxWidth - maximum length of result string, must be at least 4
-
-Returns:
- string - abbreviated string
- error - if the width is too small
-*/
-func AbbreviateFull(str string, offset int, maxWidth int) (string, error) {
- if str == "" {
- return "", nil
- }
- if maxWidth < 4 {
- err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4")
- return "", err
- }
- if len(str) <= maxWidth {
- return str, nil
- }
- if offset > len(str) {
- offset = len(str)
- }
- if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7
- offset = len(str) - (maxWidth - 3)
- }
- abrevMarker := "..."
- if offset <= 4 {
- return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker;
- }
- if maxWidth < 7 {
- err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7")
- return "", err
- }
- if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15
- abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3))
- return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3);
- }
- return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3));
-}
-
-/*
-DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune).
-It returns the string without whitespaces.
-
-Parameter:
- str - the string to delete whitespace from, may be nil
-
-Returns:
- the string without whitespaces
-*/
-func DeleteWhiteSpace(str string) string {
- if str == "" {
- return str
- }
- sz := len(str)
- var chs bytes.Buffer
- count := 0
- for i := 0; i < sz; i++ {
- ch := rune(str[i])
- if !unicode.IsSpace(ch) {
- chs.WriteRune(ch)
- count++
- }
- }
- if count == sz {
- return str
- }
- return chs.String()
-}
-
-/*
-IndexOfDifference compares two strings, and returns the index at which the strings begin to differ.
-
-Parameters:
- str1 - the first string
- str2 - the second string
-
-Returns:
- the index where str1 and str2 begin to differ; -1 if they are equal
-*/
-func IndexOfDifference(str1 string, str2 string) int {
- if str1 == str2 {
- return INDEX_NOT_FOUND
- }
- if IsEmpty(str1) || IsEmpty(str2) {
- return 0
- }
- var i int
- for i = 0; i < len(str1) && i < len(str2); i++ {
- if rune(str1[i]) != rune(str2[i]) {
- break
- }
- }
- if i < len(str2) || i < len(str1) {
- return i
- }
- return INDEX_NOT_FOUND
-}
-
-/*
-IsBlank checks if a string is whitespace or empty (""). Observe the following behavior:
-
- goutils.IsBlank("") = true
- goutils.IsBlank(" ") = true
- goutils.IsBlank("bob") = false
- goutils.IsBlank(" bob ") = false
-
-Parameter:
- str - the string to check
-
-Returns:
- true - if the string is whitespace or empty ("")
-*/
-func IsBlank(str string) bool {
- strLen := len(str)
- if str == "" || strLen == 0 {
- return true
- }
- for i := 0; i < strLen; i++ {
- if !unicode.IsSpace(rune(str[i])) {
- return false
- }
- }
- return true
-}
-
-/*
-IndexOf returns the index of the first instance of sub in str, with the search beginning from the
-index start point specified. -1 is returned if sub is not present in str.
-
-An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero.
-A start position greater than the string length returns -1.
-
-Parameters:
- str - the string to check
- sub - the substring to find
- start - the start position; negative treated as zero
-
-Returns:
- the first index where the sub string was found (always >= start)
-*/
-func IndexOf(str string, sub string, start int) int {
-
- if start < 0 {
- start = 0
- }
-
- if len(str) < start {
- return INDEX_NOT_FOUND
- }
-
- if IsEmpty(str) || IsEmpty(sub) {
- return INDEX_NOT_FOUND
- }
-
- partialIndex := strings.Index(str[start:len(str)], sub)
- if partialIndex == -1 {
- return INDEX_NOT_FOUND
- }
- return partialIndex + start
-}
-
-// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise.
-func IsEmpty(str string) bool {
- return len(str) == 0
-}
-
-// DefaultString returns either the passed-in string or, if the string is empty, the value of defaultStr.
-func DefaultString(str string, defaultStr string) string {
- if IsEmpty(str) {
- return defaultStr
- }
- return str
-}
-
-// DefaultIfBlank returns either the passed-in string or, if the string is whitespace or empty (""), the value of defaultStr.
-func DefaultIfBlank(str string, defaultStr string) string {
- if IsBlank(str) {
- return defaultStr
- }
- return str
-}
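
For the offset-0 case, the Abbreviate algorithm above reduces to: pass strings of at most maxWidth through unchanged, otherwise keep the first maxWidth-3 bytes and append "...". A minimal standalone sketch of just that case (hypothetical helper, not the removed API):

    package main

    import "fmt"

    // abbreviate reproduces the offset-0 rule of the deleted Abbreviate:
    // strings no longer than maxWidth pass through unchanged; longer ones
    // keep the first maxWidth-3 bytes and get "..." appended, so the result
    // never exceeds maxWidth.
    func abbreviate(s string, maxWidth int) (string, error) {
        if maxWidth < 4 {
            return "", fmt.Errorf("minimum abbreviation width is 4")
        }
        if len(s) <= maxWidth {
            return s, nil
        }
        return s[:maxWidth-3] + "...", nil
    }

    func main() {
        out, _ := abbreviate("Now is the time for all good men", 22)
        fmt.Println(out) // Now is the time for...
    }
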
diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go
deleted file mode 100644
index 034cad8e..00000000
--- a/vendor/github.com/Masterminds/goutils/wordutils.go
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
-Copyright 2014 Alexander Okoli
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package goutils provides utility functions to manipulate strings in various ways.
-The code snippets below show examples of how to use goutils. Some functions return
-errors while others do not, so usage would vary as a result.
-
-Example:
-
- package main
-
- import (
- "fmt"
- "github.com/aokoli/goutils"
- )
-
- func main() {
-
- // EXAMPLE 1: A goutils function which returns no errors
- fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF"
-
-
-
- // EXAMPLE 2: A goutils function which returns an error
- rand1, err1 := goutils.Random (-1, 0, 0, true, true)
-
- if err1 != nil {
- fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
- } else {
- fmt.Println(rand1)
- }
- }
-*/
-package goutils
-
-import (
- "bytes"
- "strings"
- "unicode"
-)
-
-// VERSION indicates the current version of goutils
-const VERSION = "1.0.0"
-
-/*
-Wrap wraps a single line of text, identifying words by ' '.
-New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped.
-Leading spaces on a new line are stripped. Trailing spaces are not stripped.
-
-Parameters:
- str - the string to be word wrapped
- wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1
-
-Returns:
- a line with newlines inserted
-*/
-func Wrap(str string, wrapLength int) string {
- return WrapCustom(str, wrapLength, "", false)
-}
-
-/*
-WrapCustom wraps a single line of text, identifying words by ' '.
-Leading spaces on a new line are stripped. Trailing spaces are not stripped.
-
-Parameters:
- str - the string to be word wrapped
- wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1
- newLineStr - the string to insert for a new line, "" uses '\n'
- wrapLongWords - true if long words (such as URLs) should be wrapped
-
-Returns:
- a line with newlines inserted
-*/
-func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string {
-
- if str == "" {
- return ""
- }
- if newLineStr == "" {
- newLineStr = "\n" // TODO Assumes "\n" is the separator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons
- }
- if wrapLength < 1 {
- wrapLength = 1
- }
-
- inputLineLength := len(str)
- offset := 0
-
- var wrappedLine bytes.Buffer
-
- for inputLineLength-offset > wrapLength {
-
- if rune(str[offset]) == ' ' {
- offset++
- continue
- }
-
- end := wrapLength + offset + 1
- spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset
-
- if spaceToWrapAt >= offset {
- // normal word (not longer than wrapLength)
- wrappedLine.WriteString(str[offset:spaceToWrapAt])
- wrappedLine.WriteString(newLineStr)
- offset = spaceToWrapAt + 1
-
- } else {
- // long word or URL
- if wrapLongWords {
- end := wrapLength + offset
- // long words are wrapped one line at a time
- wrappedLine.WriteString(str[offset:end])
- wrappedLine.WriteString(newLineStr)
- offset += wrapLength
- } else {
- // long words aren't wrapped, just extended beyond limit
- end := wrapLength + offset
- index := strings.IndexRune(str[end:len(str)], ' ')
- if index == -1 {
- wrappedLine.WriteString(str[offset:len(str)])
- offset = inputLineLength
- } else {
- spaceToWrapAt = index + end
- wrappedLine.WriteString(str[offset:spaceToWrapAt])
- wrappedLine.WriteString(newLineStr)
- offset = spaceToWrapAt + 1
- }
- }
- }
- }
-
- wrappedLine.WriteString(str[offset:len(str)])
-
- return wrappedLine.String()
-
-}
-
-/*
-Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed.
-To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune).
-The delimiters represent a set of characters understood to separate words. The first string character
-and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "".
-Capitalization uses the Unicode title case, normally equivalent to upper case.
-
-Parameters:
- str - the string to capitalize
- delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace is the delimiter
-
-Returns:
- capitalized string
-*/
-func Capitalize(str string, delimiters ...rune) string {
-
- var delimLen int
-
- if delimiters == nil {
- delimLen = -1
- } else {
- delimLen = len(delimiters)
- }
-
- if str == "" || delimLen == 0 {
- return str
- }
-
- buffer := []rune(str)
- capitalizeNext := true
- for i := 0; i < len(buffer); i++ {
- ch := buffer[i]
- if isDelimiter(ch, delimiters...) {
- capitalizeNext = true
- } else if capitalizeNext {
- buffer[i] = unicode.ToTitle(ch)
- capitalizeNext = false
- }
- }
- return string(buffer)
-
-}
-
-/*
-CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a
-titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood
-to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized.
-Capitalization uses the Unicode title case, normally equivalent to upper case.
-
-Parameters:
- str - the string to capitalize fully
- delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace is the delimiter
-
-Returns:
- capitalized string
-*/
-func CapitalizeFully(str string, delimiters ...rune) string {
-
- var delimLen int
-
- if delimiters == nil {
- delimLen = -1
- } else {
- delimLen = len(delimiters)
- }
-
- if str == "" || delimLen == 0 {
- return str
- }
- str = strings.ToLower(str)
- return Capitalize(str, delimiters...)
-}
-
-/*
-Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed.
-The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter
-character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char).
-
-Parameters:
- str - the string to uncapitalize fully
- delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace is the delimiter
-
-Returns:
- uncapitalized string
-*/
-func Uncapitalize(str string, delimiters ...rune) string {
-
- var delimLen int
-
- if delimiters == nil {
- delimLen = -1
- } else {
- delimLen = len(delimiters)
- }
-
- if str == "" || delimLen == 0 {
- return str
- }
-
- buffer := []rune(str)
- uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char.
- for i := 0; i < len(buffer); i++ {
- ch := buffer[i]
- if isDelimiter(ch, delimiters...) {
- uncapitalizeNext = true
- } else if uncapitalizeNext {
- buffer[i] = unicode.ToLower(ch)
- uncapitalizeNext = false
- }
- }
- return string(buffer)
-}
-
-/*
-SwapCase swaps the case of a string using a word based algorithm.
-
-Conversion algorithm:
-
- Upper case character converts to Lower case
- Title case character converts to Lower case
- Lower case character after Whitespace or at start converts to Title case
- Other Lower case character converts to Upper case
- Whitespace is defined by unicode.IsSpace(char).
-
-Parameters:
- str - the string to swap case
-
-Returns:
- the changed string
-*/
-func SwapCase(str string) string {
- if str == "" {
- return str
- }
- buffer := []rune(str)
-
- whitespace := true
-
- for i := 0; i < len(buffer); i++ {
- ch := buffer[i]
- if unicode.IsUpper(ch) {
- buffer[i] = unicode.ToLower(ch)
- whitespace = false
- } else if unicode.IsTitle(ch) {
- buffer[i] = unicode.ToLower(ch)
- whitespace = false
- } else if unicode.IsLower(ch) {
- if whitespace {
- buffer[i] = unicode.ToTitle(ch)
- whitespace = false
- } else {
- buffer[i] = unicode.ToUpper(ch)
- }
- } else {
- whitespace = unicode.IsSpace(ch)
- }
- }
- return string(buffer)
-}
-
-/*
-Initials extracts the initial letters from each word in the string. The first letter of the string and all first
-letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters
-parameter is excluded, then whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string.
-
-Parameters:
- str - the string to get initials from
- delimiters - set of characters to determine words, exclusion of this parameter means whitespace is the delimiter
-Returns:
- string of initial letters
-*/
-func Initials(str string, delimiters ...rune) string {
- if str == "" {
- return str
- }
- if delimiters != nil && len(delimiters) == 0 {
- return ""
- }
- strLen := len(str)
- var buf bytes.Buffer
- lastWasGap := true
- for i := 0; i < strLen; i++ {
- ch := rune(str[i])
-
- if isDelimiter(ch, delimiters...) {
- lastWasGap = true
- } else if lastWasGap {
- buf.WriteRune(ch)
- lastWasGap = false
- }
- }
- return buf.String()
-}
-
-// private function (lower case func name)
-func isDelimiter(ch rune, delimiters ...rune) bool {
- if delimiters == nil {
- return unicode.IsSpace(ch)
- }
- for _, delimiter := range delimiters {
- if ch == delimiter {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore
deleted file mode 100644
index 6b061e61..00000000
--- a/vendor/github.com/Masterminds/semver/v3/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-_fuzz/
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
deleted file mode 100644
index fdbdf144..00000000
--- a/vendor/github.com/Masterminds/semver/v3/.golangci.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-run:
- deadline: 2m
-
-linters:
- disable-all: true
- enable:
- - deadcode
- - dupl
- - errcheck
- - gofmt
- - goimports
- - golint
- - gosimple
- - govet
- - ineffassign
- - misspell
- - nakedret
- - structcheck
- - unused
- - varcheck
-
-linters-settings:
- gofmt:
- simplify: true
- dupl:
- threshold: 400
diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
deleted file mode 100644
index 1f90c38d..00000000
--- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
+++ /dev/null
@@ -1,194 +0,0 @@
-# Changelog
-
-## 3.1.1 (2020-11-23)
-
-### Fixed
-
-- #158: Fixed issue with generated regex operation order that could cause problems
-
-## 3.1.0 (2020-04-15)
-
-### Added
-
-- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah)
-
-### Changed
-
-- #148: More accurate validation messages on constraints
-
-## 3.0.3 (2019-12-13)
-
-### Fixed
-
-- #141: Fixed issue with <= comparison
-
-## 3.0.2 (2019-11-14)
-
-### Fixed
-
-- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos)
-
-## 3.0.1 (2019-09-13)
-
-### Fixed
-
-- #125: Fixes issue with module path for v3
-
-## 3.0.0 (2019-09-12)
-
-This is a major release of the semver package which includes API changes. The Go
-API is compatible with ^1. The Go API was not changed because many people are using
-`go get` without Go modules for their applications, and API-breaking changes would
-cause errors which we would then have to support.
-
-The changes in this release concern how the data passed into the
-functions is handled. These are described in the added and changed sections below.
-
-### Added
-
-- StrictNewVersion function. This is similar to NewVersion but will return an
- error if the version passed in is not a strict semantic version. For example,
- 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly
- speaking semantic versions. This function is faster, performs fewer operations,
- and uses fewer allocations than NewVersion.
-- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint.
-  The Makefile contains the operations used. For more information you can start
- on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing
-- Now using Go modules
-
-### Changed
-
-- NewVersion has proper prerelease and metadata validation with error messages
- to signal an issue with either of them
-- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the
-  version is >=1 the ^ range works the same as in v1. For major versions of 0 the
- rules have changed. The minor version is treated as the stable version unless
- a patch is specified and then it is equivalent to =. One difference from npm/js
-  is that prereleases there apply only to a specific version (e.g. 1.2.3).
- Prereleases here look over multiple versions and follow semantic version
- ordering rules. This pattern now follows along with the expected and requested
-  handling of this package by numerous users.
-
-## 1.5.0 (2019-09-11)
-
-### Added
-
-- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
-
-### Changed
-
-- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
-- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
-- #72: Adding docs comment pointing to vert for a cli
-- #71: Update the docs on pre-release comparator handling
-- #89: Test with new go versions (thanks @thedevsaddam)
-- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
-
-### Fixed
-
-- #78: Fix unchecked error in example code (thanks @ravron)
-- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
-- #97: Fixed copyright file for proper display on GitHub
-- #107: Fix handling prerelease when sorting alphanum and num
-- #109: Fixed where Validate sometimes returns wrong message on error
-
-## 1.4.2 (2018-04-10)
-
-### Changed
-
-- #72: Updated the docs to point to vert for a console application
-- #71: Update the docs on pre-release comparator handling
-
-### Fixed
-
-- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
-
-## 1.4.1 (2018-04-02)
-
-### Fixed
-
-- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
-
-## 1.4.0 (2017-10-04)
-
-### Changed
-
-- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
-
-## 1.3.1 (2017-07-10)
-
-### Fixed
-
-- Fixed #57: number comparisons in prerelease sometimes inaccurate
-
-## 1.3.0 (2017-05-02)
-
-### Added
-
-- #45: Added json (un)marshaling support (thanks @mh-cbon)
-- Stability marker. See https://masterminds.github.io/stability/
-
-### Fixed
-
-- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
-
-### Changed
-
-- #55: The godoc icon moved from png to svg
-
-## 1.2.3 (2017-04-03)
-
-### Fixed
-
-- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
-
-## Release 1.2.2 (2016-12-13)
-
-### Fixed
-
-- #34: Fixed issue where hyphen range was not working with pre-release parsing.
-
-## Release 1.2.1 (2016-11-28)
-
-### Fixed
-
-- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
- properly.
-
-## Release 1.2.0 (2016-11-04)
-
-### Added
-
-- #20: Added MustParse function for versions (thanks @adamreese)
-- #15: Added increment methods on versions (thanks @mh-cbon)
-
-### Fixed
-
-- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
- might not satisfy the intended compatibility. The change here ignores pre-releases
- on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
- constraint. For example, `^1.2.3` will ignore pre-releases while
- `^1.2.3-alpha` will include them.
-
-## Release 1.1.1 (2016-06-30)
-
-### Changed
-
-- Issue #9: Speed up version comparison performance (thanks @sdboyer)
-- Issue #8: Added benchmarks (thanks @sdboyer)
-- Updated Go Report Card URL to new location
-- Updated Readme to add code snippet formatting (thanks @mh-cbon)
-- Updated tagging to the v[SemVer] structure for compatibility with other tools.
-
-## Release 1.1.0 (2016-03-11)
-
-- Issue #2: Implemented validation to provide reasons a version failed a
-  constraint.
-
-## Release 1.0.1 (2015-12-31)
-
-- Fixed #1: * constraint failing on valid versions.
-
-## Release 1.0.0 (2015-10-20)
-
-- Initial release
diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
deleted file mode 100644
index 9ff7da9c..00000000
--- a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (C) 2014-2019, Matt Butcher and Matt Farina
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile
deleted file mode 100644
index eac19178..00000000
--- a/vendor/github.com/Masterminds/semver/v3/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
-GOPATH=$(shell go env GOPATH)
-GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint
-GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build
-GOFUZZ = $(GOPATH)/bin/go-fuzz
-
-.PHONY: lint
-lint: $(GOLANGCI_LINT)
- @echo "==> Linting codebase"
- @$(GOLANGCI_LINT) run
-
-.PHONY: test
-test:
- @echo "==> Running tests"
- GO111MODULE=on go test -v
-
-.PHONY: test-cover
-test-cover:
- @echo "==> Running Tests with coverage"
- GO111MODULE=on go test -cover .
-
-.PHONY: fuzz
-fuzz: $(GOFUZZBUILD) $(GOFUZZ)
- @echo "==> Fuzz testing"
- $(GOFUZZBUILD)
- $(GOFUZZ) -workdir=_fuzz
-
-$(GOLANGCI_LINT):
- # Install golangci-lint. The configuration for it is in the .golangci.yml
- # file in the root of the repository
- echo ${GOPATH}
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
-
-$(GOFUZZBUILD):
- cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
-
-$(GOFUZZ):
- cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
deleted file mode 100644
index d8f54dcb..00000000
--- a/vendor/github.com/Masterminds/semver/v3/README.md
+++ /dev/null
@@ -1,244 +0,0 @@
-# SemVer
-
-The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
-
-* Parse semantic versions
-* Sort semantic versions
-* Check if a semantic version fits within a set of constraints
-* Optionally work with a `v` prefix
-
-[![Stability:
-Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
-[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions)
-[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
-[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
-
-If you are looking for a command line tool for version comparisons please see
-[vert](https://github.com/Masterminds/vert) which uses this library.
-
-## Package Versions
-
-There are three major versions of the `semver` package.
-
-* 3.x.x is the new stable and active version. This version is focused on constraint
- compatibility for range handling in other tools from other languages. It has
- a similar API to the v1 releases. The development of this version is on the master
- branch. The documentation for this version is below.
-* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
- no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
- There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
-* 1.x.x is the most widely used version with numerous tagged releases. This is the
- previous stable and is still maintained for bug fixes. The development, to fix
- bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
-
-## Parsing Semantic Versions
-
-There are two functions that can parse semantic versions. The `StrictNewVersion`
-function only parses valid version 2 semantic versions as outlined in the
-specification. The `NewVersion` function attempts to coerce a version into a
-semantic version and parse it. For example, if there is a leading v or a version
-listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
-semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
-that can be sorted, compared, and used in constraints.
-
-When parsing a version an error is returned if there is an issue parsing the
-version. For example,
-
- v, err := semver.NewVersion("1.2.3-beta.1+build345")
-
-The version object has methods to get the parts of the version, compare it to
-other versions, convert the version back into a string, and get the original
-string. Getting the original string is useful if the semantic version was coerced
-into a valid form.
-
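-A minimal sketch of the difference between the two parsing functions (error
-handling elided):
-
-```go
-v, _ := semver.NewVersion("v1.2")         // coerced to "1.2.0"
-fmt.Println(v)
-
-_, err := semver.StrictNewVersion("v1.2") // rejected: not a strict semantic version
-fmt.Println(err != nil)                   // true
-```
-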
-## Sorting Semantic Versions
-
-A set of versions can be sorted using the `sort` package from the standard library.
-For example,
-
-```go
-raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
-vs := make([]*semver.Version, len(raw))
-for i, r := range raw {
- v, err := semver.NewVersion(r)
- if err != nil {
- t.Errorf("Error parsing version: %s", err)
- }
-
- vs[i] = v
-}
-
-sort.Sort(semver.Collection(vs))
-```
-
-## Checking Version Constraints
-
-There are two methods for comparing versions. One uses comparison methods on
-`Version` instances and the other uses `Constraints`. There are some important
-differences to note between these two methods of comparison.
-
-1. When two versions are compared using functions such as `Compare`, `LessThan`,
- and others it will follow the specification and always include prereleases
- within the comparison. It will provide an answer that is valid with the
- comparison section of the spec at https://semver.org/#spec-item-11
-2. When constraint checking is used for checks or validation it will follow a
- different set of rules that are common for ranges with tools like npm/js
- and Rust/Cargo. This includes considering prereleases to be invalid if the
-   range does not include one. If you want to have it include pre-releases a
- simple solution is to include `-0` in your range.
-3. Constraint ranges can have some complex rules including the shorthand use of
- ~ and ^. For more details on those see the options below.
-
-There are differences between the two methods of checking versions because the
-comparison methods on `Version` follow the specification while comparison ranges
-are not part of the specification. Different packages and tools have taken it
-upon themselves to come up with range rules. This has resulted in differences.
-For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
-different pattern for ^. The comparison features in this package follow the
-npm/js and Cargo/Rust lead because applications using it have followed similar
-patterns with their versions.
-
-Checking a version against version constraints is one of the most featureful
-parts of the package.
-
-```go
-c, err := semver.NewConstraint(">= 1.2.3")
-if err != nil {
- // Handle constraint not being parsable.
-}
-
-v, err := semver.NewVersion("1.3")
-if err != nil {
- // Handle version not being parsable.
-}
-// Check if the version meets the constraints. The variable a will be true.
-a := c.Check(v)
-```
-
-### Basic Comparisons
-
-There are two elements to the comparisons. First, a comparison string is a list
-of space or comma separated AND comparisons. These are then separated by || (OR)
-comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
-comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
-greater than or equal to 4.2.3.
-
-The basic comparisons are:
-
-* `=`: equal (aliased to no operator)
-* `!=`: not equal
-* `>`: greater than
-* `<`: less than
-* `>=`: greater than or equal to
-* `<=`: less than or equal to
-
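-For example, a minimal sketch of an AND/OR constraint check (error handling
-elided):
-
-```go
-c, _ := semver.NewConstraint(">= 1.2, < 3.0.0 || >= 4.2.3")
-
-fmt.Println(c.Check(semver.MustParse("2.5.0"))) // true: satisfies the first range
-fmt.Println(c.Check(semver.MustParse("3.1.0"))) // false: matches neither range
-```
-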
-### Working With Prerelease Versions
-
-Pre-releases, for those not familiar with them, are used for software releases
-prior to stable or generally available releases. Examples of prereleases include
-development, alpha, beta, and release candidate releases. A prerelease may be
-a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
-order of precedence, prereleases come before their associated releases. In this
-example `1.2.3-beta.1 < 1.2.3`.
-
-According to the Semantic Version specification prereleases may not be
-API compliant with their release counterpart. It says,
-
-> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
-
-SemVer comparisons using constraints without a prerelease comparator will skip
-prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
-at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
-
-The reason for the `0` as a pre-release version in the example comparison is
-because pre-releases can only contain ASCII alphanumerics and hyphens (along with
-`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
-spec. The lowest character is a `0` in ASCII sort order
-(see an [ASCII Table](http://www.asciitable.com/)).
-
-Understanding ASCII sort ordering is important because A-Z comes before a-z. That
-means `>=1.2.3-BETA` will match `1.2.3-alpha`. What you might expect from case
-sensitivity doesn't apply here; this is due to the ASCII sort ordering that the
-spec specifies.
-
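-For example, a minimal sketch of the `-0` comparator (error handling elided):
-
-```go
-v := semver.MustParse("1.3.0-beta.1")
-
-c1, _ := semver.NewConstraint(">=1.2.3")
-c2, _ := semver.NewConstraint(">=1.2.3-0")
-
-fmt.Println(c1.Check(v)) // false: the range skips prerelease versions
-fmt.Println(c2.Check(v)) // true: the -0 comparator opts in to prereleases
-```
-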
-### Hyphen Range Comparisons
-
-There are multiple methods to handle ranges and the first is hyphen ranges.
-These look like:
-
-* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
-* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
-
-### Wildcards In Comparisons
-
-The `x`, `X`, and `*` characters can be used as wildcards. This works
-for all comparison operators. When used on the `=` operator it falls
-back to the patch level comparison (see tilde below). For example,
-
-* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
-* `>= 1.2.x` is equivalent to `>= 1.2.0`
-* `<= 2.x` is equivalent to `< 3`
-* `*` is equivalent to `>= 0.0.0`
-
-### Tilde Range Comparisons (Patch)
-
-The tilde (`~`) comparison operator is for patch level ranges when a minor
-version is specified and major level changes when the minor number is missing.
-For example,
-
-* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
-* `~1` is equivalent to `>= 1, < 2`
-* `~2.3` is equivalent to `>= 2.3, < 2.4`
-* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
-* `~1.x` is equivalent to `>= 1, < 2`
-
-### Caret Range Comparisons (Major)
-
-The caret (`^`) comparison operator is for major level changes once a stable
-(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
-as the API stability level. This is useful when comparing API versions, as a
-major change is API breaking. For example,
-
-* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
-* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
-* `^2.3` is equivalent to `>= 2.3, < 3`
-* `^2.x` is equivalent to `>= 2.0.0, < 3`
-* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
-* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
-* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
-* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
-* `^0` is equivalent to `>=0.0.0 <1.0.0`
-
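-As a minimal sketch of the tilde and caret rules above (error handling elided):
-
-```go
-tilde, _ := semver.NewConstraint("~1.2.3")
-caret, _ := semver.NewConstraint("^1.2.3")
-
-fmt.Println(tilde.Check(semver.MustParse("1.2.9"))) // true:  >= 1.2.3, < 1.3.0
-fmt.Println(tilde.Check(semver.MustParse("1.3.0"))) // false: outside the patch range
-fmt.Println(caret.Check(semver.MustParse("1.9.0"))) // true:  >= 1.2.3, < 2.0.0
-fmt.Println(caret.Check(semver.MustParse("2.0.0"))) // false: major version changed
-```
-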
-## Validation
-
-In addition to testing a version against a constraint, a version can be validated
-against a constraint. When validation fails a slice of errors containing why a
-version didn't meet the constraint is returned. For example,
-
-```go
-c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
-if err != nil {
- // Handle constraint not being parseable.
-}
-
-v, err := semver.NewVersion("1.3")
-if err != nil {
- // Handle version not being parseable.
-}
-
-// Validate a version against a constraint.
-a, msgs := c.Validate(v)
-// a is false
-for _, m := range msgs {
- fmt.Println(m)
-
- // Loops over the errors which would read
- // "1.3 is greater than 1.2.3"
- // "1.3 is less than 1.4"
-}
-```
-
-## Contribute
-
-If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
-or [create a pull request](https://github.com/Masterminds/semver/pulls).
diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go
deleted file mode 100644
index a7823589..00000000
--- a/vendor/github.com/Masterminds/semver/v3/collection.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package semver
-
-// Collection is a collection of Version instances and implements the sort
-// interface. See the sort package for more details.
-// https://golang.org/pkg/sort/
-type Collection []*Version
-
-// Len returns the length of a collection. The number of Version instances
-// on the slice.
-func (c Collection) Len() int {
- return len(c)
-}
-
-// Less is needed for the sort interface to compare two Version objects on the
-// slice. It checks if one is less than the other.
-func (c Collection) Less(i, j int) bool {
- return c[i].LessThan(c[j])
-}
-
-// Swap is needed for the sort interface to swap the Version objects
-// at two different positions in the slice.
-func (c Collection) Swap(i, j int) {
- c[i], c[j] = c[j], c[i]
-}
diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go
deleted file mode 100644
index 547613f0..00000000
--- a/vendor/github.com/Masterminds/semver/v3/constraints.go
+++ /dev/null
@@ -1,568 +0,0 @@
-package semver
-
-import (
- "bytes"
- "errors"
- "fmt"
- "regexp"
- "strings"
-)
-
-// Constraints is one or more constraint that a semantic version can be
-// checked against.
-type Constraints struct {
- constraints [][]*constraint
-}
-
-// NewConstraint returns a Constraints instance that a Version instance can
-// be checked against. If there is a parse error it will be returned.
-func NewConstraint(c string) (*Constraints, error) {
-
- // Rewrite - ranges into a comparison operation.
- c = rewriteRange(c)
-
- ors := strings.Split(c, "||")
- or := make([][]*constraint, len(ors))
- for k, v := range ors {
-
- // TODO: Find a way to validate and fetch all the constraints in a simpler form
-
- // Validate the segment
- if !validConstraintRegex.MatchString(v) {
- return nil, fmt.Errorf("improper constraint: %s", v)
- }
-
- cs := findConstraintRegex.FindAllString(v, -1)
- if cs == nil {
- cs = append(cs, v)
- }
- result := make([]*constraint, len(cs))
- for i, s := range cs {
- pc, err := parseConstraint(s)
- if err != nil {
- return nil, err
- }
-
- result[i] = pc
- }
- or[k] = result
- }
-
- o := &Constraints{constraints: or}
- return o, nil
-}
-
-// Check tests if a version satisfies the constraints.
-func (cs Constraints) Check(v *Version) bool {
- // TODO(mattfarina): For v4 of this library consolidate the Check and Validate
- // functions as the underlying functions make that possible now.
- // loop over the ORs and check the inner ANDs
- for _, o := range cs.constraints {
- joy := true
- for _, c := range o {
- if check, _ := c.check(v); !check {
- joy = false
- break
- }
- }
-
- if joy {
- return true
- }
- }
-
- return false
-}
-
-// Validate checks if a version satisfies a constraint. If not a slice of
-// reasons for the failure are returned in addition to a bool.
-func (cs Constraints) Validate(v *Version) (bool, []error) {
- // loop over the ORs and check the inner ANDs
- var e []error
-
-	// Capture the prerelease message only once. When it happens the first time
-	// this var is marked.
-	var prerelease bool
- for _, o := range cs.constraints {
- joy := true
- for _, c := range o {
-			// Before running the check handle the case where the version is
- // a prerelease and the check is not searching for prereleases.
- if c.con.pre == "" && v.pre != "" {
-				if !prerelease {
- em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- e = append(e, em)
-					prerelease = true
- }
- joy = false
-
- } else {
-
- if _, err := c.check(v); err != nil {
- e = append(e, err)
- joy = false
- }
- }
- }
-
- if joy {
- return true, []error{}
- }
- }
-
- return false, e
-}
-
-func (cs Constraints) String() string {
- buf := make([]string, len(cs.constraints))
- var tmp bytes.Buffer
-
- for k, v := range cs.constraints {
- tmp.Reset()
- vlen := len(v)
- for kk, c := range v {
- tmp.WriteString(c.string())
-
- // Space separate the AND conditions
- if vlen > 1 && kk < vlen-1 {
- tmp.WriteString(" ")
- }
- }
- buf[k] = tmp.String()
- }
-
- return strings.Join(buf, " || ")
-}
-
-var constraintOps map[string]cfunc
-var constraintRegex *regexp.Regexp
-var constraintRangeRegex *regexp.Regexp
-
-// Used to find individual constraints within a multi-constraint string
-var findConstraintRegex *regexp.Regexp
-
-// Used to validate that a segment of ANDs is valid
-var validConstraintRegex *regexp.Regexp
-
-const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
- `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
- `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
-
-func init() {
- constraintOps = map[string]cfunc{
- "": constraintTildeOrEqual,
- "=": constraintTildeOrEqual,
- "!=": constraintNotEqual,
- ">": constraintGreaterThan,
- "<": constraintLessThan,
- ">=": constraintGreaterThanEqual,
- "=>": constraintGreaterThanEqual,
- "<=": constraintLessThanEqual,
- "=<": constraintLessThanEqual,
- "~": constraintTilde,
- "~>": constraintTilde,
- "^": constraintCaret,
- }
-
- ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
-
- constraintRegex = regexp.MustCompile(fmt.Sprintf(
- `^\s*(%s)\s*(%s)\s*$`,
- ops,
- cvRegex))
-
- constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
- `\s*(%s)\s+-\s+(%s)\s*`,
- cvRegex, cvRegex))
-
- findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
- `(%s)\s*(%s)`,
- ops,
- cvRegex))
-
- validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
- `^(\s*(%s)\s*(%s)\s*\,?)+$`,
- ops,
- cvRegex))
-}
-
-// An individual constraint
-type constraint struct {
- // The version used in the constraint check. For example, if a constraint
-	// is '<= 2.0.0' the con is a version instance representing 2.0.0.
- con *Version
-
- // The original parsed version (e.g., 4.x from != 4.x)
- orig string
-
- // The original operator for the constraint
- origfunc string
-
- // When an x is used as part of the version (e.g., 1.x)
- minorDirty bool
- dirty bool
- patchDirty bool
-}
-
-// Check if a version meets the constraint
-func (c *constraint) check(v *Version) (bool, error) {
- return constraintOps[c.origfunc](v, c)
-}
-
-// String prints an individual constraint into a string
-func (c *constraint) string() string {
- return c.origfunc + c.orig
-}
-
-type cfunc func(v *Version, c *constraint) (bool, error)
-
-func parseConstraint(c string) (*constraint, error) {
- if len(c) > 0 {
- m := constraintRegex.FindStringSubmatch(c)
- if m == nil {
- return nil, fmt.Errorf("improper constraint: %s", c)
- }
-
- cs := &constraint{
- orig: m[2],
- origfunc: m[1],
- }
-
- ver := m[2]
- minorDirty := false
- patchDirty := false
- dirty := false
- if isX(m[3]) || m[3] == "" {
- ver = "0.0.0"
- dirty = true
- } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
- minorDirty = true
- dirty = true
- ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
- } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" {
- dirty = true
- patchDirty = true
- ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
- }
-
- con, err := NewVersion(ver)
- if err != nil {
-
- // The constraintRegex should catch any regex parsing errors. So,
- // we should never get here.
- return nil, errors.New("constraint Parser Error")
- }
-
- cs.con = con
- cs.minorDirty = minorDirty
- cs.patchDirty = patchDirty
- cs.dirty = dirty
-
- return cs, nil
- }
-
- // The rest is the special case where an empty string was passed in which
- // is equivalent to * or >=0.0.0
- con, err := StrictNewVersion("0.0.0")
- if err != nil {
-
- // The constraintRegex should catch any regex parsing errors. So,
- // we should never get here.
- return nil, errors.New("constraint Parser Error")
- }
-
- cs := &constraint{
- con: con,
- orig: c,
- origfunc: "",
- minorDirty: false,
- patchDirty: false,
- dirty: true,
- }
- return cs, nil
-}
-
-// Constraint functions
-func constraintNotEqual(v *Version, c *constraint) (bool, error) {
- if c.dirty {
-
- // If there is a pre-release on the version but the constraint isn't looking
- // for them assume that pre-releases are not compatible. See issue 21 for
- // more details.
- if v.Prerelease() != "" && c.con.Prerelease() == "" {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- if c.con.Major() != v.Major() {
- return true, nil
- }
- if c.con.Minor() != v.Minor() && !c.minorDirty {
- return true, nil
- } else if c.minorDirty {
- return false, fmt.Errorf("%s is equal to %s", v, c.orig)
- } else if c.con.Patch() != v.Patch() && !c.patchDirty {
- return true, nil
- } else if c.patchDirty {
- // Need to handle prereleases if present
- if v.Prerelease() != "" || c.con.Prerelease() != "" {
- eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is equal to %s", v, c.orig)
- }
- return false, fmt.Errorf("%s is equal to %s", v, c.orig)
- }
- }
-
- eq := v.Equal(c.con)
- if eq {
- return false, fmt.Errorf("%s is equal to %s", v, c.orig)
- }
-
- return true, nil
-}
-
-func constraintGreaterThan(v *Version, c *constraint) (bool, error) {
-
- // If there is a pre-release on the version but the constraint isn't looking
- // for them assume that pre-releases are not compatible. See issue 21 for
- // more details.
- if v.Prerelease() != "" && c.con.Prerelease() == "" {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- var eq bool
-
- if !c.dirty {
- eq = v.Compare(c.con) == 1
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
- }
-
- if v.Major() > c.con.Major() {
- return true, nil
- } else if v.Major() < c.con.Major() {
- return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
- } else if c.minorDirty {
- // This is a range case such as >11. When the version is something like
-		// 11.1.0 it is not > 11. For that we would need 12 or higher
- return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
- } else if c.patchDirty {
- // This is for ranges such as >11.1. A version of 11.1.1 is not greater
-		// while one of 11.2.1 is greater
- eq = v.Minor() > c.con.Minor()
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
- }
-
-	// If we have gotten here we are not comparing pre-releases and can use the
- // Compare function to accomplish that.
- eq = v.Compare(c.con) == 1
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
-}
-
-func constraintLessThan(v *Version, c *constraint) (bool, error) {
- // If there is a pre-release on the version but the constraint isn't looking
- // for them assume that pre-releases are not compatible. See issue 21 for
- // more details.
- if v.Prerelease() != "" && c.con.Prerelease() == "" {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- eq := v.Compare(c.con) < 0
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
-}
-
-func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) {
-
- // If there is a pre-release on the version but the constraint isn't looking
- // for them assume that pre-releases are not compatible. See issue 21 for
- // more details.
- if v.Prerelease() != "" && c.con.Prerelease() == "" {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- eq := v.Compare(c.con) >= 0
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is less than %s", v, c.orig)
-}
-
-func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
- // If there is a pre-release on the version but the constraint isn't looking
- // for them assume that pre-releases are not compatible. See issue 21 for
- // more details.
- if v.Prerelease() != "" && c.con.Prerelease() == "" {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- var eq bool
-
- if !c.dirty {
- eq = v.Compare(c.con) <= 0
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is greater than %s", v, c.orig)
- }
-
- if v.Major() > c.con.Major() {
- return false, fmt.Errorf("%s is greater than %s", v, c.orig)
- } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty {
- return false, fmt.Errorf("%s is greater than %s", v, c.orig)
- }
-
- return true, nil
-}
-
-// ~*, ~>* --> >= 0.0.0 (any)
-// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x, ~>2.x.x --> >=2.0.0, <3.0.0
-// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
-// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
-// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
-// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
-func constraintTilde(v *Version, c *constraint) (bool, error) {
- // If there is a pre-release on the version but the constraint isn't looking
- // for them assume that pre-releases are not compatible. See issue 21 for
- // more details.
- if v.Prerelease() != "" && c.con.Prerelease() == "" {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- if v.LessThan(c.con) {
- return false, fmt.Errorf("%s is less than %s", v, c.orig)
- }
-
- // ~0.0.0 is a special case where all constraints are accepted. It's
- // equivalent to >= 0.0.0.
- if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
- !c.minorDirty && !c.patchDirty {
- return true, nil
- }
-
- if v.Major() != c.con.Major() {
- return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
- }
-
- if v.Minor() != c.con.Minor() && !c.minorDirty {
- return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig)
- }
-
- return true, nil
-}
-
-// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
-// it's a straight =
-func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) {
- // If there is a pre-release on the version but the constraint isn't looking
- // for them assume that pre-releases are not compatible. See issue 21 for
- // more details.
- if v.Prerelease() != "" && c.con.Prerelease() == "" {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- if c.dirty {
- return constraintTilde(v, c)
- }
-
- eq := v.Equal(c.con)
- if eq {
- return true, nil
- }
-
- return false, fmt.Errorf("%s is not equal to %s", v, c.orig)
-}
-
-// ^* --> (any)
-// ^1.2.3 --> >=1.2.3 <2.0.0
-// ^1.2 --> >=1.2.0 <2.0.0
-// ^1 --> >=1.0.0 <2.0.0
-// ^0.2.3 --> >=0.2.3 <0.3.0
-// ^0.2 --> >=0.2.0 <0.3.0
-// ^0.0.3 --> >=0.0.3 <0.0.4
-// ^0.0 --> >=0.0.0 <0.1.0
-// ^0 --> >=0.0.0 <1.0.0
-func constraintCaret(v *Version, c *constraint) (bool, error) {
- // If there is a pre-release on the version but the constraint isn't looking
- // for them assume that pre-releases are not compatible. See issue 21 for
- // more details.
- if v.Prerelease() != "" && c.con.Prerelease() == "" {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- // This less than handles prereleases
- if v.LessThan(c.con) {
- return false, fmt.Errorf("%s is less than %s", v, c.orig)
- }
-
- var eq bool
-
- // ^ when the major > 0 is >=x.y.z < x+1
- if c.con.Major() > 0 || c.minorDirty {
-
- // ^ has to be within a major range for > 0. Everything less than was
- // filtered out with the LessThan call above. This filters out those
-		// that are greater but not within the same major range.
- eq = v.Major() == c.con.Major()
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
- }
-
- // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
- if c.con.Major() == 0 && v.Major() > 0 {
- return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
- }
- // If the con Minor is > 0 it is not dirty
- if c.con.Minor() > 0 || c.patchDirty {
- eq = v.Minor() == c.con.Minor()
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
- }
-
- // At this point the major is 0 and the minor is 0 and not dirty. The patch
-	// is not dirty so we need to check if the patch versions are equal.
- eq = c.con.Patch() == v.Patch()
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
-}
-
-func isX(x string) bool {
- switch x {
- case "x", "*", "X":
- return true
- default:
- return false
- }
-}
-
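-// rewriteRange rewrites hyphen ranges in a constraint string into explicit
-// comparison pairs, e.g. "1.2 - 1.4.5" becomes ">= 1.2, <= 1.4.5".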
-func rewriteRange(i string) string {
- m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
- if m == nil {
- return i
- }
- o := i
- for _, v := range m {
- t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
- o = strings.Replace(o, v[0], t, 1)
- }
-
- return o
-}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
deleted file mode 100644
index 391aa46b..00000000
--- a/vendor/github.com/Masterminds/semver/v3/doc.go
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
-Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
-
-Specifically it provides the ability to:
-
- * Parse semantic versions
- * Sort semantic versions
- * Check if a semantic version fits within a set of constraints
- * Optionally work with a `v` prefix
-
-Parsing Semantic Versions
-
-There are two functions that can parse semantic versions. The `StrictNewVersion`
-function only parses valid version 2 semantic versions as outlined in the
-specification. The `NewVersion` function attempts to coerce a version into a
-semantic version and parse it. For example, if there is a leading v or a version
-listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
-semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
-that can be sorted, compared, and used in constraints.
-
-When parsing a version an error is returned if there is an issue
-parsing the version. For example,
-
- v, err := semver.NewVersion("1.2.3-beta.1+b345")
-
-The version object has methods to get the parts of the version, compare it to
-other versions, convert the version back into a string, and get the original
-string. For more details please see the documentation
-at https://godoc.org/github.com/Masterminds/semver.
-
-Sorting Semantic Versions
-
-A set of versions can be sorted using the `sort` package from the standard library.
-For example,
-
- raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
- vs := make([]*semver.Version, len(raw))
- for i, r := range raw {
- v, err := semver.NewVersion(r)
- if err != nil {
- t.Errorf("Error parsing version: %s", err)
- }
-
- vs[i] = v
- }
-
- sort.Sort(semver.Collection(vs))
-
-Checking Version Constraints and Comparing Versions
-
-There are two methods for comparing versions. One uses comparison methods on
-`Version` instances and the other uses Constraints. There are some important
-differences to note between these two methods of comparison.
-
-1. When two versions are compared using functions such as `Compare`, `LessThan`,
- and others it will follow the specification and always include prereleases
-   within the comparison. It will provide an answer that is valid per the
-   comparison section of the spec at https://semver.org/#spec-item-11
-2. When constraint checking is used for checks or validation it will follow a
- different set of rules that are common for ranges with tools like npm/js
- and Rust/Cargo. This includes considering prereleases to be invalid if the
-   range does not include one. If you want to have it include pre-releases a
- simple solution is to include `-0` in your range.
-3. Constraint ranges can have some complex rules including the shorthand use of
- ~ and ^. For more details on those see the options below.
-
-There are differences between the two methods of checking versions because the
-comparison methods on `Version` follow the specification while comparison ranges
-are not part of the specification. Different packages and tools have taken it
-upon themselves to come up with range rules. This has resulted in differences.
-For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
-different pattern for ^. The comparison features in this package follow the
-npm/js and Cargo/Rust lead because applications using it have followed similar
-patterns with their versions.
-
-Checking a version against version constraints is one of the most featureful
-parts of the package.
-
- c, err := semver.NewConstraint(">= 1.2.3")
- if err != nil {
- // Handle constraint not being parsable.
- }
-
- v, err := semver.NewVersion("1.3")
- if err != nil {
- // Handle version not being parsable.
- }
-	// Check if the version meets the constraints. The variable a will be true.
- a := c.Check(v)
-
-Basic Comparisons
-
-There are two elements to the comparisons. First, a comparison string is a list
-of comma or space separated AND comparisons. These are then separated by || (OR)
-comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
-comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
-greater than or equal to 4.2.3. This can also be written as
-`">= 1.2, < 3.0.0 || >= 4.2.3"`
-
-The basic comparisons are:
-
- * `=`: equal (aliased to no operator)
- * `!=`: not equal
- * `>`: greater than
- * `<`: less than
- * `>=`: greater than or equal to
- * `<=`: less than or equal to
-
-Hyphen Range Comparisons
-
-There are multiple methods to handle ranges and the first is hyphen ranges.
-These look like:
-
- * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
- * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
-
-Wildcards In Comparisons
-
-The `x`, `X`, and `*` characters can be used as wildcards. This works
-for all comparison operators. When used on the `=` operator it falls
-back to the tilde operation. For example,
-
- * `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
- * `>= 1.2.x` is equivalent to `>= 1.2.0`
- * `<= 2.x` is equivalent to `< 3`
- * `*` is equivalent to `>= 0.0.0`
-
-Tilde Range Comparisons (Patch)
-
-The tilde (`~`) comparison operator is for patch level ranges when a minor
-version is specified and major level changes when the minor number is missing.
-For example,
-
- * `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
- * `~1` is equivalent to `>= 1, < 2`
- * `~2.3` is equivalent to `>= 2.3 < 2.4`
- * `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
- * `~1.x` is equivalent to `>= 1 < 2`
-
-Caret Range Comparisons (Major)
-
-The caret (`^`) comparison operator is for major level changes once a stable
-(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
-as the API stability level. This is useful when comparing API versions, as a
-major change is API breaking. For example,
-
- * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
- * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
- * `^2.3` is equivalent to `>= 2.3, < 3`
- * `^2.x` is equivalent to `>= 2.0.0, < 3`
- * `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
- * `^0.2` is equivalent to `>=0.2.0 <0.3.0`
- * `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
- * `^0.0` is equivalent to `>=0.0.0 <0.1.0`
- * `^0` is equivalent to `>=0.0.0 <1.0.0`
-
-Validation
-
-In addition to testing a version against a constraint, a version can be validated
-against a constraint. When validation fails a slice of errors containing why a
-version didn't meet the constraint is returned. For example,
-
- c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
- if err != nil {
- // Handle constraint not being parseable.
- }
-
-	v, err := semver.NewVersion("1.3")
- if err != nil {
- // Handle version not being parseable.
- }
-
- // Validate a version against a constraint.
- a, msgs := c.Validate(v)
- // a is false
- for _, m := range msgs {
- fmt.Println(m)
-
- // Loops over the errors which would read
- // "1.3 is greater than 1.2.3"
- // "1.3 is less than 1.4"
- }
-*/
-package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go
deleted file mode 100644
index a242ad70..00000000
--- a/vendor/github.com/Masterminds/semver/v3/fuzz.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build gofuzz
-
-package semver
-
-func Fuzz(data []byte) int {
- d := string(data)
-
- // Test NewVersion
- _, _ = NewVersion(d)
-
- // Test StrictNewVersion
- _, _ = StrictNewVersion(d)
-
- // Test NewConstraint
- _, _ = NewConstraint(d)
-
- // The return value should be 0 normally, 1 if the priority in future tests
- // should be increased, and -1 if future tests should skip passing in that
- // data. We do not have a reason to change priority so 0 is always returned.
- // There are example tests that do this.
- return 0
-}
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
deleted file mode 100644
index d6b9cda3..00000000
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ /dev/null
@@ -1,606 +0,0 @@
-package semver
-
-import (
- "bytes"
- "database/sql/driver"
- "encoding/json"
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-// The compiled version of the regex created at init() is cached here so it
-// only needs to be created once.
-var versionRegex *regexp.Regexp
-
-var (
-	// ErrInvalidSemVer is returned when a version is found to be invalid
-	// while being parsed.
- ErrInvalidSemVer = errors.New("Invalid Semantic Version")
-
- // ErrEmptyString is returned when an empty string is passed in for parsing.
- ErrEmptyString = errors.New("Version string empty")
-
- // ErrInvalidCharacters is returned when invalid characters are found as
- // part of a version
- ErrInvalidCharacters = errors.New("Invalid characters in version")
-
- // ErrSegmentStartsZero is returned when a version segment starts with 0.
- // This is invalid in SemVer.
- ErrSegmentStartsZero = errors.New("Version segment starts with 0")
-
- // ErrInvalidMetadata is returned when the metadata is an invalid format
- ErrInvalidMetadata = errors.New("Invalid Metadata string")
-
- // ErrInvalidPrerelease is returned when the pre-release is an invalid format
- ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
-)
-
-// semVerRegex is the regular expression used to parse a semantic version.
-const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
- `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
- `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
-
-// Version represents a single semantic version.
-type Version struct {
- major, minor, patch uint64
- pre string
- metadata string
- original string
-}
-
-func init() {
- versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
-}
-
-const num string = "0123456789"
-const allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
-
-// StrictNewVersion parses a given version and returns an instance of Version or
-// an error if unable to parse the version. Only parses valid semantic versions.
-// Performs checking that can find errors within the version.
-// If you want to coerce a version, such as 1 or 1.2, and parse it as the 1.x
-// releases of semver did, use the NewVersion() function.
-func StrictNewVersion(v string) (*Version, error) {
- // Parsing here does not use RegEx in order to increase performance and reduce
- // allocations.
-
- if len(v) == 0 {
- return nil, ErrEmptyString
- }
-
- // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build
- parts := strings.SplitN(v, ".", 3)
- if len(parts) != 3 {
- return nil, ErrInvalidSemVer
- }
-
- sv := &Version{
- original: v,
- }
-
- // check for prerelease or build metadata
- var extra []string
- if strings.ContainsAny(parts[2], "-+") {
- // Start with the build metadata first as it needs to be on the right
- extra = strings.SplitN(parts[2], "+", 2)
- if len(extra) > 1 {
- // build metadata found
- sv.metadata = extra[1]
- parts[2] = extra[0]
- }
-
- extra = strings.SplitN(parts[2], "-", 2)
- if len(extra) > 1 {
- // prerelease found
- sv.pre = extra[1]
- parts[2] = extra[0]
- }
- }
-
- // Validate the number segments are valid. This includes only having positive
- // numbers and no leading 0's.
- for _, p := range parts {
- if !containsOnly(p, num) {
- return nil, ErrInvalidCharacters
- }
-
- if len(p) > 1 && p[0] == '0' {
- return nil, ErrSegmentStartsZero
- }
- }
-
- // Extract the major, minor, and patch elements onto the returned Version
- var err error
- sv.major, err = strconv.ParseUint(parts[0], 10, 64)
- if err != nil {
- return nil, err
- }
-
- sv.minor, err = strconv.ParseUint(parts[1], 10, 64)
- if err != nil {
- return nil, err
- }
-
- sv.patch, err = strconv.ParseUint(parts[2], 10, 64)
- if err != nil {
- return nil, err
- }
-
- // No prerelease or build metadata found so returning now as a fastpath.
- if sv.pre == "" && sv.metadata == "" {
- return sv, nil
- }
-
- if sv.pre != "" {
- if err = validatePrerelease(sv.pre); err != nil {
- return nil, err
- }
- }
-
- if sv.metadata != "" {
- if err = validateMetadata(sv.metadata); err != nil {
- return nil, err
- }
- }
-
- return sv, nil
-}
-
-// NewVersion parses a given version and returns an instance of Version or
-// an error if unable to parse the version. If the version is SemVer-ish it
-// attempts to convert it to SemVer. If you want to validate it was a strict
-// semantic version at parse time see StrictNewVersion().
-func NewVersion(v string) (*Version, error) {
- m := versionRegex.FindStringSubmatch(v)
- if m == nil {
- return nil, ErrInvalidSemVer
- }
-
- sv := &Version{
- metadata: m[8],
- pre: m[5],
- original: v,
- }
-
- var err error
- sv.major, err = strconv.ParseUint(m[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("Error parsing version segment: %s", err)
- }
-
- if m[2] != "" {
- sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
- if err != nil {
- return nil, fmt.Errorf("Error parsing version segment: %s", err)
- }
- } else {
- sv.minor = 0
- }
-
- if m[3] != "" {
- sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
- if err != nil {
- return nil, fmt.Errorf("Error parsing version segment: %s", err)
- }
- } else {
- sv.patch = 0
- }
-
- // Perform some basic due diligence on the extra parts to ensure they are
- // valid.
-
- if sv.pre != "" {
- if err = validatePrerelease(sv.pre); err != nil {
- return nil, err
- }
- }
-
- if sv.metadata != "" {
- if err = validateMetadata(sv.metadata); err != nil {
- return nil, err
- }
- }
-
- return sv, nil
-}
-
-// MustParse parses a given version and panics on error.
-func MustParse(v string) *Version {
- sv, err := NewVersion(v)
- if err != nil {
- panic(err)
- }
- return sv
-}
-
-// String converts a Version object to a string.
-// Note, if the original version contained a leading v this version will not.
-// See the Original() method to retrieve the original value. Semantic Versions
-// don't contain a leading v per the spec. Instead it's optional on
-// implementation.
-func (v Version) String() string {
- var buf bytes.Buffer
-
- fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch)
- if v.pre != "" {
- fmt.Fprintf(&buf, "-%s", v.pre)
- }
- if v.metadata != "" {
- fmt.Fprintf(&buf, "+%s", v.metadata)
- }
-
- return buf.String()
-}
-
-// Original returns the original value passed in to be parsed.
-func (v *Version) Original() string {
- return v.original
-}
-
-// Major returns the major version.
-func (v Version) Major() uint64 {
- return v.major
-}
-
-// Minor returns the minor version.
-func (v Version) Minor() uint64 {
- return v.minor
-}
-
-// Patch returns the patch version.
-func (v Version) Patch() uint64 {
- return v.patch
-}
-
-// Prerelease returns the pre-release version.
-func (v Version) Prerelease() string {
- return v.pre
-}
-
-// Metadata returns the metadata on the version.
-func (v Version) Metadata() string {
- return v.metadata
-}
-
-// originalVPrefix returns the original 'v' prefix if any.
-func (v Version) originalVPrefix() string {
-
- // Note, only lowercase v is supported as a prefix by the parser.
- if v.original != "" && v.original[:1] == "v" {
- return v.original[:1]
- }
- return ""
-}
-
-// IncPatch produces the next patch version.
-// If the current version does not have prerelease/metadata information,
-// it unsets metadata and prerelease values, increments patch number.
-// If the current version has any of prerelease or metadata information,
-// it unsets both values and keeps current patch value
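-// For example (illustrative): 1.2.3 becomes 1.2.4, while 1.2.3-beta.1 becomes 1.2.3.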
-func (v Version) IncPatch() Version {
- vNext := v
- // according to http://semver.org/#spec-item-9
- // Pre-release versions have a lower precedence than the associated normal version.
- // according to http://semver.org/#spec-item-10
- // Build metadata SHOULD be ignored when determining version precedence.
- if v.pre != "" {
- vNext.metadata = ""
- vNext.pre = ""
- } else {
- vNext.metadata = ""
- vNext.pre = ""
- vNext.patch = v.patch + 1
- }
- vNext.original = v.originalVPrefix() + "" + vNext.String()
- return vNext
-}
-
-// IncMinor produces the next minor version.
-// Sets patch to 0.
-// Increments minor number.
-// Unsets metadata.
-// Unsets prerelease status.
-func (v Version) IncMinor() Version {
- vNext := v
- vNext.metadata = ""
- vNext.pre = ""
- vNext.patch = 0
- vNext.minor = v.minor + 1
- vNext.original = v.originalVPrefix() + "" + vNext.String()
- return vNext
-}
-
-// IncMajor produces the next major version.
-// Sets patch to 0.
-// Sets minor to 0.
-// Increments major number.
-// Unsets metadata.
-// Unsets prerelease status.
-func (v Version) IncMajor() Version {
- vNext := v
- vNext.metadata = ""
- vNext.pre = ""
- vNext.patch = 0
- vNext.minor = 0
- vNext.major = v.major + 1
- vNext.original = v.originalVPrefix() + "" + vNext.String()
- return vNext
-}
-
-// SetPrerelease defines the prerelease value.
-// Value must not include the required 'hyphen' prefix.
-func (v Version) SetPrerelease(prerelease string) (Version, error) {
- vNext := v
- if len(prerelease) > 0 {
- if err := validatePrerelease(prerelease); err != nil {
- return vNext, err
- }
- }
- vNext.pre = prerelease
- vNext.original = v.originalVPrefix() + "" + vNext.String()
- return vNext, nil
-}
-
-// SetMetadata defines metadata value.
-// Value must not include the required 'plus' prefix.
-func (v Version) SetMetadata(metadata string) (Version, error) {
- vNext := v
- if len(metadata) > 0 {
- if err := validateMetadata(metadata); err != nil {
- return vNext, err
- }
- }
- vNext.metadata = metadata
- vNext.original = v.originalVPrefix() + "" + vNext.String()
- return vNext, nil
-}
-
-// LessThan tests if one version is less than another one.
-func (v *Version) LessThan(o *Version) bool {
- return v.Compare(o) < 0
-}
-
-// GreaterThan tests if one version is greater than another one.
-func (v *Version) GreaterThan(o *Version) bool {
- return v.Compare(o) > 0
-}
-
-// Equal tests if two versions are equal to each other.
-// Note, versions can be equal with different metadata since metadata
-// is not considered part of the comparable version.
-func (v *Version) Equal(o *Version) bool {
- return v.Compare(o) == 0
-}
-
-// Compare compares this version to another one. It returns -1, 0, or 1 if
-// the version is smaller, equal to, or larger than the other version.
-//
-// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
-// lower than the version without a prerelease. Compare always takes into account
-// prereleases. If you want to work with ranges using typical range syntaxes that
-// skip prereleases if the range is not looking for them use constraints.
-func (v *Version) Compare(o *Version) int {
- // Compare the major, minor, and patch version for differences. If a
- // difference is found return the comparison.
- if d := compareSegment(v.Major(), o.Major()); d != 0 {
- return d
- }
- if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
- return d
- }
- if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
- return d
- }
-
- // At this point the major, minor, and patch versions are the same.
- ps := v.pre
- po := o.Prerelease()
-
- if ps == "" && po == "" {
- return 0
- }
- if ps == "" {
- return 1
- }
- if po == "" {
- return -1
- }
-
- return comparePrerelease(ps, po)
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (v *Version) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- temp, err := NewVersion(s)
- if err != nil {
- return err
- }
- v.major = temp.major
- v.minor = temp.minor
- v.patch = temp.patch
- v.pre = temp.pre
- v.metadata = temp.metadata
- v.original = temp.original
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (v Version) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// Scan implements the sql.Scanner interface.
-func (v *Version) Scan(value interface{}) error {
- var s string
- s, _ = value.(string)
- temp, err := NewVersion(s)
- if err != nil {
- return err
- }
- v.major = temp.major
- v.minor = temp.minor
- v.patch = temp.patch
- v.pre = temp.pre
- v.metadata = temp.metadata
- v.original = temp.original
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (v Version) Value() (driver.Value, error) {
- return v.String(), nil
-}
-
-func compareSegment(v, o uint64) int {
- if v < o {
- return -1
- }
- if v > o {
- return 1
- }
-
- return 0
-}
-
-func comparePrerelease(v, o string) int {
-
-	// split the prerelease versions by their parts. The separator, per the spec,
- // is a .
- sparts := strings.Split(v, ".")
- oparts := strings.Split(o, ".")
-
- // Find the longer length of the parts to know how many loop iterations to
- // go through.
- slen := len(sparts)
- olen := len(oparts)
-
- l := slen
- if olen > slen {
- l = olen
- }
-
- // Iterate over each part of the prereleases to compare the differences.
- for i := 0; i < l; i++ {
-		// Since the length of the parts can be different we need to create
- // a placeholder. This is to avoid out of bounds issues.
- stemp := ""
- if i < slen {
- stemp = sparts[i]
- }
-
- otemp := ""
- if i < olen {
- otemp = oparts[i]
- }
-
- d := comparePrePart(stemp, otemp)
- if d != 0 {
- return d
- }
- }
-
- // Reaching here means two versions are of equal value but have different
- // metadata (the part following a +). They are not identical in string form
- // but the version comparison finds them to be equal.
- return 0
-}
-
-func comparePrePart(s, o string) int {
- // Fastpath if they are equal
- if s == o {
- return 0
- }
-
- // When s or o is empty, the non-empty side wins: per the spec, a larger
- // set of prerelease fields has higher precedence when all the preceding
- // fields are equal.
- if s == "" {
- if o != "" {
- return -1
- }
- return 1
- }
-
- if o == "" {
- if s != "" {
- return 1
- }
- return -1
- }
-
- // When comparing strings "99" is greater than "103", so we need to detect
- // numbers and compare them numerically. Per the semver spec, numbers are
- // always positive, and numeric identifiers always have lower precedence
- // than alphanumeric ones; something like -99, with a leading -, is
- // evaluated as an alphanum. Parsing as Uints is safe because negative
- // numbers can therefore never be numeric identifiers.
-
- oi, n1 := strconv.ParseUint(o, 10, 64)
- si, n2 := strconv.ParseUint(s, 10, 64)
-
- // When both are non-numeric strings, compare them lexically.
- if n1 != nil && n2 != nil {
- if s > o {
- return 1
- }
- return -1
- } else if n1 != nil {
- // o is a string and s is a number
- return -1
- } else if n2 != nil {
- // s is a string and o is a number
- return 1
- }
- // Both are numbers
- if si > oi {
- return 1
- }
- return -1
-
-}
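-
-// For illustration, how comparePrePart orders identifiers:
-//
-//  comparePrePart("2", "11")       // -1: both numeric, compared as numbers
-//  comparePrePart("2", "alpha")    // -1: numeric sorts below alphanumeric
-//  comparePrePart("beta", "alpha") //  1: plain lexical comparison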
-
-// containsOnly is like strings.ContainsAny, but reports whether s consists
-// solely of characters from comp.
-func containsOnly(s string, comp string) bool {
- return strings.IndexFunc(s, func(r rune) bool {
- return !strings.ContainsRune(comp, r)
- }) == -1
-}
-
-// From the spec, "Identifiers MUST comprise only
-// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
-// Numeric identifiers MUST NOT include leading zeroes.". These segments can
-// be dot separated.
-func validatePrerelease(p string) error {
- eparts := strings.Split(p, ".")
- for _, p := range eparts {
- if containsOnly(p, num) {
- if len(p) > 1 && p[0] == '0' {
- return ErrSegmentStartsZero
- }
- } else if !containsOnly(p, allowed) {
- return ErrInvalidPrerelease
- }
- }
-
- return nil
-}
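-
-// A sketch of what validatePrerelease accepts and rejects (num and allowed
-// are the character sets defined elsewhere in this file):
-//
-//  validatePrerelease("alpha.1")  // nil
-//  validatePrerelease("alpha.01") // ErrSegmentStartsZero (leading zero in a numeric part)
-//  validatePrerelease("alpha_1")  // ErrInvalidPrerelease ('_' is outside [0-9A-Za-z-])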
-
-// From the spec, "Build metadata MAY be denoted by
-// appending a plus sign and a series of dot separated identifiers immediately
-// following the patch or pre-release version. Identifiers MUST comprise only
-// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
-func validateMetadata(m string) error {
- eparts := strings.Split(m, ".")
- for _, p := range eparts {
- if !containsOnly(p, allowed) {
- return ErrInvalidMetadata
- }
- }
- return nil
-}
diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore
deleted file mode 100644
index 5e3002f8..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-vendor/
-/.glide
diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
deleted file mode 100644
index fcdd4e88..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
+++ /dev/null
@@ -1,370 +0,0 @@
-# Changelog
-
-## Release 3.2.1 (2021-02-04)
-
-### Changed
-
-- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
-
-## Release 3.2.0 (2020-12-14)
-
-### Added
-
-- #211: Added randInt function (thanks @kochurovro)
-- #223: Added fromJson and mustFromJson functions (thanks @mholt)
-- #242: Added a bcrypt function (thanks @robbiet480)
-- #253: Added randBytes function (thanks @MikaelSmith)
-- #254: Added dig function for dicts (thanks @nyarly)
-- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton)
-- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl)
-- #268: Added and and all functions for testing conditions (thanks @phuslu)
-- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf
- (thanks @andrewmostello)
-- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek)
-- #270: Extend certificate functions to handle non-RSA keys + add support for
- ed25519 keys (thanks @misberner)
-
-### Changed
-
-- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer
-- Using semver 3.1.1 and mergo 0.3.11
-
-### Fixed
-
-- #249: Fix htmlDateInZone example (thanks @spawnia)
-
-NOTE: The dependency github.com/imdario/mergo reverted the breaking change in
-0.3.9 via 0.3.10 release.
-
-## Release 3.1.0 (2020-04-16)
-
-NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9
-that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8.
-
-### Added
-
-- #225: Added support for generating htpasswd hash (thanks @rustycl0ck)
-- #224: Added duration filter (thanks @frebib)
-- #205: Added `seq` function (thanks @thadc23)
-
-### Changed
-
-- #203: Unlambda functions with correct signature (thanks @muesli)
-- #236: Updated the license formatting for GitHub display purposes
-- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9
- as it causes a breaking change for sprig. That issue is tracked at
- https://github.com/imdario/mergo/issues/139
-
-### Fixed
-
-- #229: Fix `seq` example in docs (thanks @kalmant)
-
-## Release 3.0.2 (2019-12-13)
-
-### Fixed
-
-- #220: Updating to semver v3.0.3 to fix issue with <= ranges
-- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya)
-
-## Release 3.0.1 (2019-12-08)
-
-### Fixed
-
-- #212: Updated semver fixing broken constraint checking with ^0.0
-
-## Release 3.0.0 (2019-10-02)
-
-### Added
-
-- #187: Added durationRound function (thanks @yjp20)
-- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn)
-- #193: Added toRawJson support (thanks @Dean-Coakley)
-- #197: Added get support to dicts (thanks @Dean-Coakley)
-
-### Changed
-
-- #186: Moving dependency management to Go modules
-- #186: Updated semver to v3. This has changes in the way ^ is handled
-- #194: Updated documentation on merging and how it copies. Added example using deepCopy
-- #196: trunc now supports negative values (thanks @Dean-Coakley)
-
-## Release 2.22.0 (2019-10-02)
-
-### Added
-
-- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
-- #195: Added deepCopy function for use with dicts
-
-### Changed
-
-- Updated merge and mergeOverwrite documentation to explain copying and how to
- use deepCopy with it
-
-## Release 2.21.0 (2019-09-18)
-
-### Added
-
-- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
-- #128: Added toDecimal support (thanks @Dean-Coakley)
-- #169: Added list concat (thanks @astorath)
-- #174: Added deepEqual function (thanks @bonifaido)
-- #170: Added url parse and join functions (thanks @astorath)
-
-### Changed
-
-- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
-
-### Fixed
-
-- #172: Fix semver wildcard example (thanks @piepmatz)
-- #175: Fix dateInZone doc example (thanks @s3than)
-
-## Release 2.20.0 (2019-06-18)
-
-### Added
-
-- #164: Adding function to get unix epoch for a time (@mattfarina)
-- #166: Adding tests for date_in_zone (@mattfarina)
-
-### Changed
-
-- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
-- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
-- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
-
-### Fixed
-
-## Release 2.19.0 (2019-03-02)
-
-IMPORTANT: This release reverts a change from 2.18.0
-
-In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
-
-We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
-
-### Changed
-
-- Fix substr panic 35fb796 (Alexey igrychev)
-- Remove extra period 1eb7729 (Matthew Lorimor)
-- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
-- README edits/fixes/suggestions 08fe136 (Lauri Apple)
-
-
-## Release 2.18.0 (2019-02-12)
-
-### Added
-
-- Added mergeOverwrite function
-- cryptographic functions that use secure random (see fe1de12)
-
-### Changed
-
-- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
-- Handle has for nil list 9c10885 (Daniel Cohen)
-- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
-- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
-- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
-- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
-- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)
-
-### Fixed
-
-- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
-- Fix substr var names and comments d581f80 (Dean Coakley)
-- Fix substr documentation 2737203 (Dean Coakley)
-
-## Release 2.17.1 (2019-01-03)
-
-### Fixed
-
-The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
-
-## Release 2.17.0 (2019-01-03)
-
-### Added
-
-- adds adler32sum function and test 6908fc2 (marshallford)
-- Added kebabcase function ca331a1 (Ilyes512)
-
-### Changed
-
-- Update goutils to 1.1.0 4e1125d (Matt Butcher)
-
-### Fixed
-
-- Fix 'has' documentation e3f2a85 (dean-coakley)
-- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
-- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
-
-## Release 2.16.0 (2018-08-13)
-
-### Added
-
-- add splitn function fccb0b0 (Helgi Þorbjörnsson)
-- Add slice func df28ca7 (gongdo)
-- Generate serial number a3bdffd (Cody Coons)
-- Extract values of dict with values function df39312 (Lawrence Jones)
-
-### Changed
-
-- Modify panic message for list.slice ae38335 (gongdo)
-- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
-- Remove duplicated documentation 1d97af1 (Matthew Fisher)
-- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
-
-### Fixed
-
-- Fix file permissions c5f40b5 (gongdo)
-- Fix example for buildCustomCert 7779e0d (Tin Lam)
-
-## Release 2.15.0 (2018-04-02)
-
-### Added
-
-- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
-- #66: Add ternary function (thanks @binoculars)
-- #67: Allow keys function to take multiple dicts (thanks @binoculars)
-- #89: Added sha1sum to crypto function (thanks @benkeil)
-- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei)
-- #92: Add travis testing for go 1.10
-- #93: Adding appveyor config for windows testing
-
-### Changed
-
-- #90: Updating to more recent dependencies
-- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
-
-### Fixed
-
-- #76: Fixed documentation typos (thanks @Thiht)
-- Fixed rounding issue on the `ago` function. Note, this removes support for Go 1.8 and older
-
-## Release 2.14.1 (2017-12-01)
-
-### Fixed
-
-- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
-- #61: Removing line with {{ due to it blocking GitHub Pages generation
-- #64: Update the list functions to handle int, string, and other slices for compatibility
-
-## Release 2.14.0 (2017-10-06)
-
-This new version of Sprig adds a set of functions for generating and working with SSL certificates.
-
-- `genCA` generates an SSL Certificate Authority
-- `genSelfSignedCert` generates an SSL self-signed certificate
-- `genSignedCert` generates an SSL certificate and key based on a given CA
-
-## Release 2.13.0 (2017-09-18)
-
-This release adds new functions, including:
-
-- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
-- `floor`, `ceil`, and `round` math functions
-- `toDate` converts a string to a date
-- `nindent` is just like `indent` but also prepends a new line
-- `ago` returns the time from `time.Now`
-
-### Added
-
-- #40: Added basic regex functionality (thanks @alanquillin)
-- #41: Added ceil floor and round functions (thanks @alanquillin)
-- #48: Added toDate function (thanks @andreynering)
-- #50: Added nindent function (thanks @binoculars)
-- #46: Added ago function (thanks @slayer)
-
-### Changed
-
-- #51: Updated godocs to include new string functions (thanks @curtisallen)
-- #49: Added ability to merge multiple dicts (thanks @binoculars)
-
-## Release 2.12.0 (2017-05-17)
-
-- `snakecase`, `camelcase`, and `shuffle` are three new string functions
-- `fail` allows you to bail out of a template render when conditions are not met
-
-## Release 2.11.0 (2017-05-02)
-
-- Added `toJson` and `toPrettyJson`
-- Added `merge`
-- Refactored documentation
-
-## Release 2.10.0 (2017-03-15)
-
-- Added `semver` and `semverCompare` for Semantic Versions
-- `list` replaces `tuple`
-- Fixed issue with `join`
-- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
-
-## Release 2.9.0 (2017-02-23)
-
-- Added `splitList` to split a list
-- Added crypto functions of `genPrivateKey` and `derivePassword`
-
-## Release 2.8.0 (2016-12-21)
-
-- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
-- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
-
-## Release 2.7.0 (2016-12-01)
-
-- Added `sha256sum` to generate a hash of an input
-- Added functions to convert a numeric or string to `int`, `int64`, `float64`
-
-## Release 2.6.0 (2016-10-03)
-
-- Added a `uuidv4` template function for generating UUIDs inside of a template.
-
-## Release 2.5.0 (2016-08-19)
-
-- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
-- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
-- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
-
-## Release 2.4.0 (2016-08-16)
-
-- Adds two functions: `until` and `untilStep`
-
-## Release 2.3.0 (2016-06-21)
-
-- cat: Concatenate strings with whitespace separators.
-- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
-- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
-- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
-
-## Release 2.2.0 (2016-04-21)
-
-- Added a `genPrivateKey` function (Thanks @bacongobbler)
-
-## Release 2.1.0 (2016-03-30)
-
-- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
-- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
-
-## Release 2.0.0 (2016-03-29)
-
-Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
-
-- `min` complements `max` (formerly `biggest`)
-- `empty` indicates that a value is the empty value for its type
-- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
-- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
-- Date formatters have been added for HTML dates (as used in `date` input fields)
-- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
-
-## Release 1.2.0 (2016-02-01)
-
-- Added quote and squote
-- Added b32enc and b32dec
-- add now takes varargs
-- biggest now takes varargs
-
-## Release 1.1.0 (2015-12-29)
-
-- Added #4: Added contains function. strings.Contains, but with the arguments
- switched to simplify common pipelines. (thanks krancour)
-- Added Travis-CI testing support
-
-## Release 1.0.0 (2015-12-23)
-
-- Initial release
diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt
deleted file mode 100644
index f311b1ea..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (C) 2013-2020 Masterminds
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile
deleted file mode 100644
index 78d409cd..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-.PHONY: test
-test:
- @echo "==> Running tests"
- GO111MODULE=on go test -v
-
-.PHONY: test-cover
-test-cover:
- @echo "==> Running Tests with coverage"
- GO111MODULE=on go test -cover .
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md
deleted file mode 100644
index c37ba01c..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/README.md
+++ /dev/null
@@ -1,101 +0,0 @@
-# Sprig: Template functions for Go templates
-
-[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3)
-[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig)
-[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html)
-[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions)
-
-The Go language comes with a [built-in template
-language](http://golang.org/pkg/text/template/), but relatively few template
-functions. Sprig is a library that provides more than 100 commonly used
-template functions.
-
-It is inspired by the template functions found in
-[Twig](http://twig.sensiolabs.org/documentation) and in various
-JavaScript libraries, such as [underscore.js](http://underscorejs.org/).
-
-## IMPORTANT NOTES
-
-Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In
-its v0.3.9 release there was a behavior change that impacts merging template
-functions in sprig. It is currently recommended to use v0.3.8 of that package.
-Using v0.3.9 will cause sprig tests to fail. The issue in mergo is tracked at
-https://github.com/imdario/mergo/issues/139.
-
-## Package Versions
-
-There are two active major versions of the `sprig` package.
-
-* v3 is the current stable release series on the `master` branch. The Go API
- should remain compatible with v2, the previous stable series. Behavior
- changes in some functions are the reason for the new major version.
-* v2 is the previous stable release series. It has been more than three years since
- the initial release of v2. You can read the documentation and see the code
- on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch.
- Bug fixes to this major version will continue for some time.
-
-## Usage
-
-**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for
-detailed instructions and code snippets for the >100 template functions available.
-
-**Go developers**: If you'd like to include Sprig as a library in your program,
-our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig).
-
-For standard usage, read on.
-
-### Load the Sprig library
-
-To load the Sprig `FuncMap`:
-
-```go
-
-import (
- "github.com/Masterminds/sprig"
- "html/template"
-)
-
-// This example illustrates that the FuncMap *must* be set before the
-// templates themselves are loaded.
-tpl := template.Must(
- template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html")
-)
-
-
-```
-
-### Calling the functions inside of templates
-
-By convention, all functions are lowercase. This seems to follow the Go
-idiom for template functions (as opposed to template methods, which are
-TitleCase). For example, this:
-
-```
-{{ "hello!" | upper | repeat 5 }}
-```
-
-produces this:
-
-```
-HELLO!HELLO!HELLO!HELLO!HELLO!
-```
-
-## Principles Driving Our Function Selection
-
-We followed these principles to decide which functions to add and how to implement them:
-
-- Use template functions to build layout. The following
- types of operations are within the domain of template functions:
- - Formatting
- - Layout
- - Simple type conversions
- - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
-- Template functions should not return errors unless there is no way to print
- a sensible value. For example, converting a string to an integer should not
- produce an error if conversion fails. Instead, it should display a default
- value.
-- Simple math is necessary for grid layouts, pagers, and so on. Complex math
- (anything other than arithmetic) should be done outside of templates.
-- Template functions only deal with the data passed into them. They never retrieve
- data from a source.
-- Finally, do not override core Go template functions.
diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go
deleted file mode 100644
index 13a5cd55..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/crypto.go
+++ /dev/null
@@ -1,653 +0,0 @@
-package sprig
-
-import (
- "bytes"
- "crypto"
- "crypto/aes"
- "crypto/cipher"
- "crypto/dsa"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/hmac"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/asn1"
- "encoding/base64"
- "encoding/binary"
- "encoding/hex"
- "encoding/pem"
- "errors"
- "fmt"
- "hash/adler32"
- "io"
- "math/big"
- "net"
- "time"
-
- "strings"
-
- "github.com/google/uuid"
- bcrypt_lib "golang.org/x/crypto/bcrypt"
- "golang.org/x/crypto/scrypt"
-)
-
-func sha256sum(input string) string {
- hash := sha256.Sum256([]byte(input))
- return hex.EncodeToString(hash[:])
-}
-
-func sha1sum(input string) string {
- hash := sha1.Sum([]byte(input))
- return hex.EncodeToString(hash[:])
-}
-
-func adler32sum(input string) string {
- hash := adler32.Checksum([]byte(input))
- return fmt.Sprintf("%d", hash)
-}
-
-func bcrypt(input string) string {
- hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost)
- if err != nil {
- return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err)
- }
-
- return string(hash)
-}
-
-func htpasswd(username string, password string) string {
- if strings.Contains(username, ":") {
- return fmt.Sprintf("invalid username: %s", username)
- }
- return fmt.Sprintf("%s:%s", username, bcrypt(password))
-}
-
-func randBytes(count int) (string, error) {
- buf := make([]byte, count)
- if _, err := rand.Read(buf); err != nil {
- return "", err
- }
- return base64.StdEncoding.EncodeToString(buf), nil
-}
-
-// uuidv4 provides a safe and secure UUID v4 implementation
-func uuidv4() string {
- return uuid.New().String()
-}
-
-var masterPasswordSeed = "com.lyndir.masterpassword"
-
-var passwordTypeTemplates = map[string][][]byte{
- "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")},
- "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"),
- []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"),
- []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"),
- []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"),
- []byte("CvccCvcvCvccno")},
- "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")},
- "short": {[]byte("Cvcn")},
- "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")},
- "pin": {[]byte("nnnn")},
-}
-
-var templateCharacters = map[byte]string{
- 'V': "AEIOU",
- 'C': "BCDFGHJKLMNPQRSTVWXYZ",
- 'v': "aeiou",
- 'c': "bcdfghjklmnpqrstvwxyz",
- 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ",
- 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz",
- 'n': "0123456789",
- 'o': "@&%?,=[]_:-+*$#!'^~;()/.",
- 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()",
-}
-
-func derivePassword(counter uint32, passwordType, password, user, site string) string {
- var templates = passwordTypeTemplates[passwordType]
- if templates == nil {
- return fmt.Sprintf("cannot find password template %s", passwordType)
- }
-
- var buffer bytes.Buffer
- buffer.WriteString(masterPasswordSeed)
- binary.Write(&buffer, binary.BigEndian, uint32(len(user)))
- buffer.WriteString(user)
-
- salt := buffer.Bytes()
- key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64)
- if err != nil {
- return fmt.Sprintf("failed to derive password: %s", err)
- }
-
- buffer.Truncate(len(masterPasswordSeed))
- binary.Write(&buffer, binary.BigEndian, uint32(len(site)))
- buffer.WriteString(site)
- binary.Write(&buffer, binary.BigEndian, counter)
-
- var hmacv = hmac.New(sha256.New, key)
- hmacv.Write(buffer.Bytes())
- var seed = hmacv.Sum(nil)
- var temp = templates[int(seed[0])%len(templates)]
-
- buffer.Truncate(0)
- for i, element := range temp {
- passChars := templateCharacters[element]
- passChar := passChars[int(seed[i+1])%len(passChars)]
- buffer.WriteByte(passChar)
- }
-
- return buffer.String()
-}
-
-func generatePrivateKey(typ string) string {
- var priv interface{}
- var err error
- switch typ {
- case "", "rsa":
- // good enough for government work
- priv, err = rsa.GenerateKey(rand.Reader, 4096)
- case "dsa":
- key := new(dsa.PrivateKey)
- // again, good enough for government work
- if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil {
- return fmt.Sprintf("failed to generate dsa params: %s", err)
- }
- err = dsa.GenerateKey(key, rand.Reader)
- priv = key
- case "ecdsa":
- // again, good enough for government work
- priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- case "ed25519":
- _, priv, err = ed25519.GenerateKey(rand.Reader)
- default:
- return "Unknown type " + typ
- }
- if err != nil {
- return fmt.Sprintf("failed to generate private key: %s", err)
- }
-
- return string(pem.EncodeToMemory(pemBlockForKey(priv)))
-}
-
-// DSAKeyFormat stores the format for DSA keys.
-// Used by pemBlockForKey
-type DSAKeyFormat struct {
- Version int
- P, Q, G, Y, X *big.Int
-}
-
-func pemBlockForKey(priv interface{}) *pem.Block {
- switch k := priv.(type) {
- case *rsa.PrivateKey:
- return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}
- case *dsa.PrivateKey:
- val := DSAKeyFormat{
- P: k.P, Q: k.Q, G: k.G,
- Y: k.Y, X: k.X,
- }
- bytes, _ := asn1.Marshal(val)
- return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes}
- case *ecdsa.PrivateKey:
- b, _ := x509.MarshalECPrivateKey(k)
- return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
- default:
- // attempt PKCS#8 format for all other keys
- b, err := x509.MarshalPKCS8PrivateKey(k)
- if err != nil {
- return nil
- }
- return &pem.Block{Type: "PRIVATE KEY", Bytes: b}
- }
-}
-
-func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) {
- block, _ := pem.Decode([]byte(pemBlock))
- if block == nil {
- return nil, errors.New("no PEM data in input")
- }
-
- if block.Type == "PRIVATE KEY" {
- priv, err := x509.ParsePKCS8PrivateKey(block.Bytes)
- if err != nil {
- return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err)
- }
- return priv, nil
- } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") {
- return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type)
- }
-
- switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY"
- case "RSA":
- priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
- if err != nil {
- return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err)
- }
- return priv, nil
- case "EC":
- priv, err := x509.ParseECPrivateKey(block.Bytes)
- if err != nil {
- return nil, fmt.Errorf("parsing EC private key from PEM: %s", err)
- }
- return priv, nil
- case "DSA":
- var k DSAKeyFormat
- _, err := asn1.Unmarshal(block.Bytes, &k)
- if err != nil {
- return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err)
- }
- priv := &dsa.PrivateKey{
- PublicKey: dsa.PublicKey{
- Parameters: dsa.Parameters{
- P: k.P, Q: k.Q, G: k.G,
- },
- Y: k.Y,
- },
- X: k.X,
- }
- return priv, nil
- default:
- return nil, fmt.Errorf("invalid private key type %s", block.Type)
- }
-}
-
-func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) {
- switch k := priv.(type) {
- case interface{ Public() crypto.PublicKey }:
- return k.Public(), nil
- case *dsa.PrivateKey:
- return &k.PublicKey, nil
- default:
- return nil, fmt.Errorf("unable to get public key for type %T", priv)
- }
-}
-
-type certificate struct {
- Cert string
- Key string
-}
-
-func buildCustomCertificate(b64cert string, b64key string) (certificate, error) {
- crt := certificate{}
-
- cert, err := base64.StdEncoding.DecodeString(b64cert)
- if err != nil {
- return crt, errors.New("unable to decode base64 certificate")
- }
-
- key, err := base64.StdEncoding.DecodeString(b64key)
- if err != nil {
- return crt, errors.New("unable to decode base64 private key")
- }
-
- decodedCert, _ := pem.Decode(cert)
- if decodedCert == nil {
- return crt, errors.New("unable to decode certificate")
- }
- _, err = x509.ParseCertificate(decodedCert.Bytes)
- if err != nil {
- return crt, fmt.Errorf(
- "error parsing certificate: decodedCert.Bytes: %s",
- err,
- )
- }
-
- _, err = parsePrivateKeyPEM(string(key))
- if err != nil {
- return crt, fmt.Errorf(
- "error parsing private key: %s",
- err,
- )
- }
-
- crt.Cert = string(cert)
- crt.Key = string(key)
-
- return crt, nil
-}
-
-func generateCertificateAuthority(
- cn string,
- daysValid int,
-) (certificate, error) {
- priv, err := rsa.GenerateKey(rand.Reader, 2048)
- if err != nil {
- return certificate{}, fmt.Errorf("error generating rsa key: %s", err)
- }
-
- return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv)
-}
-
-func generateCertificateAuthorityWithPEMKey(
- cn string,
- daysValid int,
- privPEM string,
-) (certificate, error) {
- priv, err := parsePrivateKeyPEM(privPEM)
- if err != nil {
- return certificate{}, fmt.Errorf("parsing private key: %s", err)
- }
- return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv)
-}
-
-func generateCertificateAuthorityWithKeyInternal(
- cn string,
- daysValid int,
- priv crypto.PrivateKey,
-) (certificate, error) {
- ca := certificate{}
-
- template, err := getBaseCertTemplate(cn, nil, nil, daysValid)
- if err != nil {
- return ca, err
- }
- // Override KeyUsage and IsCA
- template.KeyUsage = x509.KeyUsageKeyEncipherment |
- x509.KeyUsageDigitalSignature |
- x509.KeyUsageCertSign
- template.IsCA = true
-
- ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv)
-
- return ca, err
-}
-
-func generateSelfSignedCertificate(
- cn string,
- ips []interface{},
- alternateDNS []interface{},
- daysValid int,
-) (certificate, error) {
- priv, err := rsa.GenerateKey(rand.Reader, 2048)
- if err != nil {
- return certificate{}, fmt.Errorf("error generating rsa key: %s", err)
- }
- return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv)
-}
-
-func generateSelfSignedCertificateWithPEMKey(
- cn string,
- ips []interface{},
- alternateDNS []interface{},
- daysValid int,
- privPEM string,
-) (certificate, error) {
- priv, err := parsePrivateKeyPEM(privPEM)
- if err != nil {
- return certificate{}, fmt.Errorf("parsing private key: %s", err)
- }
- return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv)
-}
-
-func generateSelfSignedCertificateWithKeyInternal(
- cn string,
- ips []interface{},
- alternateDNS []interface{},
- daysValid int,
- priv crypto.PrivateKey,
-) (certificate, error) {
- cert := certificate{}
-
- template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid)
- if err != nil {
- return cert, err
- }
-
- cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv)
-
- return cert, err
-}
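-
-// At the template level this is exposed (per the sprig docs) roughly as:
-//
-//  {{ $cert := genSelfSignedCert "foo.com" (list "10.0.0.1") (list "bar.com") 365 }}
-//  {{ $cert.Cert }} and {{ $cert.Key }} hold the PEM-encoded pair.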
-
-func generateSignedCertificate(
- cn string,
- ips []interface{},
- alternateDNS []interface{},
- daysValid int,
- ca certificate,
-) (certificate, error) {
- priv, err := rsa.GenerateKey(rand.Reader, 2048)
- if err != nil {
- return certificate{}, fmt.Errorf("error generating rsa key: %s", err)
- }
- return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv)
-}
-
-func generateSignedCertificateWithPEMKey(
- cn string,
- ips []interface{},
- alternateDNS []interface{},
- daysValid int,
- ca certificate,
- privPEM string,
-) (certificate, error) {
- priv, err := parsePrivateKeyPEM(privPEM)
- if err != nil {
- return certificate{}, fmt.Errorf("parsing private key: %s", err)
- }
- return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv)
-}
-
-func generateSignedCertificateWithKeyInternal(
- cn string,
- ips []interface{},
- alternateDNS []interface{},
- daysValid int,
- ca certificate,
- priv crypto.PrivateKey,
-) (certificate, error) {
- cert := certificate{}
-
- decodedSignerCert, _ := pem.Decode([]byte(ca.Cert))
- if decodedSignerCert == nil {
- return cert, errors.New("unable to decode certificate")
- }
- signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes)
- if err != nil {
- return cert, fmt.Errorf(
- "error parsing certificate: decodedSignerCert.Bytes: %s",
- err,
- )
- }
- signerKey, err := parsePrivateKeyPEM(ca.Key)
- if err != nil {
- return cert, fmt.Errorf(
- "error parsing private key: %s",
- err,
- )
- }
-
- template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid)
- if err != nil {
- return cert, err
- }
-
- cert.Cert, cert.Key, err = getCertAndKey(
- template,
- priv,
- signerCert,
- signerKey,
- )
-
- return cert, err
-}
-
-func getCertAndKey(
- template *x509.Certificate,
- signeeKey crypto.PrivateKey,
- parent *x509.Certificate,
- signingKey crypto.PrivateKey,
-) (string, string, error) {
- signeePubKey, err := getPublicKey(signeeKey)
- if err != nil {
- return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err)
- }
- derBytes, err := x509.CreateCertificate(
- rand.Reader,
- template,
- parent,
- signeePubKey,
- signingKey,
- )
- if err != nil {
- return "", "", fmt.Errorf("error creating certificate: %s", err)
- }
-
- certBuffer := bytes.Buffer{}
- if err := pem.Encode(
- &certBuffer,
- &pem.Block{Type: "CERTIFICATE", Bytes: derBytes},
- ); err != nil {
- return "", "", fmt.Errorf("error pem-encoding certificate: %s", err)
- }
-
- keyBuffer := bytes.Buffer{}
- if err := pem.Encode(
- &keyBuffer,
- pemBlockForKey(signeeKey),
- ); err != nil {
- return "", "", fmt.Errorf("error pem-encoding key: %s", err)
- }
-
- return certBuffer.String(), keyBuffer.String(), nil
-}
-
-func getBaseCertTemplate(
- cn string,
- ips []interface{},
- alternateDNS []interface{},
- daysValid int,
-) (*x509.Certificate, error) {
- ipAddresses, err := getNetIPs(ips)
- if err != nil {
- return nil, err
- }
- dnsNames, err := getAlternateDNSStrs(alternateDNS)
- if err != nil {
- return nil, err
- }
- serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128)
- serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound)
- if err != nil {
- return nil, err
- }
- return &x509.Certificate{
- SerialNumber: serialNumber,
- Subject: pkix.Name{
- CommonName: cn,
- },
- IPAddresses: ipAddresses,
- DNSNames: dnsNames,
- NotBefore: time.Now(),
- NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)),
- KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
- ExtKeyUsage: []x509.ExtKeyUsage{
- x509.ExtKeyUsageServerAuth,
- x509.ExtKeyUsageClientAuth,
- },
- BasicConstraintsValid: true,
- }, nil
-}
-
-func getNetIPs(ips []interface{}) ([]net.IP, error) {
- if ips == nil {
- return []net.IP{}, nil
- }
- var ipStr string
- var ok bool
- var netIP net.IP
- netIPs := make([]net.IP, len(ips))
- for i, ip := range ips {
- ipStr, ok = ip.(string)
- if !ok {
- return nil, fmt.Errorf("error parsing ip: %v is not a string", ip)
- }
- netIP = net.ParseIP(ipStr)
- if netIP == nil {
- return nil, fmt.Errorf("error parsing ip: %s", ipStr)
- }
- netIPs[i] = netIP
- }
- return netIPs, nil
-}
-
-func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) {
- if alternateDNS == nil {
- return []string{}, nil
- }
- var dnsStr string
- var ok bool
- alternateDNSStrs := make([]string, len(alternateDNS))
- for i, dns := range alternateDNS {
- dnsStr, ok = dns.(string)
- if !ok {
- return nil, fmt.Errorf(
- "error processing alternate dns name: %v is not a string",
- dns,
- )
- }
- alternateDNSStrs[i] = dnsStr
- }
- return alternateDNSStrs, nil
-}
-
-func encryptAES(password string, plaintext string) (string, error) {
- if plaintext == "" {
- return "", nil
- }
-
- key := make([]byte, 32)
- copy(key, []byte(password))
- block, err := aes.NewCipher(key)
- if err != nil {
- return "", err
- }
-
- content := []byte(plaintext)
- blockSize := block.BlockSize()
- padding := blockSize - len(content)%blockSize
- padtext := bytes.Repeat([]byte{byte(padding)}, padding)
- content = append(content, padtext...)
-
- ciphertext := make([]byte, aes.BlockSize+len(content))
-
- iv := ciphertext[:aes.BlockSize]
- if _, err := io.ReadFull(rand.Reader, iv); err != nil {
- return "", err
- }
-
- mode := cipher.NewCBCEncrypter(block, iv)
- mode.CryptBlocks(ciphertext[aes.BlockSize:], content)
-
- return base64.StdEncoding.EncodeToString(ciphertext), nil
-}
-
-func decryptAES(password string, crypt64 string) (string, error) {
- if crypt64 == "" {
- return "", nil
- }
-
- key := make([]byte, 32)
- copy(key, []byte(password))
-
- crypt, err := base64.StdEncoding.DecodeString(crypt64)
- if err != nil {
- return "", err
- }
-
- block, err := aes.NewCipher(key)
- if err != nil {
- return "", err
- }
-
- iv := crypt[:aes.BlockSize]
- crypt = crypt[aes.BlockSize:]
- decrypted := make([]byte, len(crypt))
- mode := cipher.NewCBCDecrypter(block, iv)
- mode.CryptBlocks(decrypted, crypt)
-
- return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil
-}
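-
-// A round-trip sketch (hedged; the key is the password zero-padded to 32
-// bytes, so any password up to 32 bytes works):
-//
-//  enc, _ := encryptAES("secret", "hello world")
-//  dec, _ := decryptAES("secret", enc)
-//  // dec == "hello world"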
diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go
deleted file mode 100644
index ed022dda..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/date.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package sprig
-
-import (
- "strconv"
- "time"
-)
-
-// Given a format and a date, format the date string.
-//
-// Date can be a `time.Time` or an `int, int32, int64`.
-// In the latter case, it is treated as seconds since the UNIX
-// epoch.
-func date(fmt string, date interface{}) string {
- return dateInZone(fmt, date, "Local")
-}
-
-func htmlDate(date interface{}) string {
- return dateInZone("2006-01-02", date, "Local")
-}
-
-func htmlDateInZone(date interface{}, zone string) string {
- return dateInZone("2006-01-02", date, zone)
-}
-
-func dateInZone(fmt string, date interface{}, zone string) string {
- var t time.Time
- switch date := date.(type) {
- default:
- t = time.Now()
- case time.Time:
- t = date
- case *time.Time:
- t = *date
- case int64:
- t = time.Unix(date, 0)
- case int:
- t = time.Unix(int64(date), 0)
- case int32:
- t = time.Unix(int64(date), 0)
- }
-
- loc, err := time.LoadLocation(zone)
- if err != nil {
- loc, _ = time.LoadLocation("UTC")
- }
-
- return t.In(loc).Format(fmt)
-}
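-
-// For example (assuming the host knows the "UTC" zone):
-//
-//  dateInZone("2006-01-02", int64(0), "UTC") // "1970-01-01"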
-
-func dateModify(fmt string, date time.Time) time.Time {
- d, err := time.ParseDuration(fmt)
- if err != nil {
- return date
- }
- return date.Add(d)
-}
-
-func mustDateModify(fmt string, date time.Time) (time.Time, error) {
- d, err := time.ParseDuration(fmt)
- if err != nil {
- return time.Time{}, err
- }
- return date.Add(d), nil
-}
-
-func dateAgo(date interface{}) string {
- var t time.Time
-
- switch date := date.(type) {
- default:
- t = time.Now()
- case time.Time:
- t = date
- case int64:
- t = time.Unix(date, 0)
- case int:
- t = time.Unix(int64(date), 0)
- }
- // Drop resolution to seconds
- duration := time.Since(t).Round(time.Second)
- return duration.String()
-}
-
-func duration(sec interface{}) string {
- var n int64
- switch value := sec.(type) {
- default:
- n = 0
- case string:
- n, _ = strconv.ParseInt(value, 10, 64)
- case int64:
- n = value
- }
- return (time.Duration(n) * time.Second).String()
-}
-
-func durationRound(duration interface{}) string {
- var d time.Duration
- switch duration := duration.(type) {
- default:
- d = 0
- case string:
- d, _ = time.ParseDuration(duration)
- case int64:
- d = time.Duration(duration)
- case time.Time:
- d = time.Since(duration)
- }
-
- u := uint64(d)
- neg := d < 0
- if neg {
- u = -u
- }
-
- var (
- year = uint64(time.Hour) * 24 * 365
- month = uint64(time.Hour) * 24 * 30
- day = uint64(time.Hour) * 24
- hour = uint64(time.Hour)
- minute = uint64(time.Minute)
- second = uint64(time.Second)
- )
- switch {
- case u > year:
- return strconv.FormatUint(u/year, 10) + "y"
- case u > month:
- return strconv.FormatUint(u/month, 10) + "mo"
- case u > day:
- return strconv.FormatUint(u/day, 10) + "d"
- case u > hour:
- return strconv.FormatUint(u/hour, 10) + "h"
- case u > minute:
- return strconv.FormatUint(u/minute, 10) + "m"
- case u > second:
- return strconv.FormatUint(u/second, 10) + "s"
- }
- return "0s"
-}
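-
-// A few illustrative results:
-//
-//  durationRound("2h10m5s") // "2h"
-//  durationRound("-90m")    // "1h" (the sign is dropped)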
-
-func toDate(fmt, str string) time.Time {
- t, _ := time.ParseInLocation(fmt, str, time.Local)
- return t
-}
-
-func mustToDate(fmt, str string) (time.Time, error) {
- return time.ParseInLocation(fmt, str, time.Local)
-}
-
-func unixEpoch(date time.Time) string {
- return strconv.FormatInt(date.Unix(), 10)
-}
diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go
deleted file mode 100644
index b9f97966..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/defaults.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package sprig
-
-import (
- "bytes"
- "encoding/json"
- "math/rand"
- "reflect"
- "strings"
- "time"
-)
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-}
-
-// dfault checks whether `given` is set, and returns default if not set.
-//
-// This returns `d` if `given` appears not to be set, and `given` otherwise.
-//
-// For numeric types 0 is unset.
-// For strings, maps, arrays, and slices, len() = 0 is considered unset.
-// For bool, false is unset.
-// Structs are never considered unset.
-//
-// For everything else, including pointers, a nil value is unset.
-func dfault(d interface{}, given ...interface{}) interface{} {
-
- if empty(given) || empty(given[0]) {
- return d
- }
- return given[0]
-}
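-
-// For example, in a template: {{ .Missing | default "n/a" }}. Called directly:
-//
-//  dfault("fallback")        // "fallback" (no value given)
-//  dfault("fallback", "")    // "fallback" (empty string counts as unset)
-//  dfault("fallback", "set") // "set"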
-
-// empty returns true if the given value has the zero value for its type.
-func empty(given interface{}) bool {
- g := reflect.ValueOf(given)
- if !g.IsValid() {
- return true
- }
-
- // Basically adapted from text/template.isTrue
- switch g.Kind() {
- default:
- return g.IsNil()
- case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
- return g.Len() == 0
- case reflect.Bool:
- return !g.Bool()
- case reflect.Complex64, reflect.Complex128:
- return g.Complex() == 0
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return g.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return g.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return g.Float() == 0
- case reflect.Struct:
- return false
- }
-}
-
-// coalesce returns the first non-empty value.
-func coalesce(v ...interface{}) interface{} {
- for _, val := range v {
- if !empty(val) {
- return val
- }
- }
- return nil
-}
-
-// all returns true if empty(x) is false for all values x in the list.
-// If the list is empty, return true.
-func all(v ...interface{}) bool {
- for _, val := range v {
- if empty(val) {
- return false
- }
- }
- return true
-}
-
-// any returns true if empty(x) is false for any x in the list.
-// If the list is empty, return false.
-func any(v ...interface{}) bool {
- for _, val := range v {
- if !empty(val) {
- return true
- }
- }
- return false
-}
-
-// fromJson decodes JSON into a structured value, ignoring errors.
-func fromJson(v string) interface{} {
- output, _ := mustFromJson(v)
- return output
-}
-
-// mustFromJson decodes JSON into a structured value, returning errors.
-func mustFromJson(v string) (interface{}, error) {
- var output interface{}
- err := json.Unmarshal([]byte(v), &output)
- return output, err
-}
-
-// toJson encodes an item into a JSON string
-func toJson(v interface{}) string {
- output, _ := json.Marshal(v)
- return string(output)
-}
-
-func mustToJson(v interface{}) (string, error) {
- output, err := json.Marshal(v)
- if err != nil {
- return "", err
- }
- return string(output), nil
-}
-
-// toPrettyJson encodes an item into a pretty (indented) JSON string
-func toPrettyJson(v interface{}) string {
- output, _ := json.MarshalIndent(v, "", " ")
- return string(output)
-}
-
-func mustToPrettyJson(v interface{}) (string, error) {
- output, err := json.MarshalIndent(v, "", " ")
- if err != nil {
- return "", err
- }
- return string(output), nil
-}
-
-// toRawJson encodes an item into a JSON string with no escaping of HTML characters.
-func toRawJson(v interface{}) string {
- output, err := mustToRawJson(v)
- if err != nil {
- panic(err)
- }
- return string(output)
-}
-
-// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters.
-func mustToRawJson(v interface{}) (string, error) {
- buf := new(bytes.Buffer)
- enc := json.NewEncoder(buf)
- enc.SetEscapeHTML(false)
- err := enc.Encode(&v)
- if err != nil {
- return "", err
- }
- return strings.TrimSuffix(buf.String(), "\n"), nil
-}
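-
-// The difference in a nutshell:
-//
-//  toJson("<b>")    // returns "\u003cb\u003e" (json.Marshal escapes HTML)
-//  toRawJson("<b>") // returns "<b>" (HTML left intact)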
-
-// ternary returns the first value if the last value is true, otherwise returns the second value.
-func ternary(vt interface{}, vf interface{}, v bool) interface{} {
- if v {
- return vt
- }
-
- return vf
-}
diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go
deleted file mode 100644
index ade88969..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/dict.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package sprig
-
-import (
- "github.com/imdario/mergo"
- "github.com/mitchellh/copystructure"
-)
-
-func get(d map[string]interface{}, key string) interface{} {
- if val, ok := d[key]; ok {
- return val
- }
- return ""
-}
-
-func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} {
- d[key] = value
- return d
-}
-
-func unset(d map[string]interface{}, key string) map[string]interface{} {
- delete(d, key)
- return d
-}
-
-func hasKey(d map[string]interface{}, key string) bool {
- _, ok := d[key]
- return ok
-}
-
-func pluck(key string, d ...map[string]interface{}) []interface{} {
- res := []interface{}{}
- for _, dict := range d {
- if val, ok := dict[key]; ok {
- res = append(res, val)
- }
- }
- return res
-}
-
-func keys(dicts ...map[string]interface{}) []string {
- k := []string{}
- for _, dict := range dicts {
- for key := range dict {
- k = append(k, key)
- }
- }
- return k
-}
-
-func pick(dict map[string]interface{}, keys ...string) map[string]interface{} {
- res := map[string]interface{}{}
- for _, k := range keys {
- if v, ok := dict[k]; ok {
- res[k] = v
- }
- }
- return res
-}
-
-func omit(dict map[string]interface{}, keys ...string) map[string]interface{} {
- res := map[string]interface{}{}
-
- omit := make(map[string]bool, len(keys))
- for _, k := range keys {
- omit[k] = true
- }
-
- for k, v := range dict {
- if _, ok := omit[k]; !ok {
- res[k] = v
- }
- }
- return res
-}
-
-func dict(v ...interface{}) map[string]interface{} {
- dict := map[string]interface{}{}
- lenv := len(v)
- for i := 0; i < lenv; i += 2 {
- key := strval(v[i])
- if i+1 >= lenv {
- dict[key] = ""
- continue
- }
- dict[key] = v[i+1]
- }
- return dict
-}
-
-func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} {
- for _, src := range srcs {
- if err := mergo.Merge(&dst, src); err != nil {
- // Swallow errors inside of a template.
- return ""
- }
- }
- return dst
-}
-
-func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) {
- for _, src := range srcs {
- if err := mergo.Merge(&dst, src); err != nil {
- return nil, err
- }
- }
- return dst, nil
-}
-
-func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} {
- for _, src := range srcs {
- if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
- // Swallow errors inside of a template.
- return ""
- }
- }
- return dst
-}
-
-func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) {
- for _, src := range srcs {
- if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
- return nil, err
- }
- }
- return dst, nil
-}
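-
-// The distinction, sketched (hedged; each line assumes a fresh dst):
-//
-//  dst := map[string]interface{}{"a": 1}
-//  src := map[string]interface{}{"a": 2, "b": 3}
-//  merge(dst, src)          // {"a": 1, "b": 3}: existing dst keys win
-//  mergeOverwrite(dst, src) // {"a": 2, "b": 3}: src overwrites dst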
-
-func values(dict map[string]interface{}) []interface{} {
- values := []interface{}{}
- for _, value := range dict {
- values = append(values, value)
- }
-
- return values
-}
-
-func deepCopy(i interface{}) interface{} {
- c, err := mustDeepCopy(i)
- if err != nil {
- panic("deepCopy error: " + err.Error())
- }
-
- return c
-}
-
-func mustDeepCopy(i interface{}) (interface{}, error) {
- return copystructure.Copy(i)
-}
-
-func dig(ps ...interface{}) (interface{}, error) {
- if len(ps) < 3 {
- panic("dig needs at least three arguments")
- }
- dict := ps[len(ps)-1].(map[string]interface{})
- def := ps[len(ps)-2]
- ks := make([]string, len(ps)-2)
- for i := 0; i < len(ks); i++ {
- ks[i] = ps[i].(string)
- }
-
- return digFromDict(dict, def, ks)
-}
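-
-// A template-level sketch (.Params is a hypothetical nested dict): dig walks
-// the keys in order and falls back to the default when one is missing.
-//
-//  {{ dig "user" "name" "guest" .Params }}
-//  // {"user": {"name": "ada"}} -> "ada"; {} -> "guest"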
-
-func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) {
- k, ns := ks[0], ks[1:len(ks)]
- step, has := dict[k]
- if !has {
- return d, nil
- }
- if len(ns) == 0 {
- return step, nil
- }
- return digFromDict(step.(map[string]interface{}), d, ns)
-}
diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go
deleted file mode 100644
index aabb9d44..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Package sprig provides template functions for Go.
-
-This package contains a number of utility functions for working with data
-inside of Go `html/template` and `text/template` files.
-
-To add these functions, use the `template.Funcs()` method:
-
- t := template.New("foo").Funcs(sprig.FuncMap())
-
-Note that you should add the function map before you parse any template files.
-
-In several cases, Sprig reverses the order of arguments from the way they
-appear in the standard library. This is to make it easier to pipe
-arguments into functions.
-
-See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
-*/
-package sprig
diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go
deleted file mode 100644
index 57fcec1d..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/functions.go
+++ /dev/null
@@ -1,382 +0,0 @@
-package sprig
-
-import (
- "errors"
- "html/template"
- "math/rand"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "strconv"
- "strings"
- ttemplate "text/template"
- "time"
-
- util "github.com/Masterminds/goutils"
- "github.com/huandu/xstrings"
- "github.com/shopspring/decimal"
-)
-
-// FuncMap produces the function map.
-//
-// Use this to pass the functions into the template engine:
-//
- // tpl := template.New("foo").Funcs(sprig.FuncMap())
-//
-func FuncMap() template.FuncMap {
- return HtmlFuncMap()
-}
-
-// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
-func HermeticTxtFuncMap() ttemplate.FuncMap {
- r := TxtFuncMap()
- for _, name := range nonhermeticFunctions {
- delete(r, name)
- }
- return r
-}
-
-// HermeticHtmlFuncMap returns an 'html/template'.FuncMap with only repeatable functions.
-func HermeticHtmlFuncMap() template.FuncMap {
- r := HtmlFuncMap()
- for _, name := range nonhermeticFunctions {
- delete(r, name)
- }
- return r
-}
-
-// TxtFuncMap returns a 'text/template'.FuncMap
-func TxtFuncMap() ttemplate.FuncMap {
- return ttemplate.FuncMap(GenericFuncMap())
-}
-
-// HtmlFuncMap returns an 'html/template'.FuncMap
-func HtmlFuncMap() template.FuncMap {
- return template.FuncMap(GenericFuncMap())
-}
-
-// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
-func GenericFuncMap() map[string]interface{} {
- gfm := make(map[string]interface{}, len(genericMap))
- for k, v := range genericMap {
- gfm[k] = v
- }
- return gfm
-}
-
-// These functions are not guaranteed to evaluate to the same result for a
-// given input, because they refer to the environment or global state.
-var nonhermeticFunctions = []string{
- // Date functions
- "date",
- "date_in_zone",
- "date_modify",
- "now",
- "htmlDate",
- "htmlDateInZone",
- "dateInZone",
- "dateModify",
-
- // Strings
- "randAlphaNum",
- "randAlpha",
- "randAscii",
- "randNumeric",
- "randBytes",
- "uuidv4",
-
- // OS
- "env",
- "expandenv",
-
- // Network
- "getHostByName",
-}
-
-var genericMap = map[string]interface{}{
- "hello": func() string { return "Hello!" },
-
- // Date functions
- "ago": dateAgo,
- "date": date,
- "date_in_zone": dateInZone,
- "date_modify": dateModify,
- "dateInZone": dateInZone,
- "dateModify": dateModify,
- "duration": duration,
- "durationRound": durationRound,
- "htmlDate": htmlDate,
- "htmlDateInZone": htmlDateInZone,
- "must_date_modify": mustDateModify,
- "mustDateModify": mustDateModify,
- "mustToDate": mustToDate,
- "now": time.Now,
- "toDate": toDate,
- "unixEpoch": unixEpoch,
-
- // Strings
- "abbrev": abbrev,
- "abbrevboth": abbrevboth,
- "trunc": trunc,
- "trim": strings.TrimSpace,
- "upper": strings.ToUpper,
- "lower": strings.ToLower,
- "title": strings.Title,
- "untitle": untitle,
- "substr": substring,
- // Switch order so that "foo" | repeat 5
- "repeat": func(count int, str string) string { return strings.Repeat(str, count) },
- // Deprecated: Use trimAll.
- "trimall": func(a, b string) string { return strings.Trim(b, a) },
- // Switch order so that "$foo" | trimall "$"
- "trimAll": func(a, b string) string { return strings.Trim(b, a) },
- "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) },
- "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) },
- "nospace": util.DeleteWhiteSpace,
- "initials": initials,
- "randAlphaNum": randAlphaNumeric,
- "randAlpha": randAlpha,
- "randAscii": randAscii,
- "randNumeric": randNumeric,
- "swapcase": util.SwapCase,
- "shuffle": xstrings.Shuffle,
- "snakecase": xstrings.ToSnakeCase,
- "camelcase": xstrings.ToCamelCase,
- "kebabcase": xstrings.ToKebabCase,
- "wrap": func(l int, s string) string { return util.Wrap(s, l) },
- "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) },
- // Switch order so that "foobar" | contains "foo"
- "contains": func(substr string, str string) bool { return strings.Contains(str, substr) },
- "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
- "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) },
- "quote": quote,
- "squote": squote,
- "cat": cat,
- "indent": indent,
- "nindent": nindent,
- "replace": replace,
- "plural": plural,
- "sha1sum": sha1sum,
- "sha256sum": sha256sum,
- "adler32sum": adler32sum,
- "toString": strval,
-
- // Wrap Atoi to stop errors.
- "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i },
- "int64": toInt64,
- "int": toInt,
- "float64": toFloat64,
- "seq": seq,
- "toDecimal": toDecimal,
-
- //"gt": func(a, b int) bool {return a > b},
- //"gte": func(a, b int) bool {return a >= b},
- //"lt": func(a, b int) bool {return a < b},
- //"lte": func(a, b int) bool {return a <= b},
-
-	// split "/" "foo/bar" returns map[string]string{"_0": "foo", "_1": "bar"}
-	"split":     split,
-	"splitList": func(sep, orig string) []string { return strings.Split(orig, sep) },
-	// splitn "/" 2 "foo/bar/fuu" returns map[string]string{"_0": "foo", "_1": "bar/fuu"}
-	"splitn":    splitn,
- "toStrings": strslice,
-
- "until": until,
- "untilStep": untilStep,
-
- // VERY basic arithmetic.
- "add1": func(i interface{}) int64 { return toInt64(i) + 1 },
- "add": func(i ...interface{}) int64 {
- var a int64 = 0
- for _, b := range i {
- a += toInt64(b)
- }
- return a
- },
- "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) },
- "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) },
- "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) },
- "mul": func(a interface{}, v ...interface{}) int64 {
- val := toInt64(a)
- for _, b := range v {
- val = val * toInt64(b)
- }
- return val
- },
- "randInt": func(min, max int) int { return rand.Intn(max-min) + min },
- "add1f": func(i interface{}) float64 {
- return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) })
- },
- "addf": func(i ...interface{}) float64 {
- a := interface{}(float64(0))
- return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) })
- },
- "subf": func(a interface{}, v ...interface{}) float64 {
- return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) })
- },
- "divf": func(a interface{}, v ...interface{}) float64 {
- return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) })
- },
- "mulf": func(a interface{}, v ...interface{}) float64 {
- return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) })
- },
- "biggest": max,
- "max": max,
- "min": min,
- "maxf": maxf,
- "minf": minf,
- "ceil": ceil,
- "floor": floor,
- "round": round,
-
-	// string slices. Note that we reverse the order because that's better
-	// for template processing.
- "join": join,
- "sortAlpha": sortAlpha,
-
- // Defaults
- "default": dfault,
- "empty": empty,
- "coalesce": coalesce,
- "all": all,
- "any": any,
- "compact": compact,
- "mustCompact": mustCompact,
- "fromJson": fromJson,
- "toJson": toJson,
- "toPrettyJson": toPrettyJson,
- "toRawJson": toRawJson,
- "mustFromJson": mustFromJson,
- "mustToJson": mustToJson,
- "mustToPrettyJson": mustToPrettyJson,
- "mustToRawJson": mustToRawJson,
- "ternary": ternary,
- "deepCopy": deepCopy,
- "mustDeepCopy": mustDeepCopy,
-
- // Reflection
- "typeOf": typeOf,
- "typeIs": typeIs,
- "typeIsLike": typeIsLike,
- "kindOf": kindOf,
- "kindIs": kindIs,
- "deepEqual": reflect.DeepEqual,
-
- // OS:
- "env": os.Getenv,
- "expandenv": os.ExpandEnv,
-
- // Network:
- "getHostByName": getHostByName,
-
- // Paths:
- "base": path.Base,
- "dir": path.Dir,
- "clean": path.Clean,
- "ext": path.Ext,
- "isAbs": path.IsAbs,
-
- // Filepaths:
- "osBase": filepath.Base,
- "osClean": filepath.Clean,
- "osDir": filepath.Dir,
- "osExt": filepath.Ext,
- "osIsAbs": filepath.IsAbs,
-
- // Encoding:
- "b64enc": base64encode,
- "b64dec": base64decode,
- "b32enc": base32encode,
- "b32dec": base32decode,
-
- // Data Structures:
- "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable.
- "list": list,
- "dict": dict,
- "get": get,
- "set": set,
- "unset": unset,
- "hasKey": hasKey,
- "pluck": pluck,
- "keys": keys,
- "pick": pick,
- "omit": omit,
- "merge": merge,
- "mergeOverwrite": mergeOverwrite,
- "mustMerge": mustMerge,
- "mustMergeOverwrite": mustMergeOverwrite,
- "values": values,
-
- "append": push, "push": push,
- "mustAppend": mustPush, "mustPush": mustPush,
- "prepend": prepend,
- "mustPrepend": mustPrepend,
- "first": first,
- "mustFirst": mustFirst,
- "rest": rest,
- "mustRest": mustRest,
- "last": last,
- "mustLast": mustLast,
- "initial": initial,
- "mustInitial": mustInitial,
- "reverse": reverse,
- "mustReverse": mustReverse,
- "uniq": uniq,
- "mustUniq": mustUniq,
- "without": without,
- "mustWithout": mustWithout,
- "has": has,
- "mustHas": mustHas,
- "slice": slice,
- "mustSlice": mustSlice,
- "concat": concat,
- "dig": dig,
- "chunk": chunk,
- "mustChunk": mustChunk,
-
- // Crypto:
- "bcrypt": bcrypt,
- "htpasswd": htpasswd,
- "genPrivateKey": generatePrivateKey,
- "derivePassword": derivePassword,
- "buildCustomCert": buildCustomCertificate,
- "genCA": generateCertificateAuthority,
- "genCAWithKey": generateCertificateAuthorityWithPEMKey,
- "genSelfSignedCert": generateSelfSignedCertificate,
- "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey,
- "genSignedCert": generateSignedCertificate,
- "genSignedCertWithKey": generateSignedCertificateWithPEMKey,
- "encryptAES": encryptAES,
- "decryptAES": decryptAES,
- "randBytes": randBytes,
-
- // UUIDs:
- "uuidv4": uuidv4,
-
- // SemVer:
- "semver": semver,
- "semverCompare": semverCompare,
-
- // Flow Control:
- "fail": func(msg string) (string, error) { return "", errors.New(msg) },
-
- // Regex
- "regexMatch": regexMatch,
- "mustRegexMatch": mustRegexMatch,
- "regexFindAll": regexFindAll,
- "mustRegexFindAll": mustRegexFindAll,
- "regexFind": regexFind,
- "mustRegexFind": mustRegexFind,
- "regexReplaceAll": regexReplaceAll,
- "mustRegexReplaceAll": mustRegexReplaceAll,
- "regexReplaceAllLiteral": regexReplaceAllLiteral,
- "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral,
- "regexSplit": regexSplit,
- "mustRegexSplit": mustRegexSplit,
- "regexQuoteMeta": regexQuoteMeta,
-
- // URLs:
- "urlParse": urlParse,
- "urlJoin": urlJoin,
-}
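For reference, the usual way this map is consumed, as a minimal sketch assuming the module is still available after the vendored copy is dropped:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// TxtFuncMap hands genericMap to text/template; note the
	// swapped argument order above that makes piping work.
	tpl := template.Must(template.New("demo").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ "go templates" | upper | repeat 2 }}`))
	// Prints: GO TEMPLATESGO TEMPLATES
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```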
diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go
deleted file mode 100644
index ca0fbb78..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/list.go
+++ /dev/null
@@ -1,464 +0,0 @@
-package sprig
-
-import (
- "fmt"
- "math"
- "reflect"
- "sort"
-)
-
-// Reflection is used in these functions so that slices and arrays of strings,
-// ints, and other types not implementing []interface{} can be worked with.
-// For example, this is useful if you need to work on the output of regexes.
-
-func list(v ...interface{}) []interface{} {
- return v
-}
-
-func push(list interface{}, v interface{}) []interface{} {
- l, err := mustPush(list, v)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustPush(list interface{}, v interface{}) ([]interface{}, error) {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
- nl := make([]interface{}, l)
- for i := 0; i < l; i++ {
- nl[i] = l2.Index(i).Interface()
- }
-
- return append(nl, v), nil
-
- default:
- return nil, fmt.Errorf("Cannot push on type %s", tp)
- }
-}
-
-func prepend(list interface{}, v interface{}) []interface{} {
- l, err := mustPrepend(list, v)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) {
- //return append([]interface{}{v}, list...)
-
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
- nl := make([]interface{}, l)
- for i := 0; i < l; i++ {
- nl[i] = l2.Index(i).Interface()
- }
-
- return append([]interface{}{v}, nl...), nil
-
- default:
- return nil, fmt.Errorf("Cannot prepend on type %s", tp)
- }
-}
-
-func chunk(size int, list interface{}) [][]interface{} {
- l, err := mustChunk(size, list)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustChunk(size int, list interface{}) ([][]interface{}, error) {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
-
- cs := int(math.Floor(float64(l-1)/float64(size)) + 1)
- nl := make([][]interface{}, cs)
-
- for i := 0; i < cs; i++ {
- clen := size
- if i == cs-1 {
- clen = int(math.Floor(math.Mod(float64(l), float64(size))))
- if clen == 0 {
- clen = size
- }
- }
-
- nl[i] = make([]interface{}, clen)
-
- for j := 0; j < clen; j++ {
- ix := i*size + j
- nl[i][j] = l2.Index(ix).Interface()
- }
- }
-
- return nl, nil
-
- default:
- return nil, fmt.Errorf("Cannot chunk type %s", tp)
- }
-}
-
-func last(list interface{}) interface{} {
- l, err := mustLast(list)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustLast(list interface{}) (interface{}, error) {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
- if l == 0 {
- return nil, nil
- }
-
- return l2.Index(l - 1).Interface(), nil
- default:
- return nil, fmt.Errorf("Cannot find last on type %s", tp)
- }
-}
-
-func first(list interface{}) interface{} {
- l, err := mustFirst(list)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustFirst(list interface{}) (interface{}, error) {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
- if l == 0 {
- return nil, nil
- }
-
- return l2.Index(0).Interface(), nil
- default:
- return nil, fmt.Errorf("Cannot find first on type %s", tp)
- }
-}
-
-func rest(list interface{}) []interface{} {
- l, err := mustRest(list)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustRest(list interface{}) ([]interface{}, error) {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
- if l == 0 {
- return nil, nil
- }
-
- nl := make([]interface{}, l-1)
- for i := 1; i < l; i++ {
- nl[i-1] = l2.Index(i).Interface()
- }
-
- return nl, nil
- default:
- return nil, fmt.Errorf("Cannot find rest on type %s", tp)
- }
-}
-
-func initial(list interface{}) []interface{} {
- l, err := mustInitial(list)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustInitial(list interface{}) ([]interface{}, error) {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
- if l == 0 {
- return nil, nil
- }
-
- nl := make([]interface{}, l-1)
- for i := 0; i < l-1; i++ {
- nl[i] = l2.Index(i).Interface()
- }
-
- return nl, nil
- default:
- return nil, fmt.Errorf("Cannot find initial on type %s", tp)
- }
-}
-
-func sortAlpha(list interface{}) []string {
- k := reflect.Indirect(reflect.ValueOf(list)).Kind()
- switch k {
- case reflect.Slice, reflect.Array:
- a := strslice(list)
- s := sort.StringSlice(a)
- s.Sort()
- return s
- }
- return []string{strval(list)}
-}
-
-func reverse(v interface{}) []interface{} {
- l, err := mustReverse(v)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustReverse(v interface{}) ([]interface{}, error) {
- tp := reflect.TypeOf(v).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(v)
-
- l := l2.Len()
-		// We do not reverse in place because the incoming array should not be altered.
- nl := make([]interface{}, l)
- for i := 0; i < l; i++ {
- nl[l-i-1] = l2.Index(i).Interface()
- }
-
- return nl, nil
- default:
- return nil, fmt.Errorf("Cannot find reverse on type %s", tp)
- }
-}
-
-func compact(list interface{}) []interface{} {
- l, err := mustCompact(list)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustCompact(list interface{}) ([]interface{}, error) {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
- nl := []interface{}{}
- var item interface{}
- for i := 0; i < l; i++ {
- item = l2.Index(i).Interface()
- if !empty(item) {
- nl = append(nl, item)
- }
- }
-
- return nl, nil
- default:
- return nil, fmt.Errorf("Cannot compact on type %s", tp)
- }
-}
-
-func uniq(list interface{}) []interface{} {
- l, err := mustUniq(list)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustUniq(list interface{}) ([]interface{}, error) {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
- dest := []interface{}{}
- var item interface{}
- for i := 0; i < l; i++ {
- item = l2.Index(i).Interface()
- if !inList(dest, item) {
- dest = append(dest, item)
- }
- }
-
- return dest, nil
- default:
- return nil, fmt.Errorf("Cannot find uniq on type %s", tp)
- }
-}
-
-func inList(haystack []interface{}, needle interface{}) bool {
- for _, h := range haystack {
- if reflect.DeepEqual(needle, h) {
- return true
- }
- }
- return false
-}
-
-func without(list interface{}, omit ...interface{}) []interface{} {
- l, err := mustWithout(list, omit...)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
- res := []interface{}{}
- var item interface{}
- for i := 0; i < l; i++ {
- item = l2.Index(i).Interface()
- if !inList(omit, item) {
- res = append(res, item)
- }
- }
-
- return res, nil
- default:
- return nil, fmt.Errorf("Cannot find without on type %s", tp)
- }
-}
-
-func has(needle interface{}, haystack interface{}) bool {
- l, err := mustHas(needle, haystack)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustHas(needle interface{}, haystack interface{}) (bool, error) {
- if haystack == nil {
- return false, nil
- }
- tp := reflect.TypeOf(haystack).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(haystack)
- var item interface{}
- l := l2.Len()
- for i := 0; i < l; i++ {
- item = l2.Index(i).Interface()
- if reflect.DeepEqual(needle, item) {
- return true, nil
- }
- }
-
- return false, nil
- default:
- return false, fmt.Errorf("Cannot find has on type %s", tp)
- }
-}
-
-// $list := [1, 2, 3, 4, 5]
-// slice $list -> list[0:5] = list[:]
-// slice $list 0 3 -> list[0:3] = list[:3]
-// slice $list 3 5 -> list[3:5]
-// slice $list 3 -> list[3:5] = list[3:]
-func slice(list interface{}, indices ...interface{}) interface{} {
- l, err := mustSlice(list, indices...)
- if err != nil {
- panic(err)
- }
-
- return l
-}
-
-func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
-
- l := l2.Len()
- if l == 0 {
- return nil, nil
- }
-
- var start, end int
- if len(indices) > 0 {
- start = toInt(indices[0])
- }
- if len(indices) < 2 {
- end = l
- } else {
- end = toInt(indices[1])
- }
-
- return l2.Slice(start, end).Interface(), nil
- default:
- return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
- }
-}
-
-func concat(lists ...interface{}) interface{} {
- var res []interface{}
- for _, list := range lists {
- tp := reflect.TypeOf(list).Kind()
- switch tp {
- case reflect.Slice, reflect.Array:
- l2 := reflect.ValueOf(list)
- for i := 0; i < l2.Len(); i++ {
- res = append(res, l2.Index(i).Interface())
- }
- default:
- panic(fmt.Sprintf("Cannot concat type %s as list", tp))
- }
- }
- return res
-}
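A short sketch of the chunk semantics implemented above (the last group holds whatever remains), again assuming the functions are wired in via sprig's FuncMap:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// chunk 2 over five elements yields [1 2] [3 4] [5]: mustChunk
	// computes ceil(len/size) groups and shortens only the last one.
	const src = `{{ range chunk 2 (list 1 2 3 4 5) }}{{ . }} {{ end }}`
	tpl := template.Must(template.New("chunk").Funcs(sprig.TxtFuncMap()).Parse(src))
	// Prints: [1 2] [3 4] [5]
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```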
diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go
deleted file mode 100644
index 108d78a9..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/network.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package sprig
-
-import (
- "math/rand"
- "net"
-)
-
-func getHostByName(name string) string {
- addrs, _ := net.LookupHost(name)
-	//TODO: add error handling when release v3 comes out
- return addrs[rand.Intn(len(addrs))]
-}
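The TODO above notes the missing error handling: an empty lookup result makes the rand.Intn call panic. A hypothetical hardened variant (getHostByNameSafe is not part of sprig; it is shown only to illustrate the fix):

```go
package main

import (
	"fmt"
	"math/rand"
	"net"
)

// getHostByNameSafe sketches the error handling the TODO asks for:
// surface lookup failures instead of panicking on an empty slice.
func getHostByNameSafe(name string) (string, error) {
	addrs, err := net.LookupHost(name)
	if err != nil {
		return "", err
	}
	if len(addrs) == 0 {
		return "", fmt.Errorf("no addresses found for %q", name)
	}
	return addrs[rand.Intn(len(addrs))], nil
}

func main() {
	fmt.Println(getHostByNameSafe("localhost"))
}
```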
diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go
deleted file mode 100644
index f68e4182..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/numeric.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package sprig
-
-import (
- "fmt"
- "math"
- "strconv"
- "strings"
-
- "github.com/spf13/cast"
- "github.com/shopspring/decimal"
-)
-
-// toFloat64 converts the given value to a 64-bit float
-func toFloat64(v interface{}) float64 {
-	return cast.ToFloat64(v)
-}
-
-// toInt converts the given value to an int
-func toInt(v interface{}) int {
-	return cast.ToInt(v)
-}
-
-// toInt64 converts the given value to a 64-bit integer
-func toInt64(v interface{}) int64 {
-	return cast.ToInt64(v)
-}
-
-func max(a interface{}, i ...interface{}) int64 {
- aa := toInt64(a)
- for _, b := range i {
- bb := toInt64(b)
- if bb > aa {
- aa = bb
- }
- }
- return aa
-}
-
-func maxf(a interface{}, i ...interface{}) float64 {
- aa := toFloat64(a)
- for _, b := range i {
- bb := toFloat64(b)
- aa = math.Max(aa, bb)
- }
- return aa
-}
-
-func min(a interface{}, i ...interface{}) int64 {
- aa := toInt64(a)
- for _, b := range i {
- bb := toInt64(b)
- if bb < aa {
- aa = bb
- }
- }
- return aa
-}
-
-func minf(a interface{}, i ...interface{}) float64 {
- aa := toFloat64(a)
- for _, b := range i {
- bb := toFloat64(b)
- aa = math.Min(aa, bb)
- }
- return aa
-}
-
-func until(count int) []int {
- step := 1
- if count < 0 {
- step = -1
- }
- return untilStep(0, count, step)
-}
-
-func untilStep(start, stop, step int) []int {
- v := []int{}
-
- if stop < start {
- if step >= 0 {
- return v
- }
- for i := start; i > stop; i += step {
- v = append(v, i)
- }
- return v
- }
-
- if step <= 0 {
- return v
- }
- for i := start; i < stop; i += step {
- v = append(v, i)
- }
- return v
-}
-
-func floor(a interface{}) float64 {
- aa := toFloat64(a)
- return math.Floor(aa)
-}
-
-func ceil(a interface{}) float64 {
- aa := toFloat64(a)
- return math.Ceil(aa)
-}
-
-func round(a interface{}, p int, rOpt ...float64) float64 {
- roundOn := .5
- if len(rOpt) > 0 {
- roundOn = rOpt[0]
- }
- val := toFloat64(a)
- places := toFloat64(p)
-
- var round float64
- pow := math.Pow(10, places)
- digit := pow * val
- _, div := math.Modf(digit)
- if div >= roundOn {
- round = math.Ceil(digit)
- } else {
- round = math.Floor(digit)
- }
- return round / pow
-}
-
-// toDecimal converts a Unix octal string to decimal
-func toDecimal(v interface{}) int64 {
- result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
- if err != nil {
- return 0
- }
- return result
-}
-
-func seq(params ...int) string {
- increment := 1
- switch len(params) {
- case 0:
- return ""
- case 1:
- start := 1
- end := params[0]
- if end < start {
- increment = -1
- }
- return intArrayToString(untilStep(start, end+increment, increment), " ")
- case 3:
- start := params[0]
- end := params[2]
- step := params[1]
- if end < start {
- increment = -1
- if step > 0 {
- return ""
- }
- }
- return intArrayToString(untilStep(start, end+increment, step), " ")
- case 2:
- start := params[0]
- end := params[1]
- step := 1
- if end < start {
- step = -1
- }
- return intArrayToString(untilStep(start, end+step, step), " ")
- default:
- return ""
- }
-}
-
-func intArrayToString(slice []int, delimiter string) string {
-	return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimiter), "[]")
-}
-
-// execDecimalOp performs a float and subsequent decimal.Decimal conversion on inputs,
-// and iterates through a and b executing the mathematical operation f
-func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 {
- prt := decimal.NewFromFloat(toFloat64(a))
- for _, x := range b {
- dx := decimal.NewFromFloat(toFloat64(x))
- prt = f(prt, dx)
- }
- rslt, _ := prt.Float64()
- return rslt
-}
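A quick sketch of how seq, round, and untilStep behave, matching the case analysis above (assuming the sprig map is installed as in the earlier examples):

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// seq 0 2 10 is start/step/end, round uses the 0.5 midpoint by
	// default, and untilStep mirrors Python's range().
	const src = `{{ seq 0 2 10 }} | {{ round 123.5555 2 }} | {{ untilStep 0 10 3 }}`
	tpl := template.Must(template.New("num").Funcs(sprig.TxtFuncMap()).Parse(src))
	// Prints: 0 2 4 6 8 10 | 123.56 | [0 3 6 9]
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```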
diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go
deleted file mode 100644
index 8a65c132..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/reflect.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package sprig
-
-import (
- "fmt"
- "reflect"
-)
-
-// typeIs returns true if the src is the type named in target.
-func typeIs(target string, src interface{}) bool {
- return target == typeOf(src)
-}
-
-func typeIsLike(target string, src interface{}) bool {
- t := typeOf(src)
- return target == t || "*"+target == t
-}
-
-func typeOf(src interface{}) string {
- return fmt.Sprintf("%T", src)
-}
-
-func kindIs(target string, src interface{}) bool {
- return target == kindOf(src)
-}
-
-func kindOf(src interface{}) string {
- return reflect.ValueOf(src).Kind().String()
-}
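These helpers are mostly useful for branching inside templates; a minimal sketch:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// typeOf reports the %T name, kindOf the reflect.Kind, and
	// kindIs/typeIs compare against those strings.
	const src = `{{ typeOf .V }} {{ kindOf .V }} {{ kindIs "slice" .V }}`
	tpl := template.Must(template.New("rfl").Funcs(sprig.TxtFuncMap()).Parse(src))
	data := map[string]interface{}{"V": []int{1, 2}}
	// Prints: []int slice true
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```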
diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go
deleted file mode 100644
index fab55101..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/regex.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package sprig
-
-import (
- "regexp"
-)
-
-func regexMatch(regex string, s string) bool {
- match, _ := regexp.MatchString(regex, s)
- return match
-}
-
-func mustRegexMatch(regex string, s string) (bool, error) {
- return regexp.MatchString(regex, s)
-}
-
-func regexFindAll(regex string, s string, n int) []string {
- r := regexp.MustCompile(regex)
- return r.FindAllString(s, n)
-}
-
-func mustRegexFindAll(regex string, s string, n int) ([]string, error) {
- r, err := regexp.Compile(regex)
- if err != nil {
- return []string{}, err
- }
- return r.FindAllString(s, n), nil
-}
-
-func regexFind(regex string, s string) string {
- r := regexp.MustCompile(regex)
- return r.FindString(s)
-}
-
-func mustRegexFind(regex string, s string) (string, error) {
- r, err := regexp.Compile(regex)
- if err != nil {
- return "", err
- }
- return r.FindString(s), nil
-}
-
-func regexReplaceAll(regex string, s string, repl string) string {
- r := regexp.MustCompile(regex)
- return r.ReplaceAllString(s, repl)
-}
-
-func mustRegexReplaceAll(regex string, s string, repl string) (string, error) {
- r, err := regexp.Compile(regex)
- if err != nil {
- return "", err
- }
- return r.ReplaceAllString(s, repl), nil
-}
-
-func regexReplaceAllLiteral(regex string, s string, repl string) string {
- r := regexp.MustCompile(regex)
- return r.ReplaceAllLiteralString(s, repl)
-}
-
-func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) {
- r, err := regexp.Compile(regex)
- if err != nil {
- return "", err
- }
- return r.ReplaceAllLiteralString(s, repl), nil
-}
-
-func regexSplit(regex string, s string, n int) []string {
- r := regexp.MustCompile(regex)
- return r.Split(s, n)
-}
-
-func mustRegexSplit(regex string, s string, n int) ([]string, error) {
- r, err := regexp.Compile(regex)
- if err != nil {
- return []string{}, err
- }
- return r.Split(s, n), nil
-}
-
-func regexQuoteMeta(s string) string {
- return regexp.QuoteMeta(s)
-}
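The plain variants swallow compile errors (or panic via MustCompile), while the must* forms return (value, error) so template execution fails cleanly on a bad pattern. A small sketch:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// Group references use Go's ${1} expansion syntax.
	const src = `{{ regexReplaceAll "a(x*)b" "-ab-axxb-" "${1}W" }}`
	tpl := template.Must(template.New("re").Funcs(sprig.TxtFuncMap()).Parse(src))
	// Prints: -W-xxW-
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// By contrast, {{ mustRegexMatch "[" "x" }} would make Execute
	// return the regexp compile error instead of panicking.
}
```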
diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go
deleted file mode 100644
index 3fbe08aa..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/semver.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package sprig
-
-import (
- sv2 "github.com/Masterminds/semver/v3"
-)
-
-func semverCompare(constraint, version string) (bool, error) {
- c, err := sv2.NewConstraint(constraint)
- if err != nil {
- return false, err
- }
-
- v, err := sv2.NewVersion(version)
- if err != nil {
- return false, err
- }
-
- return c.Check(v), nil
-}
-
-func semver(version string) (*sv2.Version, error) {
- return sv2.NewVersion(version)
-}
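semver exposes the parsed Masterminds/semver Version, whose methods are callable from templates, and semverCompare checks a version against a constraint. Sketch:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// ^1.2.0 accepts any 1.x version >= 1.2.0; Major is a method
	// on the parsed *semver.Version.
	const src = `{{ semverCompare "^1.2.0" "1.2.3" }} {{ (semver "1.2.3").Major }}`
	tpl := template.Must(template.New("sv").Funcs(sprig.TxtFuncMap()).Parse(src))
	// Prints: true 1
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```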
diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go
deleted file mode 100644
index e0ae628c..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/strings.go
+++ /dev/null
@@ -1,236 +0,0 @@
-package sprig
-
-import (
- "encoding/base32"
- "encoding/base64"
- "fmt"
- "reflect"
- "strconv"
- "strings"
-
- util "github.com/Masterminds/goutils"
-)
-
-func base64encode(v string) string {
- return base64.StdEncoding.EncodeToString([]byte(v))
-}
-
-func base64decode(v string) string {
- data, err := base64.StdEncoding.DecodeString(v)
- if err != nil {
- return err.Error()
- }
- return string(data)
-}
-
-func base32encode(v string) string {
- return base32.StdEncoding.EncodeToString([]byte(v))
-}
-
-func base32decode(v string) string {
- data, err := base32.StdEncoding.DecodeString(v)
- if err != nil {
- return err.Error()
- }
- return string(data)
-}
-
-func abbrev(width int, s string) string {
- if width < 4 {
- return s
- }
- r, _ := util.Abbreviate(s, width)
- return r
-}
-
-func abbrevboth(left, right int, s string) string {
- if right < 4 || left > 0 && right < 7 {
- return s
- }
- r, _ := util.AbbreviateFull(s, left, right)
- return r
-}
-
-func initials(s string) string {
- // Wrap this just to eliminate the var args, which templates don't do well.
- return util.Initials(s)
-}
-
-func randAlphaNumeric(count int) string {
- // It is not possible, it appears, to actually generate an error here.
- r, _ := util.CryptoRandomAlphaNumeric(count)
- return r
-}
-
-func randAlpha(count int) string {
- r, _ := util.CryptoRandomAlphabetic(count)
- return r
-}
-
-func randAscii(count int) string {
- r, _ := util.CryptoRandomAscii(count)
- return r
-}
-
-func randNumeric(count int) string {
- r, _ := util.CryptoRandomNumeric(count)
- return r
-}
-
-func untitle(str string) string {
- return util.Uncapitalize(str)
-}
-
-func quote(str ...interface{}) string {
- out := make([]string, 0, len(str))
- for _, s := range str {
- if s != nil {
- out = append(out, fmt.Sprintf("%q", strval(s)))
- }
- }
- return strings.Join(out, " ")
-}
-
-func squote(str ...interface{}) string {
- out := make([]string, 0, len(str))
- for _, s := range str {
- if s != nil {
- out = append(out, fmt.Sprintf("'%v'", s))
- }
- }
- return strings.Join(out, " ")
-}
-
-func cat(v ...interface{}) string {
- v = removeNilElements(v)
- r := strings.TrimSpace(strings.Repeat("%v ", len(v)))
- return fmt.Sprintf(r, v...)
-}
-
-func indent(spaces int, v string) string {
- pad := strings.Repeat(" ", spaces)
- return pad + strings.Replace(v, "\n", "\n"+pad, -1)
-}
-
-func nindent(spaces int, v string) string {
- return "\n" + indent(spaces, v)
-}
-
-func replace(old, new, src string) string {
- return strings.Replace(src, old, new, -1)
-}
-
-func plural(one, many string, count int) string {
- if count == 1 {
- return one
- }
- return many
-}
-
-func strslice(v interface{}) []string {
- switch v := v.(type) {
- case []string:
- return v
- case []interface{}:
- b := make([]string, 0, len(v))
- for _, s := range v {
- if s != nil {
- b = append(b, strval(s))
- }
- }
- return b
- default:
- val := reflect.ValueOf(v)
- switch val.Kind() {
- case reflect.Array, reflect.Slice:
- l := val.Len()
- b := make([]string, 0, l)
- for i := 0; i < l; i++ {
- value := val.Index(i).Interface()
- if value != nil {
- b = append(b, strval(value))
- }
- }
- return b
- default:
- if v == nil {
- return []string{}
- }
-
- return []string{strval(v)}
- }
- }
-}
-
-func removeNilElements(v []interface{}) []interface{} {
- newSlice := make([]interface{}, 0, len(v))
- for _, i := range v {
- if i != nil {
- newSlice = append(newSlice, i)
- }
- }
- return newSlice
-}
-
-func strval(v interface{}) string {
- switch v := v.(type) {
- case string:
- return v
- case []byte:
- return string(v)
- case error:
- return v.Error()
- case fmt.Stringer:
- return v.String()
- default:
- return fmt.Sprintf("%v", v)
- }
-}
-
-func trunc(c int, s string) string {
- if c < 0 && len(s)+c > 0 {
- return s[len(s)+c:]
- }
- if c >= 0 && len(s) > c {
- return s[:c]
- }
- return s
-}
-
-func join(sep string, v interface{}) string {
- return strings.Join(strslice(v), sep)
-}
-
-func split(sep, orig string) map[string]string {
- parts := strings.Split(orig, sep)
- res := make(map[string]string, len(parts))
- for i, v := range parts {
- res["_"+strconv.Itoa(i)] = v
- }
- return res
-}
-
-func splitn(sep string, n int, orig string) map[string]string {
- parts := strings.SplitN(orig, sep, n)
- res := make(map[string]string, len(parts))
- for i, v := range parts {
- res["_"+strconv.Itoa(i)] = v
- }
- return res
-}
-
-// substring creates a substring of the given string.
-//
-// If start is < 0, this calls string[:end].
-//
-// If start is >= 0 and end < 0 or end is greater than the length of s, this calls string[start:].
-//
-// Otherwise, this calls string[start:end].
-func substring(start, end int, s string) string {
- if start < 0 {
- return s[:end]
- }
- if end < 0 || end > len(s) {
- return s[start:]
- }
- return s[start:end]
-}
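trunc with a negative count keeps the tail of the string, and substr/abbrev follow the bounds rules documented above. Sketch:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	const src = `{{ trunc 5 "hello world" }}|{{ trunc -5 "hello world" }}|` +
		`{{ abbrev 8 "hello world" }}|{{ substr 0 5 "hello world" }}`
	tpl := template.Must(template.New("str").Funcs(sprig.TxtFuncMap()).Parse(src))
	// Prints: hello|world|hello...|hello
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```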
diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go
deleted file mode 100644
index b8e120e1..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/url.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package sprig
-
-import (
- "fmt"
- "net/url"
- "reflect"
-)
-
-func dictGetOrEmpty(dict map[string]interface{}, key string) string {
- value, ok := dict[key]
- if !ok {
- return ""
- }
- tp := reflect.TypeOf(value).Kind()
- if tp != reflect.String {
- panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String()))
- }
- return reflect.ValueOf(value).String()
-}
-
-// urlParse parses the given URL and returns its parts as a dict
-func urlParse(v string) map[string]interface{} {
- dict := map[string]interface{}{}
- parsedURL, err := url.Parse(v)
- if err != nil {
- panic(fmt.Sprintf("unable to parse url: %s", err))
- }
- dict["scheme"] = parsedURL.Scheme
- dict["host"] = parsedURL.Host
- dict["hostname"] = parsedURL.Hostname()
- dict["path"] = parsedURL.Path
- dict["query"] = parsedURL.RawQuery
- dict["opaque"] = parsedURL.Opaque
- dict["fragment"] = parsedURL.Fragment
- if parsedURL.User != nil {
- dict["userinfo"] = parsedURL.User.String()
- } else {
- dict["userinfo"] = ""
- }
-
- return dict
-}
-
-// urlJoin joins the given dict back into a URL string
-func urlJoin(d map[string]interface{}) string {
- resURL := url.URL{
- Scheme: dictGetOrEmpty(d, "scheme"),
- Host: dictGetOrEmpty(d, "host"),
- Path: dictGetOrEmpty(d, "path"),
- RawQuery: dictGetOrEmpty(d, "query"),
- Opaque: dictGetOrEmpty(d, "opaque"),
- Fragment: dictGetOrEmpty(d, "fragment"),
- }
- userinfo := dictGetOrEmpty(d, "userinfo")
- var user *url.Userinfo
- if userinfo != "" {
- tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo))
- if err != nil {
- panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err))
- }
- user = tempURL.User
- }
-
- resURL.User = user
- return resURL.String()
-}
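urlParse and urlJoin are designed to round-trip through the dict shown above. Sketch:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// The parsed dict keys match the fields urlJoin reads back.
	const src = `{{ $u := urlParse "https://user@example.com/search?q=1#top" -}}
{{ $u.host }} {{ $u.path }} {{ $u.query }} {{ urlJoin $u }}`
	tpl := template.Must(template.New("url").Funcs(sprig.TxtFuncMap()).Parse(src))
	// Prints: example.com /search q=1 https://user@example.com/search?q=1#top
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```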
diff --git a/vendor/github.com/antlr/antlr4/LICENSE.txt b/vendor/github.com/antlr/antlr4/LICENSE.txt
deleted file mode 100644
index 2042d1bd..00000000
--- a/vendor/github.com/antlr/antlr4/LICENSE.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-[The "BSD 3-clause license"]
-Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. Neither the name of the copyright holder nor the names of its contributors
- may be used to endorse or promote products derived from this software
- without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-=====
-
-MIT License for codepointat.js from https://git.io/codepointat
-MIT License for fromcodepoint.js from https://git.io/vDW1m
-
-Copyright Mathias Bynens
-// This implementation prints messages to {@link System//err} containing the -// values of {@code line}, {@code charPositionInLine}, and {@code msg} using -// the following format.
-// -//-// line line:charPositionInLine msg -//-// -func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) -} - -type ProxyErrorListener struct { - *DefaultErrorListener - delegates []ErrorListener -} - -func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { - if delegates == nil { - panic("delegates is not provided") - } - l := new(ProxyErrorListener) - l.delegates = delegates - return l -} - -func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - for _, d := range p.delegates { - d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) - } -} - -func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go deleted file mode 100644 index 977a6e45..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go +++ /dev/null @@ -1,758 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -type ErrorStrategy interface { - reset(Parser) - RecoverInline(Parser) Token - Recover(Parser, RecognitionException) - Sync(Parser) - inErrorRecoveryMode(Parser) bool - ReportError(Parser, RecognitionException) - ReportMatch(Parser) -} - -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error Reporting and recovery in ANTLR parsers. -// -type DefaultErrorStrategy struct { - errorRecoveryMode bool - lastErrorIndex int - lastErrorStates *IntervalSet -} - -var _ ErrorStrategy = &DefaultErrorStrategy{} - -func NewDefaultErrorStrategy() *DefaultErrorStrategy { - - d := new(DefaultErrorStrategy) - - // Indicates whether the error strategy is currently "recovering from an - // error". This is used to suppress Reporting multiple error messages while - // attempting to recover from a detected syntax error. - // - // @see //inErrorRecoveryMode - // - d.errorRecoveryMode = false - - // The index into the input stream where the last error occurred. - // This is used to prevent infinite loops where an error is found - // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least - // one token/tree node is consumed for two errors. 
- // - d.lastErrorIndex = -1 - d.lastErrorStates = nil - return d -} - -//
The default implementation simply calls {@link //endErrorCondition} to -// ensure that the handler is not in error recovery mode.
-func (d *DefaultErrorStrategy) reset(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// -// This method is called to enter error recovery mode when a recognition -// exception is Reported. -// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { - d.errorRecoveryMode = true -} - -func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool { - return d.errorRecoveryMode -} - -// -// This method is called to leave error recovery mode after recovering from -// a recognition exception. -// -// @param recognizer -// -func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { - d.errorRecoveryMode = false - d.lastErrorStates = nil - d.lastErrorIndex = -1 -} - -// -// {@inheritDoc} -// -//The default implementation simply calls {@link //endErrorCondition}.
-// -func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// -// {@inheritDoc} -// -//The default implementation returns immediately if the handler is already -// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} -// and dispatches the Reporting task based on the runtime type of {@code e} -// according to the following table.
-// -//The default implementation reSynchronizes the parser by consuming tokens -// until we find one in the reSynchronization set--loosely the set of tokens -// that can follow the current rule.
-// -func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - - if d.lastErrorIndex == recognizer.GetInputStream().Index() && - d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { - // uh oh, another error at same token index and previously-Visited - // state in ATN must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop d is a failsafe. - recognizer.Consume() - } - d.lastErrorIndex = recognizer.GetInputStream().Index() - if d.lastErrorStates == nil { - d.lastErrorStates = NewIntervalSet() - } - d.lastErrorStates.addOne(recognizer.GetState()) - followSet := d.getErrorRecoverySet(recognizer) - d.consumeUntil(recognizer, followSet) -} - -// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at d point in the ATN. You can call d anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. -// -//Implements Jim Idle's magic Sync mechanism in closures and optional -// subrules. E.g.,
-// -//-// a : Sync ( stuff Sync )* -// Sync : {consume to what can follow Sync} -//-// -// At the start of a sub rule upon error, {@link //Sync} performs single -// token deletion, if possible. If it can't do that, it bails on the current -// rule and uses the default error recovery, which consumes until the -// reSynchronization set of the current rule. -// -//
If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block -// with an empty alternative), then the expected set includes what follows -// the subrule.
-// -//During loop iteration, it consumes until it sees a token that can start a -// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to -// stay in the loop as long as possible.
-// -//ORIGINS
-// -//Previous versions of ANTLR did a poor job of their recovery within loops. -// A single mismatch token or missing token would force the parser to bail -// out of the entire rules surrounding the loop. So, for rule
-// -//-// classfunc : 'class' ID '{' member* '}' -//-// -// input with an extra token between members would force the parser to -// consume until it found the next class definition rather than the next -// member definition of the current class. -// -//
This functionality cost a little bit of effort because the parser has to -// compare token set at the start of the loop and at each iteration. If for -// some reason speed is suffering for you, you can turn off d -// functionality by simply overriding d method as a blank { }.
-// -func (d *DefaultErrorStrategy) Sync(recognizer Parser) { - // If already recovering, don't try to Sync - if d.inErrorRecoveryMode(recognizer) { - return - } - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - la := recognizer.GetTokenStream().LA(1) - - // try cheaper subset first might get lucky. seems to shave a wee bit off - nextTokens := recognizer.GetATN().NextTokens(s, nil) - if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { - return - } - - switch s.GetStateType() { - case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: - // Report error and recover if possible - if d.SingleTokenDeletion(recognizer) != nil { - return - } - panic(NewInputMisMatchException(recognizer)) - case ATNStatePlusLoopBack, ATNStateStarLoopBack: - d.ReportUnwantedToken(recognizer) - expecting := NewIntervalSet() - expecting.addSet(recognizer.GetExpectedTokens()) - whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) - d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) - default: - // do nothing if we can't identify the exact kind of ATN state - } -} - -// This is called by {@link //ReportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { - tokens := recognizer.GetTokenStream() - var input string - if tokens != nil { - if e.startToken.GetTokenType() == TokenEOF { - input = "This method is called when {@link //singleTokenDeletion} identifies -// single-token deletion as a viable recovery strategy for a mismatched -// input error.
-// -//The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//NotifyErrorListeners}.
-// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { - if d.inErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - tokenName := d.GetTokenErrorDisplay(t) - expecting := d.GetExpectedTokens(recognizer) - msg := "extraneous input " + tokenName + " expecting " + - expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -// This method is called to Report a syntax error which requires the -// insertion of a missing token into the input stream. At the time d -// method is called, the missing token has not yet been inserted. When d -// method returns, {@code recognizer} is in error recovery mode. -// -//This method is called when {@link //singleTokenInsertion} identifies -// single-token insertion as a viable recovery strategy for a mismatched -// input error.
-// -//The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//NotifyErrorListeners}.
-// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { - if d.inErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + - " at " + d.GetTokenErrorDisplay(t) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -//The default implementation attempts to recover from the mismatched input -// by using single token insertion and deletion as described below. If the -// recovery attempt fails, d method panics an -// {@link InputMisMatchException}.
-// -//EXTRA TOKEN (single token deletion)
-// -//{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the -// right token, however, then assume {@code LA(1)} is some extra spurious -// token and delete it. Then consume and return the next token (which was -// the {@code LA(2)} token) as the successful result of the Match operation.
-// -//This recovery strategy is implemented by {@link -// //singleTokenDeletion}.
-// -//MISSING TOKEN (single token insertion)
-// -//If current token (at {@code LA(1)}) is consistent with what could come -// after the expected {@code LA(1)} token, then assume the token is missing -// and use the parser's {@link TokenFactory} to create it on the fly. The -// "insertion" is performed by returning the created token as the successful -// result of the Match operation.
-// -//This recovery strategy is implemented by {@link -// //singleTokenInsertion}.
-// -//EXAMPLE
-// -//For example, Input {@code i=(3} is clearly missing the {@code ')'}. When -// the parser returns from the nested call to {@code expr}, it will have -// call chain:
-// -//-// stat &rarr expr &rarr atom -//-// -// and it will be trying to Match the {@code ')'} at d point in the -// derivation: -// -//
-// => ID '=' '(' INT ')' ('+' atom)* '' -// ^ -//-// -// The attempt to Match {@code ')'} will fail when it sees {@code ''} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''} -// is in the set of tokens that can follow the {@code ')'} token reference -// in rule {@code atom}. It can assume that you forgot the {@code ')'}. -// -func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { - // SINGLE TOKEN DELETION - MatchedSymbol := d.SingleTokenDeletion(recognizer) - if MatchedSymbol != nil { - // we have deleted the extra token. - // now, move past ttype token as if all were ok - recognizer.Consume() - return MatchedSymbol - } - // SINGLE TOKEN INSERTION - if d.SingleTokenInsertion(recognizer) { - return d.GetMissingSymbol(recognizer) - } - // even that didn't work must panic the exception - panic(NewInputMisMatchException(recognizer)) -} - -// -// This method implements the single-token insertion inline error recovery -// strategy. It is called by {@link //recoverInline} if the single-token -// deletion strategy fails to recover from the mismatched input. If this -// method returns {@code true}, {@code recognizer} will be in error recovery -// mode. -// -//
This method determines whether or not single-token insertion is viable by -// checking if the {@code LA(1)} input symbol could be successfully Matched -// if it were instead the {@code LA(2)} symbol. If d method returns -// {@code true}, the caller is responsible for creating and inserting a -// token with the correct type to produce d behavior.
-// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} -// -func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { - currentSymbolType := recognizer.GetTokenStream().LA(1) - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token error recovery - // is free to conjure up and insert the missing token - atn := recognizer.GetInterpreter().atn - currentState := atn.states[recognizer.GetState()] - next := currentState.GetTransitions()[0].getTarget() - expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) - if expectingAtLL2.contains(currentSymbolType) { - d.ReportMissingToken(recognizer) - return true - } - - return false -} - -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover -// from mismatched input. If this method returns nil, the parser and error -// handler state will not have changed. If this method returns non-nil, -// {@code recognizer} will not be in error recovery mode since the -// returned token was a successful Match. -// -//If the single-token deletion is successful, d method calls -// {@link //ReportUnwantedToken} to Report the error, followed by -// {@link Parser//consume} to actually "delete" the extraneous token. Then, -// before returning {@link //ReportMatch} is called to signal a successful -// Match.
-// -// @param recognizer the parser instance -// @return the successfully Matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code nil} -// -func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { - NextTokenType := recognizer.GetTokenStream().LA(2) - expecting := d.GetExpectedTokens(recognizer) - if expecting.contains(NextTokenType) { - d.ReportUnwantedToken(recognizer) - // print("recoverFromMisMatchedToken deleting " \ - // + str(recognizer.GetTokenStream().LT(1)) \ - // + " since " + str(recognizer.GetTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.Consume() // simply delete extra token - // we want to return the token we're actually Matching - MatchedSymbol := recognizer.GetCurrentToken() - d.ReportMatch(recognizer) // we know current token is correct - return MatchedSymbol - } - - return nil -} - -// Conjure up a missing token during error recovery. -// -// The recognizer attempts to recover from single missing -// symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x)}. The action clearly assumes -// that there has been an identifier Matched previously and that -// $x points at that token. If that token is missing, but -// the next token in the stream is what we want we assume that -// d token is missing and we keep going. Because we -// have to return some token to replace the missing token, -// we have to conjure one up. This method gives the user control -// over the tokens returned for missing tokens. Mostly, -// you will want to create something special for identifier -// tokens. For literals such as '{' and ',', the default -// action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. -// If you change what tokens must be created by the lexer, -// override d method to create the appropriate tokens. -// -func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { - currentSymbol := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - expectedTokenType := expecting.first() - var tokenText string - - if expectedTokenType == TokenEOF { - tokenText = "-// This error strategy is useful in the following scenarios.
-// -//-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
-// -// @see Parser//setErrorHandler(ANTLRErrorStrategy) - -type BailErrorStrategy struct { - *DefaultErrorStrategy -} - -var _ ErrorStrategy = &BailErrorStrategy{} - -func NewBailErrorStrategy() *BailErrorStrategy { - - b := new(BailErrorStrategy) - - b.DefaultErrorStrategy = NewDefaultErrorStrategy() - - return b -} - -// Instead of recovering from exception {@code e}, re-panic it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule func catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. -// -func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - context := recognizer.GetParserRuleContext() - for context != nil { - context.SetException(e) - context = context.GetParent().(ParserRuleContext) - } - panic(NewParseCancellationException()) // TODO we don't emit e properly -} - -// Make sure we don't attempt to recover inline if the parser -// successfully recovers, it won't panic an exception. -// -func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { - b.Recover(recognizer, NewInputMisMatchException(recognizer)) - - return nil -} - -// Make sure we don't attempt to recover from problems in subrules.// -func (b *BailErrorStrategy) Sync(recognizer Parser) { - // pass -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go deleted file mode 100644 index 2ef74926..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just -// 3 kinds of errors: prediction errors, failed predicate errors, and -// mismatched input errors. In each case, the parser knows where it is -// in the input, where it is in the ATN, the rule invocation stack, -// and what kind of problem occurred. - -type RecognitionException interface { - GetOffendingToken() Token - GetMessage() string - GetInputStream() IntStream -} - -type BaseRecognitionException struct { - message string - recognizer Recognizer - offendingToken Token - offendingState int - ctx RuleContext - input IntStream -} - -func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { - - // todo - // Error.call(this) - // - // if (!!Error.captureStackTrace) { - // Error.captureStackTrace(this, RecognitionException) - // } else { - // stack := NewError().stack - // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int - - t := new(BaseRecognitionException) - - t.message = message - t.recognizer = recognizer - t.input = input - t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams - // support accessing symbols by index, we have to track the {@link Token} - // instance itself. - t.offendingToken = nil - // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. 
- t.offendingState = -1 - if t.recognizer != nil { - t.offendingState = t.recognizer.GetState() - } - - return t -} - -func (b *BaseRecognitionException) GetMessage() string { - return b.message -} - -func (b *BaseRecognitionException) GetOffendingToken() Token { - return b.offendingToken -} - -func (b *BaseRecognitionException) GetInputStream() IntStream { - return b.input -} - -//If the state number is not known, b method returns -1.
- -// -// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time b exception was panicn. -// -//If the set of expected tokens is not known and could not be computed, -// b method returns {@code nil}.
-// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / -func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { - if b.recognizer != nil { - return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) - } - - return nil -} - -func (b *BaseRecognitionException) String() string { - return b.message -} - -type LexerNoViableAltException struct { - *BaseRecognitionException - - startIndex int - deadEndConfigs ATNConfigSet -} - -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { - - l := new(LexerNoViableAltException) - - l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) - - l.startIndex = startIndex - l.deadEndConfigs = deadEndConfigs - - return l -} - -func (l *LexerNoViableAltException) String() string { - symbol := "" - if l.startIndex >= 0 && l.startIndex < l.input.Size() { - symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) - } - return "LexerNoViableAltException" + symbol -} - -type NoViableAltException struct { - *BaseRecognitionException - - startToken Token - offendingToken Token - ctx ParserRuleContext - deadEndConfigs ATNConfigSet -} - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. Reported by ReportNoViableAlternative() -// -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { - - if ctx == nil { - ctx = recognizer.GetParserRuleContext() - } - - if offendingToken == nil { - offendingToken = recognizer.GetCurrentToken() - } - - if startToken == nil { - startToken = recognizer.GetCurrentToken() - } - - if input == nil { - input = recognizer.GetInputStream().(TokenStream) - } - - n := new(NoViableAltException) - n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) - - // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// - n.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - n.startToken = startToken - n.offendingToken = offendingToken - - return n -} - -type InputMisMatchException struct { - *BaseRecognitionException -} - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not Match the expected token. -// -func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { - - i := new(InputMisMatchException) - i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - i.offendingToken = recognizer.GetCurrentToken() - - return i - -} - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like Matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. 
-
-// A semantic predicate failed during validation. Validation of predicates
-// occurs when normally parsing the alternative, just like Matching a token.
-// Disambiguating predicate evaluation occurs when we test a predicate during
-// prediction.
-
-type FailedPredicateException struct {
-	*BaseRecognitionException
-
-	ruleIndex      int
-	predicateIndex int
-	predicate      string
-}
-
-func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
-
-	f := new(FailedPredicateException)
-
-	f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
-
-	s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
-	trans := s.GetTransitions()[0]
-	if trans2, ok := trans.(*PredicateTransition); ok {
-		f.ruleIndex = trans2.ruleIndex
-		f.predicateIndex = trans2.predIndex
-	} else {
-		f.ruleIndex = 0
-		f.predicateIndex = 0
-	}
-	f.predicate = predicate
-	f.offendingToken = recognizer.GetCurrentToken()
-
-	return f
-}
-
-func (f *FailedPredicateException) formatMessage(predicate, message string) string {
-	if message != "" {
-		return message
-	}
-
-	return "failed predicate: {" + predicate + "}?"
-}
-
-type ParseCancellationException struct {
-}
-
-func NewParseCancellationException() *ParseCancellationException {
-	// Error.call(this)
-	// Error.captureStackTrace(this, ParseCancellationException)
-	return new(ParseCancellationException)
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
deleted file mode 100644
index 842170c0..00000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
-	"bytes"
-	"io"
-	"os"
-)
-
-// This is an InputStream that is loaded from a file all at once
-// when you construct the object.
-
-type FileStream struct {
-	*InputStream
-
-	filename string
-}
-
-func NewFileStream(fileName string) (*FileStream, error) {
-
-	buf := bytes.NewBuffer(nil)
-
-	f, err := os.Open(fileName)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	_, err = io.Copy(buf, f)
-	if err != nil {
-		return nil, err
-	}
-
-	fs := new(FileStream)
-
-	fs.filename = fileName
-	s := string(buf.Bytes())
-
-	fs.InputStream = NewInputStream(s)
-
-	return fs, nil
-
-}
-
-func (f *FileStream) GetSourceName() string {
-	return f.filename
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
deleted file mode 100644
index 5ff270f5..00000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type InputStream struct {
-	name  string
-	index int
-	data  []rune
-	size  int
-}
-
-func NewInputStream(data string) *InputStream {
-
-	is := new(InputStream)
-
-	is.name = "
-// The {@code Skip} command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
-type LexerSkipAction struct {
-	*BaseLexerAction
-}
-
-func NewLexerSkipAction() *LexerSkipAction {
-	la := new(LexerSkipAction)
-	la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
-	return la
-}
-
-// Provides a singleton instance of this parameterless lexer action.
-var LexerSkipActionINSTANCE = NewLexerSkipAction()
-
-func (l *LexerSkipAction) execute(lexer Lexer) {
-	lexer.Skip()
-}
-
-func (l *LexerSkipAction) String() string {
-	return "skip"
-}
-
-// Implements the {@code type} lexer action by calling {@link Lexer//setType}
-// with the assigned type.
-type LexerTypeAction struct {
-	*BaseLexerAction
-
-	thetype int
-}
-
-func NewLexerTypeAction(thetype int) *LexerTypeAction {
-	l := new(LexerTypeAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
-	l.thetype = thetype
-	return l
-}
-
-func (l *LexerTypeAction) execute(lexer Lexer) {
-	lexer.SetType(l.thetype)
-}
-
-func (l *LexerTypeAction) hash() int {
-	h := murmurInit(0)
-	h = murmurUpdate(h, l.actionType)
-	h = murmurUpdate(h, l.thetype)
-	return murmurFinish(h, 2)
-}
-
-func (l *LexerTypeAction) equals(other LexerAction) bool {
-	if l == other {
-		return true
-	} else if _, ok := other.(*LexerTypeAction); !ok {
-		return false
-	} else {
-		return l.thetype == other.(*LexerTypeAction).thetype
-	}
-}
-
-func (l *LexerTypeAction) String() string {
-	return "actionType(" + strconv.Itoa(l.thetype) + ")"
-}
-
-// Implements the {@code pushMode} lexer action by calling
-// {@link Lexer//pushMode} with the assigned mode.
-type LexerPushModeAction struct {
-	*BaseLexerAction
-
-	mode int
-}
-
-func NewLexerPushModeAction(mode int) *LexerPushModeAction {
-
-	l := new(LexerPushModeAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
-
-	l.mode = mode
-	return l
-}
-
-// This action is implemented by calling {@link Lexer//pushMode} with the
-// value provided by {@link //getMode}.
-func (l *LexerPushModeAction) execute(lexer Lexer) {
-	lexer.PushMode(l.mode)
-}
-
-func (l *LexerPushModeAction) hash() int {
-	h := murmurInit(0)
-	h = murmurUpdate(h, l.actionType)
-	h = murmurUpdate(h, l.mode)
-	return murmurFinish(h, 2)
-}
-
-func (l *LexerPushModeAction) equals(other LexerAction) bool {
-	if l == other {
-		return true
-	} else if _, ok := other.(*LexerPushModeAction); !ok {
-		return false
-	} else {
-		return l.mode == other.(*LexerPushModeAction).mode
-	}
-}
-
-func (l *LexerPushModeAction) String() string {
-	return "pushMode(" + strconv.Itoa(l.mode) + ")"
-}
-
-// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
-//
-// The {@code popMode} command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
-type LexerPopModeAction struct {
-	*BaseLexerAction
-}
-
-func NewLexerPopModeAction() *LexerPopModeAction {
-
-	l := new(LexerPopModeAction)
-
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
-
-	return l
-}
-
-var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
-
-// This action is implemented by calling {@link Lexer//popMode}.
-func (l *LexerPopModeAction) execute(lexer Lexer) {
-	lexer.PopMode()
-}
-
-func (l *LexerPopModeAction) String() string {
-	return "popMode"
-}
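
The parameterless actions here (skip, popMode, and more below) are exposed as
package-level singletons rather than constructed per use. A small sketch, as if
written inside this package, of why that matters: stateless actions can be
shared, so every DFA state carrying the same command reuses one value and can
compare it by pointer identity:

	func singletonDemo() {
		fresh := NewLexerSkipAction()
		fmt.Println(LexerSkipActionINSTANCE == fresh)                   // false: distinct pointer
		fmt.Println(LexerSkipActionINSTANCE == LexerSkipActionINSTANCE) // true: shared instance
	}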
-
-// Implements the {@code more} lexer action by calling {@link Lexer//more}.
-//
-// The {@code more} command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
-
-type LexerMoreAction struct {
-	*BaseLexerAction
-}
-
-func NewLexerMoreAction() *LexerMoreAction {
-	l := new(LexerMoreAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
-
-	return l
-}
-
-var LexerMoreActionINSTANCE = NewLexerMoreAction()
-
-// This action is implemented by calling {@link Lexer//more}.
-func (l *LexerMoreAction) execute(lexer Lexer) {
-	lexer.More()
-}
-
-func (l *LexerMoreAction) String() string {
-	return "more"
-}
-
-// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
-// the assigned mode.
-type LexerModeAction struct {
-	*BaseLexerAction
-
-	mode int
-}
-
-func NewLexerModeAction(mode int) *LexerModeAction {
-	l := new(LexerModeAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
-	l.mode = mode
-	return l
-}
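
The hash() methods in this file all follow the same murmur-style recipe: mix
each distinguishing field into a running state, then finalize with the field
count. A self-contained sketch of that recipe (the real murmurInit,
murmurUpdate, and murmurFinish helpers live elsewhere in this package):

	func hashTwoFields(actionType, mode int) uint32 {
		h := uint32(0) // murmurInit(0)
		for _, field := range []int{actionType, mode} {
			k := uint32(field)
			k *= 0xcc9e2d51
			k = k<<15 | k>>17
			k *= 0x1b873593
			h ^= k
			h = (h<<13|h>>19)*5 + 0xe6546b64
		}
		// murmurFinish: mix in the number of fields, then avalanche.
		h ^= uint32(2)
		h ^= h >> 16
		h *= 0x85ebca6b
		h ^= h >> 13
		h *= 0xc2b2ae35
		h ^= h >> 16
		return h
	}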
-// This action is implemented by calling {@link Lexer//mode} with the
-// value provided by {@link //getMode}.
-func (l *LexerModeAction) execute(lexer Lexer) {
-	lexer.SetMode(l.mode)
-}
-
-func (l *LexerModeAction) hash() int {
-	h := murmurInit(0)
-	h = murmurUpdate(h, l.actionType)
-	h = murmurUpdate(h, l.mode)
-	return murmurFinish(h, 2)
-}
-
-func (l *LexerModeAction) equals(other LexerAction) bool {
-	if l == other {
-		return true
-	} else if _, ok := other.(*LexerModeAction); !ok {
-		return false
-	} else {
-		return l.mode == other.(*LexerModeAction).mode
-	}
-}
-
-func (l *LexerModeAction) String() string {
-	return "mode(" + strconv.Itoa(l.mode) + ")"
-}
-
-// Executes a custom lexer action by calling {@link Recognizer//action} with the
-// rule and action indexes assigned to the custom action. The implementation of
-// a custom action is added to the generated code for the lexer in an override
-// of {@link Recognizer//action} when the grammar is compiled.
-//
-// This class may represent embedded actions created with the {...}
-// syntax in ANTLR 4, as well as actions created for lexer commands where the
-// command argument could not be evaluated when the grammar was compiled.
-
-type LexerCustomAction struct {
-	*BaseLexerAction
-	ruleIndex, actionIndex int
-}
-
-func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
-	l := new(LexerCustomAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
-	l.ruleIndex = ruleIndex
-	l.actionIndex = actionIndex
-	l.isPositionDependent = true
-	return l
-}
-
-// Custom actions are implemented by calling {@link Lexer//action} with the
-// appropriate rule and action indexes.
-func (l *LexerCustomAction) execute(lexer Lexer) {
-	lexer.Action(nil, l.ruleIndex, l.actionIndex)
-}
-
-func (l *LexerCustomAction) hash() int {
-	h := murmurInit(0)
-	h = murmurUpdate(h, l.actionType)
-	h = murmurUpdate(h, l.ruleIndex)
-	h = murmurUpdate(h, l.actionIndex)
-	return murmurFinish(h, 3)
-}
-
-func (l *LexerCustomAction) equals(other LexerAction) bool {
-	if l == other {
-		return true
-	} else if _, ok := other.(*LexerCustomAction); !ok {
-		return false
-	} else {
-		return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
-	}
-}
-
-// Implements the {@code channel} lexer action by calling
-// {@link Lexer//setChannel} with the assigned channel.
-// Constructs a new {@code channel} action with the specified channel value.
-// @param channel The channel value to pass to {@link Lexer//setChannel}.
-type LexerChannelAction struct {
-	*BaseLexerAction
-
-	channel int
-}
-
-func NewLexerChannelAction(channel int) *LexerChannelAction {
-	l := new(LexerChannelAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
-	l.channel = channel
-	return l
-}
-
-// This action is implemented by calling {@link Lexer//setChannel} with the
-// value provided by {@link //getChannel}.
-func (l *LexerChannelAction) execute(lexer Lexer) {
-	lexer.SetChannel(l.channel)
-}
-
-func (l *LexerChannelAction) hash() int {
-	h := murmurInit(0)
-	h = murmurUpdate(h, l.actionType)
-	h = murmurUpdate(h, l.channel)
-	return murmurFinish(h, 2)
-}
-
-func (l *LexerChannelAction) equals(other LexerAction) bool {
-	if l == other {
-		return true
-	} else if _, ok := other.(*LexerChannelAction); !ok {
-		return false
-	} else {
-		return l.channel == other.(*LexerChannelAction).channel
-	}
-}
-
-func (l *LexerChannelAction) String() string {
-	return "channel(" + strconv.Itoa(l.channel) + ")"
-}
-
-// This implementation of {@link LexerAction} is used for tracking input offsets
-// for position-dependent actions within a {@link LexerActionExecutor}.
-//
-// This action is not serialized as part of the ATN, and is only required for
-// position-dependent lexer actions which appear at a location other than the
-// end of a rule. For more information about DFA optimizations employed for
-// lexer actions, see {@link LexerActionExecutor//append} and
-// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
-
-// Constructs a new indexed custom action by associating a character offset
-// with a {@link LexerAction}.
-//
-// Note: This class is only required for lexer actions for which
-// {@link LexerAction//isPositionDependent} returns {@code true}.
-//
-// @param offset The offset into the input {@link CharStream}, relative to
-// the token start index, at which the specified lexer action should be
-// executed.
-// @param action The lexer action to execute at a particular offset in the
-// input {@link CharStream}.
-type LexerIndexedCustomAction struct {
-	*BaseLexerAction
-
-	offset              int
-	lexerAction         LexerAction
-	isPositionDependent bool
-}
-
-func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
-
-	l := new(LexerIndexedCustomAction)
-	l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
-
-	l.offset = offset
-	l.lexerAction = lexerAction
-	l.isPositionDependent = true
-
-	return l
-}
-
-// This method calls {@link //execute} on the result of {@link //getAction}
-// using the provided {@code lexer}.
-func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
-	// assume the input stream position was properly set by the calling code
-	l.lexerAction.execute(lexer)
-}
-
-func (l *LexerIndexedCustomAction) hash() int {
-	h := murmurInit(0)
-	h = murmurUpdate(h, l.actionType)
-	h = murmurUpdate(h, l.offset)
-	h = murmurUpdate(h, l.lexerAction.hash())
-	return murmurFinish(h, 3)
-}
-
-func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
-	if l == other {
-		return true
-	} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
-		return false
-	} else {
-		return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
-	}
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
deleted file mode 100644
index 80b949a1..00000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// Represents an executor for a sequence of lexer actions which were traversed
-// during the Matching operation of a lexer rule (token).
-//
-// The executor tracks position information for position-dependent lexer actions
-// efficiently, ensuring that actions appearing only at the end of the rule do
-// not cause bloating of the {@link DFA} created for the lexer.
-
-type LexerActionExecutor struct {
-	lexerActions []LexerAction
-	cachedHash   int
-}
-
-func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
-
-	if lexerActions == nil {
-		lexerActions = make([]LexerAction, 0)
-	}
-
-	l := new(LexerActionExecutor)
-
-	l.lexerActions = lexerActions
-
-	// Caches the result of {@link //hashCode} since the hash code is an element
-	// of the performance-critical {@link LexerATNConfig//hashCode} operation.
-	l.cachedHash = murmurInit(57)
-	for _, a := range lexerActions {
-		l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
-	}
-
-	return l
-}
-
-// Creates a {@link LexerActionExecutor} which executes the actions for
-// the input {@code lexerActionExecutor} followed by a specified
-// {@code lexerAction}.
-//
-// @param lexerActionExecutor The executor for actions already traversed by
-// the lexer while Matching a token within a particular
-// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
-// though it were an empty executor.
-// @param lexerAction The lexer action to execute after the actions
-// specified in {@code lexerActionExecutor}.
-//
-// @return A {@link LexerActionExecutor} for executing the combined actions
-// of {@code lexerActionExecutor} and {@code lexerAction}.
-func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
-	if lexerActionExecutor == nil {
-		return NewLexerActionExecutor([]LexerAction{lexerAction})
-	}
-
-	return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
-}
-
-// Creates a {@link LexerActionExecutor} which encodes the current offset
-// for position-dependent lexer actions.
-//
-// Normally, when the executor encounters lexer actions where
-// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
-// {@link IntStream//seek} on the input {@link CharStream} to set the input
-// position to the end of the current token. This behavior provides
-// for efficient DFA representation of lexer actions which appear at the end
-// of a lexer rule, even when the lexer rule Matches a variable number of
-// characters.
-//
-// Prior to traversing a Match transition in the ATN, the current offset
-// from the token start index is assigned to all position-dependent lexer
-// actions which have not already been assigned a fixed offset. By storing
-// the offsets relative to the token start index, the DFA representation of
-// lexer actions which appear in the middle of tokens remains efficient due
-// to sharing among tokens of the same length, regardless of their absolute
-// position in the input stream.
-//
-// If the current executor already has offsets assigned to all
-// position-dependent lexer actions, the method returns {@code this}.
-//
-// @param offset The current offset to assign to all position-dependent
-// lexer actions which do not already have offsets assigned.
-//
-// @return A {@link LexerActionExecutor} which stores input stream offsets
-// for all position-dependent lexer actions.
-// /
-func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
-	var updatedLexerActions []LexerAction
-	for i := 0; i < len(l.lexerActions); i++ {
-		_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
-		if l.lexerActions[i].getIsPositionDependent() && !ok {
-			if updatedLexerActions == nil {
-				updatedLexerActions = make([]LexerAction, 0)
-
-				for _, a := range l.lexerActions {
-					updatedLexerActions = append(updatedLexerActions, a)
-				}
-			}
-
-			updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
-		}
-	}
-	if updatedLexerActions == nil {
-		return l
-	}
-
-	return NewLexerActionExecutor(updatedLexerActions)
-}
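
The trick above is worth spelling out: offsets are stored relative to the token
start, not as absolute stream positions, so two tokens with an action at, say,
character 3 can share one executor and therefore one DFA state. A toy model
with illustrative names only:

	type relativeAction struct {
		offset int // characters past the token start
	}

	// applyAt derives the absolute position on demand; the same actions
	// slice works for a token starting at index 10 or index 10000.
	func applyAt(actions []relativeAction, tokenStart int, seek func(int)) {
		for _, a := range actions {
			seek(tokenStart + a.offset)
		}
	}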
-
-// Execute the actions encapsulated by this executor within the context of a
-// particular {@link Lexer}.
-//
-// This method calls {@link IntStream//seek} to set the position of the
-// {@code input} {@link CharStream} prior to calling
-// {@link LexerAction//execute} on a position-dependent action. Before the
-// method returns, the input position will be restored to the same position
-// it was in when the method was invoked.
-//
-// @param lexer The lexer instance.
-// @param input The input stream which is the source for the current token.
-// When this method is called, the current {@link IntStream//index} for
-// {@code input} should be the start of the following token, i.e. 1
-// character past the end of the current token.
-// @param startIndex The token start index. This value may be passed to
-// {@link IntStream//seek} to set the {@code input} position to the beginning
-// of the token.
-// /
-func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
-	requiresSeek := false
-	stopIndex := input.Index()
-
-	defer func() {
-		if requiresSeek {
-			input.Seek(stopIndex)
-		}
-	}()
-
-	for i := 0; i < len(l.lexerActions); i++ {
-		lexerAction := l.lexerActions[i]
-		if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
-			offset := la.offset
-			input.Seek(startIndex + offset)
-			lexerAction = la.lexerAction
-			requiresSeek = (startIndex + offset) != stopIndex
-		} else if lexerAction.getIsPositionDependent() {
-			input.Seek(stopIndex)
-			requiresSeek = false
-		}
-		lexerAction.execute(lexer)
-	}
-}
-
-func (l *LexerActionExecutor) hash() int {
-	if l == nil {
-		return 61
-	}
-	return l.cachedHash
-}
-
-func (l *LexerActionExecutor) equals(other interface{}) bool {
-	if l == other {
-		return true
-	} else if _, ok := other.(*LexerActionExecutor); !ok {
-		return false
-	} else {
-		return l.cachedHash == other.(*LexerActionExecutor).cachedHash &&
-			&l.lexerActions == &other.(*LexerActionExecutor).lexerActions
-	}
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
deleted file mode 100644
index 131364f7..00000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
+++ /dev/null
@@ -1,658 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
-	"fmt"
-	"strconv"
-)
-
-var (
-	LexerATNSimulatorDebug    = false
-	LexerATNSimulatorDFADebug = false
-
-	LexerATNSimulatorMinDFAEdge = 0
-	LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
-
-	LexerATNSimulatorMatchCalls = 0
-)
-
-type ILexerATNSimulator interface {
-	IATNSimulator
-
-	reset()
-	Match(input CharStream, mode int) int
-	GetCharPositionInLine() int
-	GetLine() int
-	GetText(input CharStream) string
-	Consume(input CharStream)
-}
-
-type LexerATNSimulator struct {
-	*BaseATNSimulator
-
-	recog              Lexer
-	predictionMode     int
-	mergeCache         DoubleDict
-	startIndex         int
-	Line               int
-	CharPositionInLine int
-	mode               int
-	prevAccept         *SimState
-	MatchCalls         int
-}
-
-func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
-	l := new(LexerATNSimulator)
-
-	l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
-
-	l.decisionToDFA = decisionToDFA
-	l.recog = recog
-	// The current token's starting index into the character stream.
-	// Shared across DFA to ATN simulation in case the ATN fails and the
-	// DFA did not have a previous accept state. In this case, we use the
-	// ATN-generated exception object.
-	l.startIndex = -1
-	// line number 1..n within the input
-	l.Line = 1
-	// The index of the character relative to the beginning of the line,
-	// 0..n-1
-	l.CharPositionInLine = 0
-	l.mode = LexerDefaultMode
-	// Used during DFA/ATN exec to record the most recent accept configuration
-	// info
-	l.prevAccept = NewSimState()
-	// done
-	return l
-}
-
-func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
-	l.CharPositionInLine = simulator.CharPositionInLine
-	l.Line = simulator.Line
-	l.mode = simulator.mode
-	l.startIndex = simulator.startIndex
-}
-
-func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
-	l.MatchCalls++
-	l.mode = mode
-	mark := input.Mark()
-
-	defer func() {
-		input.Release(mark)
-	}()
-
-	l.startIndex = input.Index()
-	l.prevAccept.reset()
-
-	dfa := l.decisionToDFA[mode]
-
-	if dfa.s0 == nil {
-		return l.MatchATN(input)
-	}
-
-	return l.execATN(input, dfa.s0)
-}
-
-func (l *LexerATNSimulator) reset() {
-	l.prevAccept.reset()
-	l.startIndex = -1
-	l.Line = 1
-	l.CharPositionInLine = 0
-	l.mode = LexerDefaultMode
-}
-
-func (l *LexerATNSimulator) MatchATN(input CharStream) int {
-	startState := l.atn.modeToStartState[l.mode]
-
-	if LexerATNSimulatorDebug {
-		fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
-	}
-	oldMode := l.mode
-	s0Closure := l.computeStartState(input, startState)
-	suppressEdge := s0Closure.hasSemanticContext
-	s0Closure.hasSemanticContext = false
-
-	next := l.addDFAState(s0Closure)
-
-	if !suppressEdge {
-		l.decisionToDFA[l.mode].setS0(next)
-	}
-
-	predict := l.execATN(input, next)
-
-	if LexerATNSimulatorDebug {
-		fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
-	}
-	return predict
-}
-
-func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
-
-	if LexerATNSimulatorDebug {
-		fmt.Println("start state closure=" + ds0.configs.String())
-	}
-	if ds0.isAcceptState {
-		// allow zero-length tokens
-		l.captureSimState(l.prevAccept, input, ds0)
-	}
-	t := input.LA(1)
-	s := ds0 // s is current/from DFA state
-
-	for { // while more work
-		if LexerATNSimulatorDebug {
-			fmt.Println("execATN loop starting closure: " + s.configs.String())
-		}
-
-		// As we move src->trg, src->trg, we keep track of the previous trg to
-		// avoid looking up the DFA state again, which is expensive.
-		// If the previous target was already part of the DFA, we might
-		// be able to avoid doing a reach operation upon t. If s!=nil,
-		// it means that semantic predicates didn't prevent us from
-		// creating a DFA state. Once we know s!=nil, we check to see if
-		// the DFA state has an edge already for t. If so, we can just reuse
-		// its configuration set; there's no point in re-computing it.
-		// This is kind of like doing DFA simulation within the ATN
-		// simulation because DFA simulation is really just a way to avoid
-		// computing reach/closure sets. Technically, once we know that
-		// we have a previously added DFA state, we could jump over to
-		// the DFA simulator. But, that would mean popping back and forth
-		// a lot and making things more complicated algorithmically.
-		// This optimization makes a lot of sense for loops within DFA.
-		// A character will take us back to an existing DFA state
-		// that already has lots of edges out of it. e.g., .* in comments.
-		target := l.getExistingTargetState(s, t)
-		if target == nil {
-			target = l.computeTargetState(input, s, t)
-			// print("Computed:" + str(target))
-		}
-		if target == ATNSimulatorError {
-			break
-		}
-		// If this is a consumable input element, make sure to consume before
-		// capturing the accept state so the input index, line, and char
-		// position accurately reflect the state of the interpreter at the
-		// end of the token.
-		if t != TokenEOF {
-			l.Consume(input)
-		}
-		if target.isAcceptState {
-			l.captureSimState(l.prevAccept, input, target)
-			if t == TokenEOF {
-				break
-			}
-		}
-		t = input.LA(1)
-		s = target // flip; current DFA target becomes new src/from state
-	}
-
-	return l.failOrAccept(l.prevAccept, input, s.configs, t)
-}
-
-// Get an existing target state for an edge in the DFA. If the target state
-// for the edge has not yet been computed or is otherwise not available,
-// this method returns {@code nil}.
-//
-// @param s The current DFA state
-// @param t The next input symbol
-// @return The existing target DFA state for the given input symbol
-// {@code t}, or {@code nil} if the target state for this edge is not
-// already cached
-func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
-	if s.edges == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
-		return nil
-	}
-
-	target := s.edges[t-LexerATNSimulatorMinDFAEdge]
-	if LexerATNSimulatorDebug && target != nil {
-		fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
-	}
-	return target
-}
-
-// Compute a target state for an edge in the DFA, and attempt to add the
-// computed state and corresponding edge to the DFA.
-//
-// @param input The input stream
-// @param s The current DFA state
-// @param t The next input symbol
-//
-// @return The computed target DFA state for the given input symbol
-// {@code t}. If {@code t} does not lead to a valid DFA state, this method
-// returns {@link //ERROR}.
-func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
-	reach := NewOrderedATNConfigSet()
-
-	// if we don't find an existing DFA state
-	// Fill reach starting from closure, following t transitions
-	l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
-
-	if len(reach.configs) == 0 { // we got nowhere on t from s
-		if !reach.hasSemanticContext {
-			// we got nowhere on t, but don't throw out this knowledge; it'd
-			// cause a failover from DFA later.
-			l.addDFAEdge(s, t, ATNSimulatorError, nil)
-		}
-		// stop when we can't Match any more char
-		return ATNSimulatorError
-	}
-	// Add an edge from s to target DFA found/created for reach
-	return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
-}
-
-func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
-	if l.prevAccept.dfaState != nil {
-		lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
-		l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
-		return prevAccept.dfaState.prediction
-	}
-
-	// if no accept and EOF is first char, return EOF
-	if t == TokenEOF && input.Index() == l.startIndex {
-		return TokenEOF
-	}
-
-	panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
-}
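
The control flow above is memoization: try the cached DFA edge first, and only
fall back to the full ATN reach computation (which also records the new edge)
on a miss. Schematically, as if written inside this package:

	func nextState(l *LexerATNSimulator, input CharStream, s *DFAState, t int) *DFAState {
		if target := l.getExistingTargetState(s, t); target != nil {
			return target // hit: pure DFA simulation, no closure work
		}
		return l.computeTargetState(input, s, t) // miss: ATN reach + cache
	}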
-
-// Given a starting configuration set, figure out all ATN configurations
-// we can reach upon input {@code t}. Parameter {@code reach} is a return
-// parameter.
-func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
-	// this is used to skip processing for configs which have a lower priority
-	// than a config that already reached an accept state for the same rule
-	SkipAlt := ATNInvalidAltNumber
-
-	for _, cfg := range closure.GetItems() {
-		currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
-		if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
-			continue
-		}
-
-		if LexerATNSimulatorDebug {
-
-			fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
-		}
-
-		for _, trans := range cfg.GetState().GetTransitions() {
-			target := l.getReachableTarget(trans, t)
-			if target != nil {
-				lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
-				if lexerActionExecutor != nil {
-					lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
-				}
-				treatEOFAsEpsilon := (t == TokenEOF)
-				config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
-				if l.closure(input, config, reach,
-					currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
-					// any remaining configs for this alt have a lower priority
-					// than the one that just reached an accept state.
-					SkipAlt = cfg.GetAlt()
-				}
-			}
-		}
-	}
-}
-
-func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
-	if LexerATNSimulatorDebug {
-		fmt.Printf("ACTION %s\n", lexerActionExecutor)
-	}
-	// seek to after last char in token
-	input.Seek(index)
-	l.Line = line
-	l.CharPositionInLine = charPos
-	if lexerActionExecutor != nil && l.recog != nil {
-		lexerActionExecutor.execute(l.recog, input, startIndex)
-	}
-}
-
-func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
-	if trans.Matches(t, 0, LexerMaxCharValue) {
-		return trans.getTarget()
-	}
-
-	return nil
-}
-
-func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
-	configs := NewOrderedATNConfigSet()
-	for i := 0; i < len(p.GetTransitions()); i++ {
-		target := p.GetTransitions()[i].getTarget()
-		cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
-		l.closure(input, cfg, configs, false, false, false)
-	}
-
-	return configs
-}
-
-// Since the alternatives within any lexer decision are ordered by
-// preference, this method stops pursuing the closure as soon as an accept
-// state is reached. After the first accept state is reached by depth-first
-// search from {@code config}, all other (potentially reachable) states for
-// this rule would have a lower priority.
-//
-// @return {@code true} if an accept state is reached, otherwise
-// {@code false}.
-func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
-	currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
-
-	if LexerATNSimulatorDebug {
-		fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
-	}
-
-	_, ok := config.state.(*RuleStopState)
-	if ok {
-
-		if LexerATNSimulatorDebug {
-			if l.recog != nil {
-				fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
-			} else {
-				fmt.Printf("closure at rule stop %s\n", config)
-			}
-		}
-
-		if config.context == nil || config.context.hasEmptyPath() {
-			if config.context == nil || config.context.isEmpty() {
-				configs.Add(config, nil)
-				return true
-			}
-
-			configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
-			currentAltReachedAcceptState = true
-		}
-		if config.context != nil && !config.context.isEmpty() {
-			for i := 0; i < config.context.length(); i++ {
-				if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
-					newContext := config.context.GetParent(i) // "pop" return state
-					returnState := l.atn.states[config.context.getReturnState(i)]
-					cfg := NewLexerATNConfig2(config, returnState, newContext)
-					currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
-				}
-			}
-		}
-		return currentAltReachedAcceptState
-	}
-	// optimization
-	if !config.state.GetEpsilonOnlyTransitions() {
-		if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
-			configs.Add(config, nil)
-		}
-	}
-	for j := 0; j < len(config.state.GetTransitions()); j++ {
-		trans := config.state.GetTransitions()[j]
-		cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
-		if cfg != nil {
-			currentAltReachedAcceptState = l.closure(input, cfg, configs,
-				currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
-		}
-	}
-	return currentAltReachedAcceptState
-}
-
-// side-effect: can alter configs.hasSemanticContext
-func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
-	configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
-
-	var cfg *LexerATNConfig
-
-	if trans.getSerializationType() == TransitionRULE {
-
-		rt := trans.(*RuleTransition)
-		newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
-		cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
-
-	} else if trans.getSerializationType() == TransitionPRECEDENCE {
-		panic("Precedence predicates are not supported in lexers.")
-	} else if trans.getSerializationType() == TransitionPREDICATE {
-		// Track traversing semantic predicates. If we traverse,
-		// we cannot add a DFA state for this "reach" computation
-		// because the DFA would not test the predicate again in the
-		// future. Rather than creating collections of semantic predicates
-		// like v3 and testing them on prediction, v4 will test them on the
-		// fly all the time using the ATN, not the DFA. This is slower but
-		// semantically it's not used that often. One of the key elements to
-		// this predicate mechanism is not adding DFA states that see
-		// predicates immediately afterwards in the ATN. For example,
-
-		// a : ID {p1}? | ID {p2}?
-
-		// should create the start state for rule 'a' (to save start state
-		// competition), but should not create target of ID state.
-		// The collection of ATN states the following ID references includes
-		// states reached by traversing predicates. Since this is when we
-		// test them, we cannot cache the DFA state target of ID.
-
-		pt := trans.(*PredicateTransition)
-
-		if LexerATNSimulatorDebug {
-			fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
-		}
-		configs.SetHasSemanticContext(true)
-		if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
-			cfg = NewLexerATNConfig4(config, trans.getTarget())
-		}
-	} else if trans.getSerializationType() == TransitionACTION {
-		if config.context == nil || config.context.hasEmptyPath() {
-			// execute actions anywhere in the start rule for a token.
-			//
-			// TODO: if the entry rule is invoked recursively, some
-			// actions may be executed during the recursive call. The
-			// problem can appear when hasEmptyPath() is true but
-			// isEmpty() is false. In this case, the config needs to be
-			// split into two contexts - one with just the empty path
-			// and another with everything but the empty path.
-			// Unfortunately, the current algorithm does not allow
-			// getEpsilonTarget to return two configurations, so
-			// additional modifications are needed before we can support
-			// the split operation.
-			lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
-			cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
-		} else {
-			// ignore actions in referenced rules
-			cfg = NewLexerATNConfig4(config, trans.getTarget())
-		}
-	} else if trans.getSerializationType() == TransitionEPSILON {
-		cfg = NewLexerATNConfig4(config, trans.getTarget())
-	} else if trans.getSerializationType() == TransitionATOM ||
-		trans.getSerializationType() == TransitionRANGE ||
-		trans.getSerializationType() == TransitionSET {
-		if treatEOFAsEpsilon {
-			if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
-				cfg = NewLexerATNConfig4(config, trans.getTarget())
-			}
-		}
-	}
-	return cfg
-}
-
-// Evaluate a predicate specified in the lexer.
-//
-// If {@code speculative} is {@code true}, this method was called before
-// {@link //consume} for the Matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
-// to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}).
-//
-// @param input The input stream.
-// @param ruleIndex The rule containing the predicate.
-// @param predIndex The index of the predicate within the rule.
-// @param speculative {@code true} if the current index in {@code input} is
-// one character before the predicate's location.
-//
-// @return {@code true} if the specified predicate evaluates to
-// {@code true}.
-// /
-func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
-	// assume true if no recognizer was provided
-	if l.recog == nil {
-		return true
-	}
-	if !speculative {
-		return l.recog.Sempred(nil, ruleIndex, predIndex)
-	}
-	savedcolumn := l.CharPositionInLine
-	savedLine := l.Line
-	index := input.Index()
-	marker := input.Mark()
-
-	defer func() {
-		l.CharPositionInLine = savedcolumn
-		l.Line = savedLine
-		input.Seek(index)
-		input.Release(marker)
-	}()
-
-	l.Consume(input)
-	return l.recog.Sempred(nil, ruleIndex, predIndex)
-}
-
-func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
-	settings.index = input.Index()
-	settings.line = l.Line
-	settings.column = l.CharPositionInLine
-	settings.dfaState = dfaState
-}
-
-func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
-	if to == nil && cfgs != nil {
-		// leading to this call, ATNConfigSet.hasSemanticContext is used as a
-		// marker indicating dynamic predicate evaluation makes this edge
-		// dependent on the specific input sequence, so the static edge in the
-		// DFA should be omitted. The target DFAState is still created since
-		// execATN has the ability to reSynchronize with the DFA state cache
-		// following the predicate evaluation step.
-		//
-		// TJP notes: next time through the DFA, we see a pred again and eval.
-		// If that gets us to a previously created (but dangling) DFA
-		// state, we can continue in pure DFA mode from there.
-		// /
-		suppressEdge := cfgs.HasSemanticContext()
-		cfgs.SetHasSemanticContext(false)
-
-		to = l.addDFAState(cfgs)
-
-		if suppressEdge {
-			return to
-		}
-	}
-	// add the edge
-	if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
-		// Only track edges within the DFA bounds
-		return to
-	}
-	if LexerATNSimulatorDebug {
-		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
-	}
-	if from.edges == nil {
-		// make room for tokens 1..n and -1 masquerading as index 0
-		from.edges = make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1)
-	}
-	from.edges[tk-LexerATNSimulatorMinDFAEdge] = to // connect
-
-	return to
-}
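
The edge cache above is just an array indexed over the window
[LexerATNSimulatorMinDFAEdge, LexerATNSimulatorMaxDFAEdge]; symbols outside
the window (for example, code points above 127) are never cached and always go
back through the ATN. A sketch of the slot arithmetic, as if in this package:

	func edgeSlot(t int) (slot int, cacheable bool) {
		if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
			return 0, false // outside the window: recompute each time
		}
		return t - LexerATNSimulatorMinDFAEdge, true
	}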
-
-// Add a new DFA state if there isn't one with this set of
-// configurations already. This method also detects the first
-// configuration containing an ATN rule stop state. Later, when
-// traversing the DFA, we will know which rule to accept.
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState {
-
-	proposed := NewDFAState(-1, configs)
-	var firstConfigWithRuleStopState ATNConfig
-
-	for _, cfg := range configs.GetItems() {
-
-		_, ok := cfg.GetState().(*RuleStopState)
-
-		if ok {
-			firstConfigWithRuleStopState = cfg
-			break
-		}
-	}
-	if firstConfigWithRuleStopState != nil {
-		proposed.isAcceptState = true
-		proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
-		proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
-	}
-	hash := proposed.hash()
-	dfa := l.decisionToDFA[l.mode]
-	existing, ok := dfa.getState(hash)
-	if ok {
-		return existing
-	}
-	newState := proposed
-	newState.stateNumber = dfa.numStates()
-	configs.SetReadOnly(true)
-	newState.configs = configs
-	dfa.setState(hash, newState)
-	return newState
-}
-
-func (l *LexerATNSimulator) getDFA(mode int) *DFA {
-	return l.decisionToDFA[mode]
-}
-
-// Get the text Matched so far for the current token.
-func (l *LexerATNSimulator) GetText(input CharStream) string {
-	// index is first lookahead char, don't include.
-	return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
-}
-
-func (l *LexerATNSimulator) Consume(input CharStream) {
-	curChar := input.LA(1)
-	if curChar == int('\n') {
-		l.Line++
-		l.CharPositionInLine = 0
-	} else {
-		l.CharPositionInLine++
-	}
-	input.Consume()
-}
-
-func (l *LexerATNSimulator) GetCharPositionInLine() int {
-	return l.CharPositionInLine
-}
-
-func (l *LexerATNSimulator) GetLine() int {
-	return l.Line
-}
-
-func (l *LexerATNSimulator) GetTokenName(tt int) string {
-	if tt == -1 {
-		return "EOF"
-	}
-
-	return "'" + string(tt) + "'"
-}
-
-func resetSimState(sim *SimState) {
-	sim.index = -1
-	sim.line = 0
-	sim.column = -1
-	sim.dfaState = nil
-}
-
-type SimState struct {
-	index    int
-	line     int
-	column   int
-	dfaState *DFAState
-}
-
-func NewSimState() *SimState {
-	s := new(SimState)
-	resetSimState(s)
-	return s
-}
-
-func (s *SimState) reset() {
-	resetSimState(s)
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
deleted file mode 100644
index f5afd09b..00000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type LL1Analyzer struct {
-	atn *ATN
-}
-
-func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
-	la := new(LL1Analyzer)
-	la.atn = atn
-	return la
-}
-
-//* Special value added to the lookahead sets to indicate that we hit
-// a predicate during analysis if {@code seeThruPreds==false}.
-///
-const (
-	LL1AnalyzerHitPred = TokenInvalidType
-)
-
-//*
-// Calculates the SLL(1) expected lookahead set for each outgoing transition
-// of an {@link ATNState}. The returned array has one element for each
-// outgoing transition in {@code s}. If the closure from transition
-// i leads to a semantic predicate before Matching a symbol, the
-// element at index i of the result will be {@code nil}.
-//
-// @param s the ATN state
-// @return the expected symbols for each outgoing transition of {@code s}.
-func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
-	if s == nil {
-		return nil
-	}
-	count := len(s.GetTransitions())
-	look := make([]*IntervalSet, count)
-	for alt := 0; alt < count; alt++ {
-		look[alt] = NewIntervalSet()
-		lookBusy := NewSet(nil, nil)
-		seeThruPreds := false // fail to get lookahead upon pred
-		la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
-		// Wipe out lookahead for this alternative if we found nothing
-		// or we had a predicate when we !seeThruPreds
-		if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
-			look[alt] = nil
-		}
-	}
-	return look
-}
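
getDecisionLookahead is essentially a FIRST-set computation per alternative:
follow epsilon edges, collect terminal edges, and poison the set when a
predicate is hit with seeThruPreds disabled. A self-contained toy version with
illustrative types, not the runtime's own:

	type tEdge struct {
		label int // >= 0 terminal; -1 epsilon; -2 predicate
		to    *tNode
	}
	type tNode struct{ edges []tEdge }

	// first returns false if a predicate made the lookahead unknowable.
	func first(n *tNode, seeThruPreds bool, out map[int]bool, seen map[*tNode]bool) bool {
		if seen[n] {
			return true
		}
		seen[n] = true
		for _, e := range n.edges {
			switch {
			case e.label >= 0:
				out[e.label] = true
			case e.label == -2 && !seeThruPreds:
				return false // hit a predicate on this path
			default:
				if !first(e.to, seeThruPreds, out, seen) {
					return false
				}
			}
		}
		return true
	}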
-
-//*
-// Compute set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-//
-// If {@code ctx} is {@code nil} and the end of the rule containing
-// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
-// If {@code ctx} is not {@code nil} and the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
-//
-// @param s the ATN state
-// @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx the complete parser context, or {@code nil} if the context
-// should be ignored
-//
-// @return The set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-///
-func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
-	r := NewIntervalSet()
-	seeThruPreds := true // ignore preds; get all lookahead
-	var lookContext PredictionContext
-	if ctx != nil {
-		lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
-	}
-	la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true)
-	return r
-}
-
-//*
-// Compute set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-//
-// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
-// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
-// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
-// {@code true} and {@code stopState} or the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
-//
-// @param s the ATN state.
-// @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx The outer context, or {@code nil} if the outer context should
-// not be used.
-// @param look The result lookahead set.
-// @param lookBusy A set used for preventing epsilon closures in the ATN
-// from causing a stack overflow. Outside code should pass
-// {@code NewSet
-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-//
-// @param ttype the token type to Match
-// @return the Matched symbol
-// @panics RecognitionException if the current input symbol did not Match
-// {@code ttype} and the error strategy could not recover from the
-// mismatched symbol
-
-func (p *BaseParser) Match(ttype int) Token {
-
-	t := p.GetCurrentToken()
-
-	if t.GetTokenType() == ttype {
-		p.errHandler.ReportMatch(p)
-		p.Consume()
-	} else {
-		t = p.errHandler.RecoverInline(p)
-		if p.BuildParseTrees && t.GetTokenIndex() == -1 {
-			// we must have conjured up a new token during single token
-			// insertion, if it's not the current symbol
-			p.ctx.AddErrorNode(t)
-		}
-	}
-
-	return t
-}
-
-// Match current input symbol as a wildcard. If the symbol type Matches
-// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
-// and {@link //consume} are called to complete the Match process.
-//
-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-//
-// @return the Matched symbol
-// @panics RecognitionException if the current input symbol did not Match
-// a wildcard and the error strategy could not recover from the mismatched
-// symbol
-
-func (p *BaseParser) MatchWildcard() Token {
-	t := p.GetCurrentToken()
-	if t.GetTokenType() > 0 {
-		p.errHandler.ReportMatch(p)
-		p.Consume()
-	} else {
-		t = p.errHandler.RecoverInline(p)
-		if p.BuildParseTrees && t.GetTokenIndex() == -1 {
-			// we must have conjured up a new token during single token
-			// insertion, if it's not the current symbol
-			p.ctx.AddErrorNode(t)
-		}
-	}
-	return t
-}
-
-func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
-	return p.ctx
-}
-
-func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
-	p.ctx = v
-}
-
-func (p *BaseParser) GetParseListeners() []ParseTreeListener {
-	if p.parseListeners == nil {
-		return make([]ParseTreeListener, 0)
-	}
-	return p.parseListeners
-}
-
-// Registers {@code listener} to receive events during the parsing process.
-//
-// To support output-preserving grammar transformations (including but not
-// limited to left-recursion removal, automated left-factoring, and
-// optimized code generation), calls to listener methods during the parse
-// may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
-// particular, rule entry and exit events may occur in a different order
-// during the parse than after the parser. In addition, calls to certain
-// rule entry methods may be omitted.
-//
-// With the following specific exceptions, calls to listener events are
-// deterministic, i.e. for identical input the calls to listener
-// methods will be the same.
-//
-// If {@code listener} is {@code nil} or has not been added as a parse
-// listener, this method does nothing.
-// @param listener the listener to remove
-//
-func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
-
-	if p.parseListeners != nil {
-
-		idx := -1
-		for i, v := range p.parseListeners {
-			if v == listener {
-				idx = i
-				break
-			}
-		}
-
-		if idx == -1 {
-			return
-		}
-
-		// remove the listener from the slice
-		p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
-
-		if len(p.parseListeners) == 0 {
-			p.parseListeners = nil
-		}
-	}
-}
-
-// Remove all parse listeners.
-func (p *BaseParser) removeParseListeners() {
-	p.parseListeners = nil
-}
-
-// Notify any parse listeners of an enter rule event.
-func (p *BaseParser) TriggerEnterRuleEvent() {
-	if p.parseListeners != nil {
-		ctx := p.ctx
-		for _, listener := range p.parseListeners {
-			listener.EnterEveryRule(ctx)
-			ctx.EnterRule(listener)
-		}
-	}
-}
-
-// Notify any parse listeners of an exit rule event.
-//
-// @see //addParseListener
-//
-func (p *BaseParser) TriggerExitRuleEvent() {
-	if p.parseListeners != nil {
-		// reverse order walk of listeners
-		ctx := p.ctx
-		l := len(p.parseListeners) - 1
-
-		for i := range p.parseListeners {
-			listener := p.parseListeners[l-i]
-			ctx.ExitRule(listener)
-			listener.ExitEveryRule(ctx)
-		}
-	}
-}
-
-func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
-	return p.Interpreter
-}
-
-func (p *BaseParser) GetATN() *ATN {
-	return p.Interpreter.atn
-}
-
-func (p *BaseParser) GetTokenFactory() TokenFactory {
-	return p.input.GetTokenSource().GetTokenFactory()
-}
-
-// Tell our token source and error strategy about a new way to create tokens.
-func (p *BaseParser) setTokenFactory(factory TokenFactory) {
-	p.input.GetTokenSource().setTokenFactory(factory)
-}
-
-// The ATN with bypass alternatives is expensive to create so we create it
-// lazily.
-//
-// @panics UnsupportedOperationException if the current parser does not
-// implement the {@link //getSerializedATN()} method.
-//
-func (p *BaseParser) GetATNWithBypassAlts() {
-
-	// TODO
-	panic("Not implemented!")
-
-	// serializedAtn := p.getSerializedATN()
-	// if (serializedAtn == nil) {
-	//	panic("The current parser does not support an ATN with bypass alternatives.")
-	// }
-	// result := p.bypassAltsAtnCache[serializedAtn]
-	// if (result == nil) {
-	//	deserializationOptions := NewATNDeserializationOptions(nil)
-	//	deserializationOptions.generateRuleBypassTransitions = true
-	//	result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
-	//	p.bypassAltsAtnCache[serializedAtn] = result
-	// }
-	// return result
-}
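
A concrete picture of the listener plumbing above: any value implementing the
runtime's ParseTreeListener interface can be attached with AddParseListener and
is driven by TriggerEnterRuleEvent/TriggerExitRuleEvent while the parse is
still running. A minimal counting listener (illustrative, not part of this
diff):

	type countingListener struct {
		rulesEntered int
	}

	func (c *countingListener) VisitTerminal(node antlr.TerminalNode)      {}
	func (c *countingListener) VisitErrorNode(node antlr.ErrorNode)        {}
	func (c *countingListener) EnterEveryRule(ctx antlr.ParserRuleContext) { c.rulesEntered++ }
	func (c *countingListener) ExitEveryRule(ctx antlr.ParserRuleContext)  {}

	// usage: parser.AddParseListener(&countingListener{})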
-
-// The preferred method of getting a tree pattern. For example, here's a
-// sample use:
-//
-// <pre>
-// ParseTree t = parser.expr()
-// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
-// MyParser.RULE_expr)
-// ParseTreeMatch m = p.Match(t)
-// String id = m.Get("ID")
-// </pre>
-func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
-
-	panic("NewParseTreePatternMatcher not implemented!")
-	//
-	// if (lexer == nil) {
-	//	if (p.GetTokenStream() != nil) {
-	//		tokenSource := p.GetTokenStream().GetTokenSource()
-	//		if _, ok := tokenSource.(ILexer); ok {
-	//			lexer = tokenSource
-	//		}
-	//	}
-	// }
-	// if (lexer == nil) {
-	//	panic("Parser can't discover a lexer to use")
-	// }
-
-	// m := NewParseTreePatternMatcher(lexer, p)
-	// return m.compile(pattern, patternRuleIndex)
-}
-
-func (p *BaseParser) GetInputStream() IntStream {
-	return p.GetTokenStream()
-}
-
-func (p *BaseParser) SetInputStream(input TokenStream) {
-	p.SetTokenStream(input)
-}
-
-func (p *BaseParser) GetTokenStream() TokenStream {
-	return p.input
-}
-
-// Set the token stream and reset the parser.
-func (p *BaseParser) SetTokenStream(input TokenStream) {
-	p.input = nil
-	p.reset()
-	p.input = input
-}
-
-// Match needs to return the current input symbol, which gets put
-// into the label for the associated token ref e.g., x=ID.
-//
-func (p *BaseParser) GetCurrentToken() Token {
-	return p.input.LT(1)
-}
-
-func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
-	if offendingToken == nil {
-		offendingToken = p.GetCurrentToken()
-	}
-	p._SyntaxErrors++
-	line := offendingToken.GetLine()
-	column := offendingToken.GetColumn()
-	listener := p.GetErrorListenerDispatch()
-	listener.SyntaxError(p, offendingToken, line, column, msg, err)
-}
-
-func (p *BaseParser) Consume() Token {
-	o := p.GetCurrentToken()
-	if o.GetTokenType() != TokenEOF {
-		p.GetInputStream().Consume()
-	}
-	hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
-	if p.BuildParseTrees || hasListener {
-		if p.errHandler.inErrorRecoveryMode(p) {
-			node := p.ctx.AddErrorNode(o)
-			if p.parseListeners != nil {
-				for _, l := range p.parseListeners {
-					l.VisitErrorNode(node)
-				}
-			}
-
-		} else {
-			node := p.ctx.AddTokenNode(o)
-			if p.parseListeners != nil {
-				for _, l := range p.parseListeners {
-					l.VisitTerminal(node)
-				}
-			}
-		}
-		// node.invokingState = p.state
-	}
-
-	return o
-}
-
-func (p *BaseParser) addContextToParseTree() {
-	// add current context to parent if we have a parent
-	if p.ctx.GetParent() != nil {
-		p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
-	}
-}
-
-func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
-	p.SetState(state)
-	p.ctx = localctx
-	p.ctx.SetStart(p.input.LT(1))
-	if p.BuildParseTrees {
-		p.addContextToParseTree()
-	}
-	if p.parseListeners != nil {
-		p.TriggerEnterRuleEvent()
-	}
-}
-
-func (p *BaseParser) ExitRule() {
-	p.ctx.SetStop(p.input.LT(-1))
-	// trigger event on ctx, before it reverts to parent
-	if p.parseListeners != nil {
-		p.TriggerExitRuleEvent()
-	}
-	p.SetState(p.ctx.GetInvokingState())
-	if p.ctx.GetParent() != nil {
-		p.ctx = p.ctx.GetParent().(ParserRuleContext)
-	} else {
-		p.ctx = nil
-	}
-}
-
-func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
-	localctx.SetAltNumber(altNum)
-	// if we have a new localctx, make sure we replace existing ctx
-	// that is previous child of parse tree
-	if p.BuildParseTrees && p.ctx != localctx {
-		if p.ctx.GetParent() != nil {
-			p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
-			p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
-		}
-	}
-	p.ctx = localctx
-}
-
-// Get the precedence level for the top-most precedence rule.
-//
-// @return The precedence level for the top-most precedence rule, or -1 if
-// the parser context is not nested within a precedence rule.
-
-func (p *BaseParser) GetPrecedence() int {
-	if len(p.precedenceStack) == 0 {
-		return -1
-	}
-
-	return p.precedenceStack[len(p.precedenceStack)-1]
-}
-
-func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
-	p.SetState(state)
-	p.precedenceStack.Push(precedence)
-	p.ctx = localctx
-	p.ctx.SetStart(p.input.LT(1))
-	if p.parseListeners != nil {
-		p.TriggerEnterRuleEvent() // simulates rule entry for
-		// left-recursive rules
-	}
-}
-
-// Like {@link //EnterRule} but for recursive rules.
-
-func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
-	previous := p.ctx
-	previous.SetParent(localctx)
-	previous.SetInvokingState(state)
-	previous.SetStop(p.input.LT(-1))
-
-	p.ctx = localctx
-	p.ctx.SetStart(previous.GetStart())
-	if p.BuildParseTrees {
-		p.ctx.AddChild(previous)
-	}
-	if p.parseListeners != nil {
-		p.TriggerEnterRuleEvent() // simulates rule entry for
-		// left-recursive rules
-	}
-}
-
-func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
-	p.precedenceStack.Pop()
-	p.ctx.SetStop(p.input.LT(-1))
-	retCtx := p.ctx // save current ctx (return value)
-	// unroll so ctx is as it was before call to recursive method
-	if p.parseListeners != nil {
-		for p.ctx != parentCtx {
-			p.TriggerExitRuleEvent()
-			p.ctx = p.ctx.GetParent().(ParserRuleContext)
-		}
-	} else {
-		p.ctx = parentCtx
-	}
-	// hook into tree
-	retCtx.SetParent(parentCtx)
-	if p.BuildParseTrees && parentCtx != nil {
-		// add return ctx into invoking rule's tree
-		parentCtx.AddChild(retCtx)
-	}
-}
-
-func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
-	ctx := p.ctx
-	for ctx != nil {
-		if ctx.GetRuleIndex() == ruleIndex {
-			return ctx
-		}
-		ctx = ctx.GetParent().(ParserRuleContext)
-	}
-	return nil
-}
-
-func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
-	return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
-}
-
-func (p *BaseParser) inContext(context ParserRuleContext) bool {
-	// TODO: useful in parser?
-	return false
-}
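
Precpred above is the core of the precedence-climbing test for left-recursive
rules: an alternative may continue only if its precedence is at least the
minimum allowed by the enclosing context (the top of precedenceStack). A worked
toy, assuming a grammar where '+' has precedence 1 and '*' has precedence 2,
both left-associative:

	// While parsing the right-hand side of '+', the minimum allowed
	// precedence is raised past 1, so another '+' fails the test and binds
	// to the outer loop, while '*' still passes and nests more deeply.
	func precpred(minAllowed, operatorPrec int) bool {
		return operatorPrec >= minAllowed // mirrors BaseParser.Precpred
	}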
-// return getExpectedTokens().contains(symbol) -//-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - -func (p *BaseParser) IsExpectedToken(symbol int) bool { - atn := p.Interpreter.atn - ctx := p.ctx - s := atn.states[p.state] - following := atn.NextTokens(s, nil) - if following.contains(symbol) { - return true - } - if !following.contains(TokenEpsilon) { - return false - } - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - following = atn.NextTokens(rt.(*RuleTransition).followState, nil) - if following.contains(symbol) { - return true - } - ctx = ctx.GetParent().(ParserRuleContext) - } - if following.contains(TokenEpsilon) && symbol == TokenEOF { - return true - } - - return false -} - -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, -// respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) -// -func (p *BaseParser) GetExpectedTokens() *IntervalSet { - return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) -} - -func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { - atn := p.Interpreter.atn - s := atn.states[p.state] - return atn.NextTokens(s, nil) -} - -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// -func (p *BaseParser) GetRuleIndex(ruleName string) int { - var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] - if ok { - return ruleIndex - } - - return -1 -} - -// Return List<String> of the rule names in your parser instance -// leading up to a call to the current rule. You could override if -// you want more details such as the file/line info of where -// in the ATN a rule is invoked. -// -// this very useful for error messages. - -func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { - if c == nil { - c = p.ctx - } - stack := make([]string, 0) - for c != nil { - // compute what follows who invoked us - ruleIndex := c.GetRuleIndex() - if ruleIndex < 0 { - stack = append(stack, "n/a") - } else { - stack = append(stack, p.GetRuleNames()[ruleIndex]) - } - - vp := c.GetParent() - - if vp == nil { - break - } - - c = vp.(ParserRuleContext) - } - return stack -} - -// For debugging and other purposes.// -func (p *BaseParser) GetDFAStrings() string { - return fmt.Sprint(p.Interpreter.decisionToDFA) -} - -// For debugging and other purposes.// -func (p *BaseParser) DumpDFA() { - seenOne := false - for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.numStates() > 0 { - if seenOne { - fmt.Println() - } - fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") - fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) - seenOne = true - } - } -} - -func (p *BaseParser) GetSourceName() string { - return p.GrammarFileName -} - -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. 
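GetRuleInvocationStack above is nothing more than a parent-pointer walk over the context chain. A minimal standalone sketch of that walk, using a hypothetical node type in place of ParserRuleContext:

package main

import "fmt"

// node stands in for ParserRuleContext: it knows its rule index and parent.
// The type and field names here are hypothetical; the runtime walks
// ctx.GetParent() the same way.
type node struct {
	ruleIndex int
	parent    *node
}

// invocationStack mirrors GetRuleInvocationStack: walk parent links from the
// current context to the root, mapping each rule index to its name.
func invocationStack(c *node, ruleNames []string) []string {
	var stack []string
	for c != nil {
		if c.ruleIndex < 0 {
			stack = append(stack, "n/a")
		} else {
			stack = append(stack, ruleNames[c.ruleIndex])
		}
		c = c.parent
	}
	return stack
}

func main() {
	names := []string{"prog", "statement", "letterA"}
	root := &node{ruleIndex: 0}
	stmt := &node{ruleIndex: 1, parent: root}
	leaf := &node{ruleIndex: 2, parent: stmt}
	fmt.Println(invocationStack(leaf, names)) // [letterA statement prog]
}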
-// -func (p *BaseParser) SetTrace(trace *TraceListener) { - if trace == nil { - p.RemoveParseListener(p.tracer) - p.tracer = nil - } else { - if p.tracer != nil { - p.RemoveParseListener(p.tracer) - } - p.tracer = NewTraceListener(p) - p.AddParseListener(p.tracer) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go deleted file mode 100644 index 128b9a96..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go +++ /dev/null @@ -1,1473 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorListATNDecisions = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false -) - -type ParserATNSimulator struct { - *BaseATNSimulator - - parser Parser - predictionMode int - input TokenStream - startIndex int - dfa *DFA - mergeCache *DoubleDict - outerContext ParserRuleContext -} - -func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - - p := new(ParserATNSimulator) - - p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - p.parser = parser - p.decisionToDFA = decisionToDFA - // SLL, LL, or LL + exact ambig detection?// - p.predictionMode = PredictionModeLL - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - p.input = nil - p.startIndex = 0 - p.outerContext = nil - p.dfa = nil - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't Synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. - // - p.mergeCache = nil - - return p -} - -func (p *ParserATNSimulator) GetPredictionMode() int { - return p.predictionMode -} - -func (p *ParserATNSimulator) SetPredictionMode(v int) { - p.predictionMode = v -} - -func (p *ParserATNSimulator) reset() { -} - -func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + - strconv.Itoa(input.LT(1).GetColumn())) - } - - p.input = input - p.startIndex = input.Index() - p.outerContext = outerContext - - dfa := p.decisionToDFA[decision] - p.dfa = dfa - m := input.Mark() - index := input.Index() - - defer func() { - p.dfa = nil - p.mergeCache = nil // wack cache after each prediction - input.Seek(index) - input.Release(m) - }() - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - var s0 *DFAState - if dfa.precedenceDfa { - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. 
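The mergeCache comment in NewParserATNSimulator above describes a pair-keyed memo: merge graphs a and b once, then reuse the result whenever the same pair shows up again. A small sketch of that idea under illustrative names (pairCache is not the runtime's DoubleDict API):

package main

import "fmt"

// pairCache memoizes a costly merge of two values, as the simulator's
// mergeCache does for prediction contexts: (a,b) -> merged result c.
type pairCache struct {
	m map[[2]int]int
}

func newPairCache() *pairCache { return &pairCache{m: make(map[[2]int]int)} }

func (c *pairCache) merge(a, b int, mergeFn func(int, int) int) int {
	key := [2]int{a, b}
	if v, ok := c.m[key]; ok {
		return v // avoid recomputing the merge for a pair we have seen
	}
	v := mergeFn(a, b)
	c.m[key] = v
	return v
}

func main() {
	calls := 0
	cache := newPairCache()
	slowMerge := func(a, b int) int { calls++; return a + b }
	cache.merge(3, 4, slowMerge)
	cache.merge(3, 4, slowMerge) // served from the cache
	fmt.Println(calls)           // 1
}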
- s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.s0 - } - - if s0 == nil { - if outerContext == nil { - outerContext = RuleContextEmpty - } - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) - } - // If p is not a precedence DFA, we check the ATN start state - // to determine if p ATN start state is the decision for the - // closure block that determines whether a precedence rule - // should continue or complete. - - t2 := dfa.atnStartState - t, ok := t2.(*StarLoopEntryState) - if !dfa.precedenceDfa && ok { - if t.precedenceRuleDecision { - dfa.setPrecedenceDfa(true) - } - } - fullCtx := false - s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx) - - if dfa.precedenceDfa { - // If p is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. - // - s0Closure = p.applyPrecedenceFilter(s0Closure) - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) - } else { - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.s0 = s0 - } - } - alt := p.execATN(dfa, s0, input, index, outerContext) - if ParserATNSimulatorDebug { - fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) - } - return alt - -} - -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - -// There are some key conditions we're looking for after computing a new -// set of ATN configs (proposed DFA state): -// if the set is empty, there is no viable alternative for current symbol -// does the state uniquely predict an alternative? -// does the state have a conflict that would prevent us from -// putting it on the work list? - -// We also have some key operations to do: -// add an edge from previous DFA state to potentially NewDFA state, D, -// upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) 
-// collecting predicates and adding semantic context to DFA accept states -// adding rule context to context-sensitive DFA accept states -// consuming an input symbol -// Reporting a conflict -// Reporting an ambiguity -// Reporting a context sensitivity -// Reporting insufficient predicates - -// cover these cases: -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -// -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) - } - - previousD := s0 - - if ParserATNSimulatorDebug { - fmt.Println("s0 = " + s0.String()) - } - t := input.LA(1) - for { // for more work - D := p.getExistingTargetState(previousD, t) - if D == nil { - D = p.computeTargetState(dfa, previousD, t) - } - if D == ATNSimulatorError { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - if D.requiresFullContext && p.predictionMode != PredictionModeSLL { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.GetConflictingAlts() - if D.predicates != nil { - if ParserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL failover") - } - conflictIndex := input.Index() - if conflictIndex != startIndex { - input.Seek(startIndex) - } - conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) - if conflictingAlts.length() == 1 { - if ParserATNSimulatorDebug { - fmt.Println("Full LL avoided") - } - return conflictingAlts.minValue() - } - if conflictIndex != startIndex { - // restore the index so Reporting the fallback to full - // context occurs with the index at the correct spot - input.Seek(conflictIndex) - } - } - if ParserATNSimulatorDFADebug { - fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) - } - fullCtx := true - s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) - p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt - } - if D.isAcceptState { - if D.predicates == nil { - return D.prediction - } - stopIndex := input.Index() - input.Seek(startIndex) - alts := p.evalSemanticContext(D.predicates, outerContext, true) - if alts.length() == 0 { - panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) - } else if alts.length() == 1 { - return alts.minValue() - } else { - // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts 
is Reported. - p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() - } - } - previousD = D - - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - - panic("Should not have reached p state") -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// p method returns {@code nil}. -// -// @param previousD The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for p edge is not -// already cached - -func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { - edges := previousD.edges - if edges == nil { - return nil - } - - return edges[t+1] -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param dfa The DFA -// @param previousD The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, p method -// returns {@link //ERROR}. - -func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { - reach := p.computeReachSet(previousD.configs, t, false) - - if reach == nil { - p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) - return ATNSimulatorError - } - // create Newtarget state we'll add to DFA after it's complete - D := NewDFAState(-1, reach) - - predictedAlt := p.getUniqueAlt(reach) - - if ParserATNSimulatorDebug { - altSubSets := PredictionModegetConflictingAltSubsets(reach) - fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + - ", previous=" + previousD.configs.String() + - ", configs=" + reach.String() + - ", predict=" + strconv.Itoa(predictedAlt) + - ", allSubsetsConflict=" + - fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + - ", conflictingAlts=" + p.getConflictingAlts(reach).String()) - } - if predictedAlt != ATNInvalidAltNumber { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true - D.configs.SetUniqueAlt(predictedAlt) - D.setPrediction(predictedAlt) - } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) - D.requiresFullContext = true - // in SLL-only mode, we will stop at p state and return the minimum alt - D.isAcceptState = true - D.setPrediction(D.configs.GetConflictingAlts().minValue()) - } - if D.isAcceptState && D.configs.HasSemanticContext() { - p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) - if D.predicates != nil { - D.setPrediction(ATNInvalidAltNumber) - } - } - // all adds to dfa are done after we've created full D state - D = p.addDFAEdge(dfa, previousD, t, D) - return D -} - -func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { - // We need to test all predicates, even in DFA states that - // uniquely predict alternative. 
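getExistingTargetState above indexes the edge table with t+1 so that EOF, whose token type is -1, can occupy slot 0; addDFAEdge later sizes the slice as maxTokenType+1+1 for the same reason. A self-contained sketch of the lookup with stand-in types:

package main

import "fmt"

const tokenEOF = -1 // ANTLR's EOF token type, hence the +1 offset below

// state stands in for DFAState, with an edge table indexed by symbol+1 so
// that EOF (-1) can occupy slot 0.
type state struct {
	name  string
	edges []*state
}

// existingTarget mirrors getExistingTargetState: a nil entry means the
// target for this symbol has not been computed and cached yet.
func existingTarget(s *state, t int) *state {
	if s.edges == nil {
		return nil
	}
	return s.edges[t+1]
}

func main() {
	maxTokenType := 3
	s0 := &state{name: "s0", edges: make([]*state, maxTokenType+1+1)}
	s0.edges[2+1] = &state{name: "s1"} // cached: on token 2, go to s1
	if tgt := existingTarget(s0, 2); tgt != nil {
		fmt.Println("cached edge on token 2 ->", tgt.name)
	}
	fmt.Println(existingTarget(s0, tokenEOF) == nil) // true: EOF edge not computed
}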
- nalts := len(decisionState.GetTransitions()) - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) - altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) - if altToPred != nil { - dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) - dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.setPrediction(altsToCollectPredsFrom.minValue()) - } -} - -// comes back with reach.uniqueAlt set to a valid alt -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("execATNWithFullContext " + s0.String()) - } - - fullCtx := true - foundExactAmbig := false - var reach ATNConfigSet - previous := s0 - input.Seek(startIndex) - t := input.LA(1) - predictedAlt := -1 - - for { // for more work - reach = p.computeReachSet(previous, t, fullCtx) - if reach == nil { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previous, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - altSubSets := PredictionModegetConflictingAltSubsets(reach) - if ParserATNSimulatorDebug { - fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + - strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + - fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) - } - reach.SetUniqueAlt(p.getUniqueAlt(reach)) - // unique prediction? - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - predictedAlt = reach.GetUniqueAlt() - break - } else if p.predictionMode != PredictionModeLLExactAmbigDetection { - predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) - if predictedAlt != ATNInvalidAltNumber { - break - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { - foundExactAmbig = true - predictedAlt = PredictionModegetSingleViableAlt(altSubSets) - break - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. 
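execATNWithFullContext is the expensive second stage of the SLL-then-LL strategy: execATN runs the context-insensitive pass first and only rewinds and retries with full context when a conflict requires it. Roughly, with stub prediction functions standing in for both passes:

package main

import "fmt"

// Two-stage prediction in miniature: try the cheap SLL pass first and fall
// back to full-context LL only when SLL reports a conflict. The functions
// here are stubs standing in for execATN / execATNWithFullContext.
func predict(sll func() (alt int, conflict bool), ll func() int) int {
	if alt, conflict := sll(); !conflict {
		return alt // SLL was unambiguous; full LL would agree
	}
	return ll() // conflict: rewind and redo the decision with full context
}

func main() {
	sll := func() (int, bool) { return 0, true } // pretend SLL conflicted
	ll := func() int { return 2 }
	fmt.Println(predict(sll, ll)) // 2
}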
- if reach.GetUniqueAlt() != ATNInvalidAltNumber { - p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt - } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. - - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In p case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve p without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. - - p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, nil, reach) - - return predictedAlt -} - -func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { - if ParserATNSimulatorDebug { - fmt.Println("in computeReachSet, starting closure: " + closure.String()) - } - if p.mergeCache == nil { - p.mergeCache = NewDoubleDict() - } - intermediate := NewBaseATNConfigSet(fullCtx) - - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. - // - // For full-context reach operations, separate handling is required to - // ensure that the alternative Matching the longest overall sequence is - // chosen when multiple such configurations can Match the input. - - var SkippedStopStates []*BaseATNConfig - - // First figure out where we can reach on input t - for _, c := range closure.GetItems() { - if ParserATNSimulatorDebug { - fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) - } - - _, ok := c.GetState().(*RuleStopState) - - if ok { - if fullCtx || t == TokenEOF { - if SkippedStopStates == nil { - SkippedStopStates = make([]*BaseATNConfig, 0) - } - SkippedStopStates = append(SkippedStopStates, c.(*BaseATNConfig)) - if ParserATNSimulatorDebug { - fmt.Println("added " + c.String() + " to SkippedStopStates") - } - } - continue - } - - for j := 0; j < len(c.GetState().GetTransitions()); j++ { - trans := c.GetState().GetTransitions()[j] - target := p.getReachableTarget(trans, t) - if target != nil { - cfg := NewBaseATNConfig4(c, target) - intermediate.Add(cfg, p.mergeCache) - if ParserATNSimulatorDebug { - fmt.Println("added " + cfg.String() + " to intermediate") - } - } - } - } - // Now figure out where the reach operation can take us... 
- var reach ATNConfigSet - - // This block optimizes the reach operation for intermediate sets which - // trivially indicate a termination state for the overall - // AdaptivePredict operation. - // - // The conditions assume that intermediate - // contains all configurations relevant to the reach set, but p - // condition is not true when one or more configurations have been - // withheld in SkippedStopStates, or when the current symbol is EOF. - // - if SkippedStopStates == nil && t != TokenEOF { - if len(intermediate.configs) == 1 { - // Don't pursue the closure if there is just one state. - // It can only have one alternative just add to result - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } - } - // If the reach set could not be trivially determined, perform a closure - // operation on the intermediate set to compute its initial value. - // - if reach == nil { - reach = NewBaseATNConfigSet(fullCtx) - closureBusy := NewSet(nil, nil) - treatEOFAsEpsilon := t == TokenEOF - for k := 0; k < len(intermediate.configs); k++ { - p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) - } - } - if t == TokenEOF { - // After consuming EOF no additional input is possible, so we are - // only interested in configurations which reached the end of the - // decision rule (local context) or end of the start rule (full - // context). Update reach to contain only these configurations. This - // handles both explicit EOF transitions in the grammar and implicit - // EOF transitions following the end of the decision or start rule. - // - // When reach==intermediate, no closure operation was performed. In - // p case, removeAllConfigsNotInRuleStopState needs to check for - // reachable rule stop states as well as configurations already in - // a rule stop state. - // - // This is handled before the configurations in SkippedStopStates, - // because any configurations potentially added from that list are - // already guaranteed to meet p condition whether or not it's - // required. - // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) - } - // If SkippedStopStates!=nil, then it contains at least one - // configuration. For full-context reach operations, these - // configurations reached the end of the start rule, in which case we - // only add them back to reach if no configuration during the current - // closure operation reached such a state. This ensures AdaptivePredict - // chooses an alternative Matching the longest overall sequence when - // multiple alternatives are viable. - // - if SkippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { - for l := 0; l < len(SkippedStopStates); l++ { - reach.Add(SkippedStopStates[l], p.mergeCache) - } - } - if len(reach.GetItems()) == 0 { - return nil - } - - return reach -} - -// -// Return a configuration set containing only the configurations from -// {@code configs} which are in a {@link RuleStopState}. If all -// configurations in {@code configs} are already in a rule stop state, p -// method simply returns {@code configs}. -// -//
When {@code lookToEndOfRule} is true, this method uses -// {@link ATN//NextTokens} for each configuration in {@code configs} which is -// not already in a rule stop state to see if a rule stop state is reachable -// from the configuration via epsilon-only transitions.
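Stripped of the epsilon-reachability refinement just described, removeAllConfigsNotInRuleStopState reduces to: return the set unchanged when every configuration is already in a rule stop state, otherwise build a filtered copy. A toy version:

package main

import "fmt"

// config is a stand-in for ATNConfig; inStop marks a rule stop state.
type config struct {
	alt    int
	inStop bool
}

// keepStopStates mirrors the filter described above: if every configuration
// is already in a rule stop state the input set is returned unchanged,
// otherwise a new set containing only the stop-state configurations is built.
func keepStopStates(configs []config) []config {
	all := true
	for _, c := range configs {
		if !c.inStop {
			all = false
			break
		}
	}
	if all {
		return configs
	}
	out := make([]config, 0, len(configs))
	for _, c := range configs {
		if c.inStop {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	fmt.Println(keepStopStates([]config{{1, true}, {2, false}, {3, true}}))
	// [{1 true} {3 true}]
}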
-// -// @param configs the configuration set to update -// @param lookToEndOfRule when true, p method checks for rule stop states -// reachable by epsilon-only transitions from each configuration in -// {@code configs}. -// -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state -// -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { - if PredictionModeallConfigsInRuleStopStates(configs) { - return configs - } - result := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.GetItems() { - - _, ok := config.GetState().(*RuleStopState) - - if ok { - result.Add(config, p.mergeCache) - continue - } - if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { - NextTokens := p.atn.NextTokens(config.GetState(), nil) - if NextTokens.contains(TokenEpsilon) { - endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) - } - } - } - return result -} - -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { - // always at least the implicit call to start rule - initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewBaseATNConfigSet(fullCtx) - for i := 0; i < len(a.GetTransitions()); i++ { - target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewSet(nil, nil) - p.closure(c, configs, closureBusy, true, fullCtx, false) - } - return configs -} - -// -// This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a -// precedence DFA for a particular precedence value. The transformation -// process applies the following changes to the start state's configuration -// set. -// -//-// The prediction context must be considered by p filter to address -// situations like the following. -//
-//
-//
-// grammar TA;
-// prog: statement* EOF;
-// statement: letterA | statement letterA 'b';
-// letterA: 'a';
-//
-//
-// -// If the above grammar, the ATN state immediately before the token -// reference {@code 'a'} in {@code letterA} is reachable from the left edge -// of both the primary and closure blocks of the left-recursive rule -// {@code statement}. The prediction context associated with each of these -// configurations distinguishes between them, and prevents the alternative -// which stepped out to {@code prog} (and then back in to {@code statement} -// from being eliminated by the filter. -//
-// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -// -func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { - - statesFromAlt1 := make(map[int]PredictionContext) - configSet := NewBaseATNConfigSet(configs.FullContext()) - - for _, config := range configs.GetItems() { - // handle alt 1 first - if config.GetAlt() != 1 { - continue - } - updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) - if updatedContext == nil { - // the configuration was eliminated - continue - } - statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() - if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) - } else { - configSet.Add(config, p.mergeCache) - } - } - for _, config := range configs.GetItems() { - - if config.GetAlt() == 1 { - // already handled - continue - } - // In the future, p elimination step could be updated to also - // filter the prediction context for alternatives predicting alt>1 - // (basically a graph subtraction algorithm). - if !config.getPrecedenceFilterSuppressed() { - context := statesFromAlt1[config.GetState().GetStateNumber()] - if context != nil && context.equals(config.GetContext()) { - // eliminated - continue - } - } - configSet.Add(config, p.mergeCache) - } - return configSet -} - -func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { - if trans.Matches(ttype, 0, p.atn.maxTokenType) { - return trans.getTarget() - } - - return nil -} - -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { - - altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.GetItems() { - if ambigAlts.contains(c.GetAlt()) { - altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) - } - } - nPredAlts := 0 - for i := 1; i < nalts+1; i++ { - pred := altToPred[i] - if pred == nil { - altToPred[i] = SemanticContextNone - } else if pred != SemanticContextNone { - nPredAlts++ - } - } - // nonambig alts are nil in altToPred - if nPredAlts == 0 { - altToPred = nil - } - if ParserATNSimulatorDebug { - fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) - } - return altToPred -} - -func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { - pairs := make([]*PredPrediction, 0) - containsPredicate := false - for i := 1; i < len(altToPred); i++ { - pred := altToPred[i] - // unpredicated is indicated by SemanticContextNONE - if ambigAlts != nil && ambigAlts.contains(i) { - pairs = append(pairs, NewPredPrediction(pred, i)) - } - if pred != SemanticContextNone { - containsPredicate = true - } - } - if !containsPredicate { - return nil - } - return pairs -} - -// -// This method is used to improve the localization of error messages by -// choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} state was reached during ATN simulation. 
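getPredsForAmbigAlts above OR's together every semantic context it sees for an ambiguous alternative across the configuration set. A sketch of that combination step, with predicates modeled as boolean thunks (the names are illustrative, not the runtime's SemanticContext API):

package main

import "fmt"

// pred models a semantic predicate as a boolean thunk; nil means "no
// predicate seen yet" for this alternative.
type pred func() bool

// orPreds combines two predicates the way altToPred entries are OR'd
// together while scanning the configurations of an ambiguous decision.
func orPreds(a, b pred) pred {
	if a == nil {
		return b
	}
	if b == nil {
		return a
	}
	return func() bool { return a() || b() }
}

func main() {
	alwaysFalse := pred(func() bool { return false })
	alwaysTrue := pred(func() bool { return true })
	p := orPreds(nil, alwaysFalse) // first config for this alt
	p = orPreds(p, alwaysTrue)     // second config for the same alt
	fmt.Println(p())               // true: false || true
}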
-// -//-// The default implementation of p method uses the following -// algorithm to identify an ATN configuration which successfully parsed the -// decision entry rule. Choosing such an alternative ensures that the -// {@link ParserRuleContext} returned by the calling rule will be complete -// and valid, and the syntax error will be Reported later at a more -// localized location.
-// -//-// In some scenarios, the algorithm described above could predict an -// alternative which will result in a {@link FailedPredicateException} in -// the parser. Specifically, p could occur if the only configuration -// capable of successfully parsing to the end of the decision rule is -// blocked by a semantic predicate. By choosing p alternative within -// {@link //AdaptivePredict} instead of panicing a -// {@link NoViableAltException}, the resulting -// {@link FailedPredicateException} in the parser will identify the specific -// predicate which is preventing the parser from successfully parsing the -// decision rule, which helps developers identify and correct logic errors -// in semantic predicates. -//
-// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper -// or the parser stack at the instant before prediction commences. -// -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. -// -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { - cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) - semValidConfigs := cfgs[0] - semInvalidConfigs := cfgs[1] - alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) - if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists - return alt - } - // Is there a syntactically valid path with a failed pred? - if len(semInvalidConfigs.GetItems()) > 0 { - alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) - if alt != ATNInvalidAltNumber { // syntactically viable path exists - return alt - } - } - return ATNInvalidAltNumber -} - -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { - alts := NewIntervalSet() - - for _, c := range configs.GetItems() { - _, ok := c.GetState().(*RuleStopState) - - if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { - alts.addOne(c.GetAlt()) - } - } - if alts.length() == 0 { - return ATNInvalidAltNumber - } - - return alts.first() -} - -// Walk the list of configurations and split them according to -// those that have preds evaluating to true/false. If no pred, assume -// true pred and include in succeeded set. Returns Pair of sets. -// -// Create a NewSet so as not to alter the incoming parameter. -// -// Assumption: the input stream has been restored to the starting point -// prediction, which is where predicates need to evaluate. - -type ATNConfigSetPair struct { - item0, item1 ATNConfigSet -} - -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { - succeeded := NewBaseATNConfigSet(configs.FullContext()) - failed := NewBaseATNConfigSet(configs.FullContext()) - - for _, c := range configs.GetItems() { - if c.GetSemanticContext() != SemanticContextNone { - predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) - if predicateEvaluationResult { - succeeded.Add(c, nil) - } else { - failed.Add(c, nil) - } - } else { - succeeded.Add(c, nil) - } - } - return []ATNConfigSet{succeeded, failed} -} - -// Look through a list of predicate/alt pairs, returning alts for the -// pairs that win. A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. 
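The pair-walking loop just described is compact enough to model directly; here predicates are boolean thunks and a plain map stands in for the BitSet of winning alternatives:

package main

import "fmt"

// predPair mirrors PredPrediction: an alternative guarded by a predicate.
// A nil predicate plays the role of SemanticContextNone ("always true").
type predPair struct {
	pred func() bool
	alt  int
}

// evalPairs sketches evalSemanticContext: collect every alt whose predicate
// passes; with complete=false, stop at the first winner (including
// unpredicated alts).
func evalPairs(pairs []predPair, complete bool) map[int]bool {
	wins := map[int]bool{}
	for _, p := range pairs {
		if p.pred == nil || p.pred() {
			wins[p.alt] = true
			if !complete {
				break
			}
		}
	}
	return wins
}

func main() {
	pairs := []predPair{
		{func() bool { return false }, 1},
		{nil, 2}, // unpredicated: behaves as always true
		{func() bool { return true }, 3},
	}
	fmt.Println(evalPairs(pairs, true))  // map[2:true 3:true]
	fmt.Println(evalPairs(pairs, false)) // map[2:true]
}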
-// -func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { - predictions := NewBitSet() - for i := 0; i < len(predPredictions); i++ { - pair := predPredictions[i] - if pair.pred == SemanticContextNone { - predictions.add(pair.alt) - if !complete { - break - } - continue - } - - predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) - } - if predicateEvaluationResult { - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) - } - predictions.add(pair.alt) - if !complete { - break - } - } - } - return predictions -} - -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { - initialDepth := 0 - p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, - fullCtx, initialDepth, treatEOFAsEpsilon) -} - -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - - if ParserATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") - fmt.Println("configs(" + configs.String() + ")") - if config.GetReachesIntoOuterContext() > 50 { - panic("problem") - } - } - - _, ok := config.GetState().(*RuleStopState) - if ok { - // We hit rule end. If we have context info, use it - // run thru all possible stack tops in ctx - if !config.GetContext().isEmpty() { - for i := 0; i < config.GetContext().length(); i++ { - if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { - if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) - continue - } else { - // we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) - } - continue - } - returnState := p.atn.states[config.GetContext().getReturnState(i)] - newContext := config.GetContext().GetParent(i) // "pop" return state - - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) - // While we have context to pop back from, we may have - // gotten that context AFTER having falling off a rule. - // Make sure we track that we are now out of context. 
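The closureBusy set threaded through these closure routines exists to stop the epsilon walk from recursing forever on cycles (the "avoid infinite recursion" guards below). The guard is an ordinary visited set, e.g.:

package main

import "fmt"

// A minimal epsilon-closure sketch: starting from a state, follow
// epsilon-only edges, using a busy set to cut cycles much as closureBusy
// guards the runtime's recursive closure.
type tinyState struct {
	id      int
	epsilon []*tinyState
}

func closure(s *tinyState, busy map[int]bool, out *[]int) {
	if busy[s.id] {
		return // already expanded: avoids infinite recursion on cycles
	}
	busy[s.id] = true
	*out = append(*out, s.id)
	for _, t := range s.epsilon {
		closure(t, busy, out)
	}
}

func main() {
	a := &tinyState{id: 1}
	b := &tinyState{id: 2}
	c := &tinyState{id: 3}
	a.epsilon = []*tinyState{b}
	b.epsilon = []*tinyState{c, a} // cycle back to a
	var reached []int
	closure(a, map[int]bool{}, &reached)
	fmt.Println(reached) // [1 2 3]
}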
- c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) - p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) - } - return - } else if fullCtx { - // reached end of start rule - configs.Add(config, p.mergeCache) - return - } else { - // else if we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - } - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) -} - -// Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - state := config.GetState() - // optimization - if !state.GetEpsilonOnlyTransitions() { - configs.Add(config, p.mergeCache) - // make sure to not return here, because EOF transitions can act as - // both epsilon transitions and non-epsilon transitions. - } - for i := 0; i < len(state.GetTransitions()); i++ { - t := state.GetTransitions()[i] - _, ok := t.(*ActionTransition) - continueCollecting := collectPredicates && !ok - c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if ci, ok := c.(*BaseATNConfig); ok && ci != nil { - if !t.getIsEpsilon() && closureBusy.add(c) != c { - // avoid infinite recursion for EOF* and EOF+ - continue - } - newDepth := depth - - if _, ok := config.GetState().(*RuleStopState); ok { - - // target fell off end of rule mark resulting c as having dipped into outer context - // We can't get here if incoming config was rule stop and we had context - // track how far we dip into outer context. Might - // come in handy and we avoid evaluating context dependent - // preds if p is > 0. - - if closureBusy.add(c) != c { - // avoid infinite recursion for right-recursive rules - continue - } - - if p.dfa != nil && p.dfa.precedenceDfa { - if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { - c.setPrecedenceFilterSuppressed(true) - } - } - - c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) - configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method - newDepth-- - if ParserATNSimulatorDebug { - fmt.Println("dips into outer ctx: " + c.String()) - } - } else if _, ok := t.(*RuleTransition); ok { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if newDepth >= 0 { - newDepth++ - } - } - p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) - } - } -} - -func (p *ParserATNSimulator) getRuleName(index int) string { - if p.parser != nil && index >= 0 { - return p.parser.GetRuleNames()[index] - } - - return "If {@code to} is {@code nil}, p method returns {@code nil}. -// Otherwise, p method returns the {@link DFAState} returned by calling -// {@link //addDFAState} for the {@code to} state.
-// -// @param dfa The DFA -// @param from The source state for the edge -// @param t The input symbol -// @param to The target state for the edge -// -// @return If {@code to} is {@code nil}, p method returns {@code nil} -// otherwise p method returns the result of calling {@link //addDFAState} -// on {@code to} -// -func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { - if ParserATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) - } - if to == nil { - return nil - } - to = p.addDFAState(dfa, to) // used existing if possible not incoming - if from == nil || t < -1 || t > p.atn.maxTokenType { - return to - } - if from.edges == nil { - from.edges = make([]*DFAState, p.atn.maxTokenType+1+1) - } - from.edges[t+1] = to // connect - - if ParserATNSimulatorDebug { - var names []string - if p.parser != nil { - names = p.parser.GetLiteralNames() - } - - fmt.Println("DFA=\n" + dfa.String(names, nil)) - } - return to -} - -// -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} -// is already in the DFA, the existing state is returned. Otherwise p -// method returns {@code D} after adding it to the DFA. -// -//If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and -// does not change the DFA.
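addDFAState, described above, is interning: look the candidate up by hash, reuse an equivalent stored state when one exists, otherwise number the new state and store it. In miniature, with string keys standing in for DFAState.hash():

package main

import "fmt"

// Interning DFA states: if an equivalent state is already stored, reuse it.
type dfa struct {
	states map[string]*dfaState
}

type dfaState struct {
	key         string
	stateNumber int
}

func (d *dfa) addState(s *dfaState) *dfaState {
	if existing, ok := d.states[s.key]; ok {
		return existing // equivalent state already in the DFA
	}
	s.stateNumber = len(d.states)
	d.states[s.key] = s
	return s
}

func main() {
	d := &dfa{states: map[string]*dfaState{}}
	a := d.addState(&dfaState{key: "cfg{1,2}"})
	b := d.addState(&dfaState{key: "cfg{1,2}"}) // same configs: reused
	fmt.Println(a == b, a.stateNumber)          // true 0
}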
-// -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. -// -func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { - if d == ATNSimulatorError { - return d - } - hash := d.hash() - existing, ok := dfa.getState(hash) - if ok { - return existing - } - d.stateNumber = dfa.numStates() - if !d.configs.ReadOnly() { - d.configs.OptimizeConfigs(p.BaseATNSimulator) - d.configs.SetReadOnly(true) - } - dfa.setState(hash, d) - if ParserATNSimulatorDebug { - fmt.Println("adding NewDFA state: " + d.String()) - } - return d -} - -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) - } -} - -// If context sensitive parsing, we know it's ambiguity not conflict// -func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go deleted file mode 100644 index 49cd10c5..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "reflect" - "strconv" -) - -type ParserRuleContext interface { - RuleContext - - SetException(RecognitionException) - - AddTokenNode(token Token) *TerminalNodeImpl - AddErrorNode(badToken Token) *ErrorNodeImpl - - EnterRule(listener ParseTreeListener) - ExitRule(listener ParseTreeListener) - - SetStart(Token) - GetStart() Token - - SetStop(Token) - GetStop() Token - - AddChild(child RuleContext) RuleContext - RemoveLastChild() -} - -type BaseParserRuleContext struct { - *BaseRuleContext - - start, stop Token - exception RecognitionException - children []Tree -} - -func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { - prc := new(BaseParserRuleContext) - - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) - - prc.RuleIndex = -1 - // * If we are debugging or building a parse tree for a Visitor, - // we need to track all of the tokens and rule invocations associated - // with prc rule's context. This is empty for parsing w/o tree constr. - // operation because we don't the need to track the details about - // how we parse prc rule. - // / - prc.children = nil - prc.start = nil - prc.stop = nil - // The exception that forced prc rule to return. If the rule successfully - // completed, prc is {@code nil}. - prc.exception = nil - - return prc -} - -func (prc *BaseParserRuleContext) SetException(e RecognitionException) { - prc.exception = e -} - -func (prc *BaseParserRuleContext) GetChildren() []Tree { - return prc.children -} - -func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { - // from RuleContext - prc.parentCtx = ctx.parentCtx - prc.invokingState = ctx.invokingState - prc.children = nil - prc.start = ctx.start - prc.stop = ctx.stop -} - -func (prc *BaseParserRuleContext) GetText() string { - if prc.GetChildCount() == 0 { - return "" - } - - var s string - for _, child := range prc.children { - s += child.(ParseTree).GetText() - } - - return s -} - -// Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { -} - -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { -} - -// * Does not set parent link other add methods do that/// -func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { - if prc.children == nil { - prc.children = make([]Tree, 0) - } - if child == nil { - panic("Child may not be null") - } - prc.children = append(prc.children, child) - return child -} - -func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { - if prc.children == nil { - prc.children = make([]Tree, 0) - } - if child == nil { - panic("Child may not be null") - } - prc.children = append(prc.children, child) - return child -} - -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove -// generic ruleContext object. 
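RemoveLastChild, whose doc comment ends above, pairs with AddChild in the parser's EnterOuterAlt: the generic context added on rule entry is popped and the labeled context attached in its place. The mechanics are plain slice operations:

package main

import "fmt"

// A parse-tree context in miniature: an ordered child list plus a
// remove-last operation, matching how EnterOuterAlt swaps a freshly
// created context in for the generic one added on rule entry.
type ctx struct {
	name     string
	children []*ctx
}

func (c *ctx) addChild(child *ctx) { c.children = append(c.children, child) }

func (c *ctx) removeLastChild() {
	if len(c.children) > 0 {
		c.children = c.children[:len(c.children)-1]
	}
}

func main() {
	root := &ctx{name: "root"}
	root.addChild(&ctx{name: "genericStatement"})
	root.removeLastChild()                   // toss the placeholder...
	root.addChild(&ctx{name: "ifStatement"}) // ...and attach the labeled ctx
	fmt.Println(root.children[0].name)       // ifStatement
}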
-// / -func (prc *BaseParserRuleContext) RemoveLastChild() { - if prc.children != nil && len(prc.children) > 0 { - prc.children = prc.children[0 : len(prc.children)-1] - } -} - -func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { - - node := NewTerminalNodeImpl(token) - prc.addTerminalNodeChild(node) - node.parentCtx = prc - return node - -} - -func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { - node := NewErrorNodeImpl(badToken) - prc.addTerminalNodeChild(node) - node.parentCtx = prc - return node -} - -func (prc *BaseParserRuleContext) GetChild(i int) Tree { - if prc.children != nil && len(prc.children) >= i { - return prc.children[i] - } - - return nil -} - -func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { - if childType == nil { - return prc.GetChild(i).(RuleContext) - } - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if reflect.TypeOf(child) == childType { - if i == 0 { - return child.(RuleContext) - } - - i-- - } - } - - return nil -} - -func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { - return TreesStringTree(prc, ruleNames, recog) -} - -func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { - return prc -} - -func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { - return visitor.VisitChildren(prc) -} - -func (prc *BaseParserRuleContext) SetStart(t Token) { - prc.start = t -} - -func (prc *BaseParserRuleContext) GetStart() Token { - return prc.start -} - -func (prc *BaseParserRuleContext) SetStop(t Token) { - prc.stop = t -} - -func (prc *BaseParserRuleContext) GetStop() Token { - return prc.stop -} - -func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if c2, ok := child.(TerminalNode); ok { - if c2.GetSymbol().GetTokenType() == ttype { - if i == 0 { - return c2 - } - - i-- - } - } - } - return nil -} - -func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { - if prc.children == nil { - return make([]TerminalNode, 0) - } - - tokens := make([]TerminalNode, 0) - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if tchild, ok := child.(TerminalNode); ok { - if tchild.GetSymbol().GetTokenType() == ttype { - tokens = append(tokens, tchild) - } - } - } - - return tokens -} - -func (prc *BaseParserRuleContext) GetPayload() interface{} { - return prc -} - -func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext { - if prc.children == nil || i < 0 || i >= len(prc.children) { - return nil - } - - j := -1 // what element have we found with ctxType? 
- for _, o := range prc.children { - - childType := reflect.TypeOf(o) - - if childType.Implements(ctxType) { - j++ - if j == i { - return o.(RuleContext) - } - } - } - return nil -} - -// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do -// check for convertibility - -func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { - return prc.getChild(ctxType, i) -} - -func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { - if prc.children == nil { - return make([]RuleContext, 0) - } - - contexts := make([]RuleContext, 0) - - for _, child := range prc.children { - childType := reflect.TypeOf(child) - - if childType.ConvertibleTo(ctxType) { - contexts = append(contexts, child.(RuleContext)) - } - } - return contexts -} - -func (prc *BaseParserRuleContext) GetChildCount() int { - if prc.children == nil { - return 0 - } - - return len(prc.children) -} - -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { - if prc.start == nil || prc.stop == nil { - return TreeInvalidInterval - } - - return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) -} - -//need to manage circular dependencies, so export now - -// Print out a whole tree, not just a node, in LISP format -// (root child1 .. childN). Print just a node if b is a leaf. -// - -func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { - - var p ParserRuleContext = prc - s := "[" - for p != nil && p != stop { - if ruleNames == nil { - if !p.IsEmpty() { - s += strconv.Itoa(p.GetInvokingState()) - } - } else { - ri := p.GetRuleIndex() - var ruleName string - if ri >= 0 && ri < len(ruleNames) { - ruleName = ruleNames[ri] - } else { - ruleName = strconv.Itoa(ri) - } - s += ruleName - } - if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) { - s += " " - } - pi := p.GetParent() - if pi != nil { - p = pi.(ParserRuleContext) - } else { - p = nil - } - } - s += "]" - return s -} - -var RuleContextEmpty = NewBaseParserRuleContext(nil, -1) - -type InterpreterRuleContext interface { - ParserRuleContext -} - -type BaseInterpreterRuleContext struct { - *BaseParserRuleContext -} - -func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { - - prc := new(BaseInterpreterRuleContext) - - prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) - - prc.RuleIndex = ruleIndex - - return prc -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go deleted file mode 100644 index 99acb333..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go +++ /dev/null @@ -1,756 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. -// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EmptyReturnState}. 
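A prediction context is essentially a linked stack of ATN return states, with $ encoded as the EmptyReturnState sentinel described above; the singleton types follow below. A toy singleton-only version:

package main

import "fmt"

const emptyReturnState = 0x7FFFFFFF // mirrors BasePredictionContextEmptyReturnState

// pctx is a toy singleton prediction context: a linked stack of ATN return
// states, with nil parent plus emptyReturnState playing the role of $.
type pctx struct {
	parent      *pctx
	returnState int
}

func (p *pctx) hasEmptyPath() bool { return p.returnState == emptyReturnState }

func (p *pctx) String() string {
	if p.parent == nil {
		return "$"
	}
	return fmt.Sprintf("%d %s", p.returnState, p.parent)
}

func main() {
	empty := &pctx{returnState: emptyReturnState}
	inner := &pctx{parent: empty, returnState: 21}
	outer := &pctx{parent: inner, returnState: 17}
	fmt.Println(outer)                // 17 21 $
	fmt.Println(empty.hasEmptyPath()) // true
}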
-// / - -var ( - BasePredictionContextglobalNodeCount = 1 - BasePredictionContextid = BasePredictionContextglobalNodeCount -) - -type PredictionContext interface { - hash() int - GetParent(int) PredictionContext - getReturnState(int) int - equals(PredictionContext) bool - length() int - isEmpty() bool - hasEmptyPath() bool - String() string -} - -type BasePredictionContext struct { - cachedHash int -} - -func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func calculateHash(parent PredictionContext, returnState int) int { - h := murmurInit(1) - h = murmurUpdate(h, parent.hash()) - h = murmurUpdate(h, returnState) - return murmurFinish(h, 2) -} - -func calculateEmptyHash() int { - h := murmurInit(1) - return murmurFinish(h, 0) -} - -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -type PredictionContextCache struct { - cache map[PredictionContext]PredictionContext -} - -func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a Newcontext to the cache. -// Protect shared cache from unsafe thread access. -// -func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { - if ctx == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY - } - existing := p.cache[ctx] - if existing != nil { - return existing - } - p.cache[ctx] = ctx - return ctx -} - -func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { - return p.cache[ctx] -} - -func (p *PredictionContextCache) length() int { - return len(p.cache) -} - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - *BasePredictionContext - - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(37) - - if parent != nil { - s.cachedHash = calculateHash(parent, returnState) - } else { - s.cachedHash = calculateEmptyHash() - } - - s.parentCtx = parent - s.returnState = returnState - - return s -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(index int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool { - if b == other { - return true - } else if _, ok := 
other.(*BaseSingletonPredictionContext); !ok { - return false - } else if b.hash() != other.hash() { - return false // can't be same if hash is different - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.returnState != other.getReturnState(0) { - return false - } else if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) hash() int { - h := murmurInit(1) - - if b.parentCtx == nil { - return murmurFinish(h, 0) - } - - h = murmurUpdate(h, b.parentCtx.hash()) - h = murmurUpdate(h, b.returnState) - return murmurFinish(h, 2) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} - -var BasePredictionContextEMPTY = NewEmptyPredictionContext() - -type EmptyPredictionContext struct { - *BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - - return p -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(index int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) equals(other PredictionContext) bool { - return e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} - -type ArrayPredictionContext struct { - *BasePredictionContext - - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. 
- - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(37) - - for i := range parents { - c.cachedHash += calculateHash(parents[i], returnStates[i]) - } - - c.parents = parents - c.returnStates = returnStates - - return c -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -func (a *ArrayPredictionContext) equals(other PredictionContext) bool { - if _, ok := other.(*ArrayPredictionContext); !ok { - return false - } else if a.cachedHash != other.hash() { - return false // can't be same if hash is different - } else { - otherP := other.(*ArrayPredictionContext) - return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents - } -} - -func (a *ArrayPredictionContext) hash() int { - h := murmurInit(1) - - for _, p := range a.parents { - h = murmurUpdate(h, p.hash()) - } - - for _, r := range a.returnStates { - h = murmurUpdate(h, r) - } - - return murmurFinish(h, 2 * len(a.parents)) -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := "[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} - -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. -// / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { - if outerContext == nil { - outerContext = RuleContextEmpty - } - // if we are in RuleContext of start rule, s, then BasePredictionContext - // is EMPTY. Nobody called us. 
(if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == RuleContextEmpty { - return BasePredictionContextEMPTY - } - // If we have a parent, convert it to a BasePredictionContext graph - parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) - state := a.states[outerContext.GetInvokingState()] - transition := state.GetTransitions()[0] - - return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) -} - -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - // share same graph if both same - if a == b { - return a - } - - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if rootIsWildcard { - if _, ok := a.(*EmptyPredictionContext); ok { - return a - } - if _, ok := b.(*EmptyPredictionContext); ok { - return b - } - } - // convert singleton so both are arrays to normalize - if _, ok := a.(*BaseSingletonPredictionContext); ok { - a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) - } - if _, ok := b.(*BaseSingletonPredictionContext); ok { - b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) - } - return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) -} - -// -// Merge two {@link SingletonBasePredictionContext} instances. -// -//Stack tops equal, parents merge is same return left graph.
-//
-// Same stack top, parents differ; merge parents giving array node, then
-// remainders of those graphs. A new root node is created to point to the
-// merged parents.
-//
-// Different stack tops pointing to same parent; make array node for the
-// root where both elements in the root point to the same (original)
-// parent.
-//
-// Different stack tops pointing to different parents; make array node for
-// the root where each element points to the corresponding original
-// parent.
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is superset of any graph; return {@link //EMPTY}.
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY}; return left graph.
-//
-// Special case of last merge if local context.
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-// Must keep all contexts; {@link //EMPTY} in array is a special value (and
-// nil parent).
-//
-// Different tops, different parents.
-//
-// Shared top, same parents.
-//
-// Shared top, different parents.
-//
-// Shared top, all shared parents.
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-//
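
The cases above are easiest to see with the constructors from this file. A minimal sketch, written as if inside package antlr, assuming the unexported mergeSingletons/mergeArrays machinery (elided from this hunk) behaves as documented:

	func exampleMergeCases() {
		// Wildcard root: $ merged with anything under rootIsWildcard=true is $
		// (merge returns BasePredictionContextEMPTY, per the code below).
		a := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 7)
		wild := merge(BasePredictionContextEMPTY, a, true, nil)
		_ = wild // == BasePredictionContextEMPTY

		// Identical graphs are shared: merging a context with itself returns it.
		_ = merge(a, a, false, nil) // == a

		// Same parent, different return states: expect an array node whose
		// elements both point at the original parent.
		b := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 9)
		merged := merge(a, b, false, nil)
		_, isArray := merged.(*ArrayPredictionContext)
		_ = isArray // true for this pair
	}
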
- // When using this prediction mode, the parser will either return a correct - // parse tree (i.e. the same parse tree that would be returned with the - // {@link //LL} prediction mode), or it will Report a syntax error. If a - // syntax error is encountered when using the {@link //SLL} prediction mode, - // it may be due to either an actual syntax error in the input or indicate - // that the particular combination of grammar and input requires the more - // powerful {@link //LL} prediction abilities to complete successfully.
- // - //- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.
- // - PredictionModeSLL = 0 - // - // The LL(*) prediction mode. This prediction mode allows the current parser - // context to be used for resolving SLL conflicts that occur during - // prediction. This is the fastest prediction mode that guarantees correct - // parse results for all combinations of grammars with syntactically correct - // inputs. - // - //- // When using this prediction mode, the parser will make correct decisions - // for all syntactically-correct grammar and input combinations. However, in - // cases where the grammar is truly ambiguous this prediction mode might not - // Report a precise answer for exactly which alternatives are - // ambiguous.
- // - //- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.
- // - PredictionModeLL = 1 - // - // The LL(*) prediction mode with exact ambiguity detection. In addition to - // the correctness guarantees provided by the {@link //LL} prediction mode, - // this prediction mode instructs the prediction algorithm to determine the - // complete and exact set of ambiguous alternatives for every ambiguous - // decision encountered while parsing. - // - //- // This prediction mode may be used for diagnosing ambiguities during - // grammar development. Due to the performance overhead of calculating sets - // of ambiguous alternatives, this prediction mode should be avoided when - // the exact results are not necessary.
- // - //- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.
- // - PredictionModeLLExactAmbigDetection = 2 -) - -// -// Computes the SLL prediction termination condition. -// -//-// This method computes the SLL prediction termination condition for both of -// the following cases.
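
In application code, these three constants are applied to a parser's ATN simulator. A hedged sketch before the cases below — MyParser/NewMyParser are hypothetical generated names, and it assumes the runtime's GetInterpreter/SetPredictionMode accessors:

	import "github.com/antlr/antlr4/runtime/Go/antlr"

	// newDiagnosingParser builds a parser tuned for grammar debugging.
	func newDiagnosingParser(tokens antlr.TokenStream) *MyParser {
		p := NewMyParser(tokens) // hypothetical generated constructor
		// Exact ambiguity reporting is expensive; use it while developing a
		// grammar, then drop back to PredictionModeLL (or SLL) in production.
		p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection)
		return p
	}
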
-// -//COMBINED SLL+LL PARSING
-// -//When LL-fallback is enabled upon SLL conflict, correct predictions are -// ensured regardless of how the termination condition is computed by this -// method. Due to the substantially higher cost of LL prediction, the -// prediction should only fall back to LL when the additional lookahead -// cannot lead to a unique SLL prediction.
-// -//Assuming combined SLL+LL parsing, an SLL configuration set with only -// conflicting subsets should fall back to full LL, even if the -// configuration sets don't resolve to the same alternative (e.g. -// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting -// configuration, SLL could continue with the hopes that more lookahead will -// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-// -//HEURISTIC
-// -//As a heuristic, we stop prediction when we see any conflicting subset -// unless we see a state that only has one alternative associated with it. -// The single-alt-state thing lets prediction continue upon rules like -// (otherwise, it would admit defeat too soon):
-//
-// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
-//
-// When the ATN simulation reaches the state before {@code ';'}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
-// processing this node because alternative two has another way to continue,
-// via {@code [6|2|[]]}.
-//
-// It also lets us continue for this rule:
-//
-// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
-// -//After Matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not stop -// working on this state. In the previous example, we're concerned with -// states associated with the conflicting alternatives. Here alt 3 is not -// associated with the conflicting configs, but since we can continue -// looking for input reasonably, don't declare the state done.
-// -//PURE SLL PARSING
-// -//To handle pure SLL parsing, all we have to do is make sure that we -// combine stack contexts for configurations that differ only by semantic -// predicate. From there, we can do the usual SLL termination heuristic.
-// -//PREDICATES IN SLL+LL PARSING
-// -//SLL decisions don't evaluate predicates until after they reach DFA stop -// states because they need to create the DFA cache that works in all -// semantic situations. In contrast, full LL evaluates predicates collected -// during start state computation so it can ignore predicates thereafter. -// This means that SLL termination detection can totally ignore semantic -// predicates.
-// -//Implementation-wise, {@link ATNConfigSet} combines stack contexts but not -// semantic predicate contexts so we might see two configurations like the -// following.
-// -//{@code (s, 1, x, {}), (s, 1, x', {p})}
-// -//Before testing these configurations against others, we have to merge -// {@code x} and {@code x'} (without modifying the existing configurations). -// For example, we test {@code (x+x')==x''} when looking for conflicts in -// the following configurations.
-// -//{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
-// -//If the configuration set has predicates (as indicated by -// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of -// the configurations to strip out all of the predicates so that a standard -// {@link ATNConfigSet} will merge everything ignoring predicates.
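
Taken together with the LL fallback described above, the usual application-side pattern is a two-stage parse: bail out of a fast SLL attempt and rerun with full LL only when needed. A sketch under stated assumptions — NewMyLexer/NewMyParser/StartRule are hypothetical generated names, and it assumes the Go runtime's BailErrorStrategy aborts by panicking:

	import (
		"fmt"

		"github.com/antlr/antlr4/runtime/Go/antlr"
	)

	func parseTwoStage(input string) (tree antlr.ParseTree, err error) {
		run := func(mode int, bail bool) (t antlr.ParseTree, err error) {
			defer func() {
				// BailErrorStrategy aborts the SLL pass by panicking.
				if r := recover(); r != nil {
					err = fmt.Errorf("parse aborted: %v", r)
				}
			}()
			lexer := NewMyLexer(antlr.NewInputStream(input))       // hypothetical
			p := NewMyParser(antlr.NewCommonTokenStream(lexer, 0)) // hypothetical
			p.GetInterpreter().SetPredictionMode(mode)
			if bail {
				p.SetErrorHandler(antlr.NewBailErrorStrategy())
			}
			return p.StartRule(), nil // hypothetical entry rule
		}
		if t, err := run(antlr.PredictionModeSLL, true); err == nil {
			return t, nil // SLL succeeded, so the tree is correct
		}
		// Only now pay for full LL: either a real syntax error or an
		// SLL-only conflict, and full LL will tell us which.
		return run(antlr.PredictionModeLL, false)
	}
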
-// -func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to Match additional input so we terminate prediction. - // - if PredictionModeallConfigsInRuleStopStates(configs) { - return true - } - // pure SLL mode parsing - if mode == PredictionModeSLL { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if configs.HasSemanticContext() { - // dup configs, tossing out semantic predicates - dup := NewBaseATNConfigSet(false) - for _, c := range configs.GetItems() { - - // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) - dup.Add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) -} - -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); ok { - return true - } - } - return false -} - -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { - - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); !ok { - return false - } - } - return true -} - -// -// Full LL prediction termination. -// -//Can we stop looking ahead during ATN simulation or is there some -// uncertainty as to which alternative we will ultimately pick, after -// consuming more input? Even if there are partial conflicts, we might know -// that everything is going to resolve to the same minimum alternative. That -// means we can stop since no more lookahead will change that fact. On the -// other hand, there might be multiple conflicts that resolve to different -// minimums. That means we need more look ahead to decide which of those -// alternatives we should predict.
-// -//The basic idea is to split the set of configurations {@code C}, into -// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with -// non-conflicting configurations. Two configurations conflict if they have -// identical {@link ATNConfig//state} and {@link ATNConfig//context} values -// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} -// and {@code (s, j, ctx, _)} for {@code i!=j}.
-// -//Reduce these configuration subsets to the set of possible alternatives. -// You can compute the alternative subsets in one pass as follows:
-// -//{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in -// {@code C} holding {@code s} and {@code ctx} fixed.
-// -//Or in pseudo-code, for each configuration {@code c} in {@code C}:
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x,
-// not alt and not pred
-//
-// The values in {@code map} are the set of {@code A_s,ctx} sets.
-// -//If {@code |A_s,ctx|=1} then there is no conflict associated with -// {@code s} and {@code ctx}.
-// -//Reduce the subsets to singletons by choosing a minimum of each subset. If -// the union of these alternative subsets is a singleton, then no amount of -// more lookahead will help us. We will always pick that alternative. If, -// however, there is more than one alternative, then we are uncertain which -// alternative to predict and must continue looking for resolution. We may -// or may not discover an ambiguity in the future, even if there are no -// conflicting subsets this round.
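
The map sketched in the pseudo-code above is what PredictionModegetConflictingAltSubsets builds later in this file. A condensed sketch of the same construction, written as if inside package antlr and using the same state+context key:

	func altSubsets(configs []ATNConfig) []*BitSet {
		byKey := make(map[int]*BitSet)
		for _, c := range configs {
			// Key on (state, context) only; alt and predicate are excluded.
			key := 31*c.GetState().GetStateNumber() + c.GetContext().hash()
			if byKey[key] == nil {
				byKey[key] = NewBitSet()
			}
			byKey[key].add(c.GetAlt())
		}
		sets := make([]*BitSet, 0, len(byKey))
		for _, s := range byKey {
			sets = append(sets, s)
		}
		return sets
	}
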
-// -//The biggest sin is to terminate early because it means we've made a -// decision but were uncertain as to the eventual outcome. We haven't used -// enough lookahead. On the other hand, announcing a conflict too late is no -// big deal you will still have the conflict. It's just inefficient. It -// might even look until the end of file.
-// -//No special consideration for semantic predicates is required because -// predicates are evaluated on-the-fly for full LL prediction, ensuring that -// no configuration contains a semantic context during the termination -// check.
-// -//CONFLICTING CONFIGS
-// -//Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict -// when {@code i!=j} but {@code x=x'}. Because we merge all -// {@code (s, i, _)} configurations together, that means that there are at -// most {@code n} configurations associated with state {@code s} for -// {@code n} possible alternatives in the decision. The merged stacks -// complicate the comparison of configuration contexts {@code x} and -// {@code x'}. Sam checks to see if one is a subset of the other by calling -// merge and checking to see if the merged result is either {@code x} or -// {@code x'}. If the {@code x} associated with lowest alternative {@code i} -// is the superset, then {@code i} is the only possible prediction since the -// others resolve to {@code min(i)} as well. However, if {@code x} is -// associated with {@code j>i} then at least one stack configuration for -// {@code j} is not in conflict with alternative {@code i}. The algorithm -// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing an equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity, but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
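
The merge-based subset test mentioned above can be phrased directly against merge from prediction_context.go. A rough in-package sketch — equality of the merged result is the "merging adds nothing new" check:

	func contextSubsumes(x, y PredictionContext) bool {
		// If merging x into y yields y again, every stack in x is already in y.
		return merge(x, y, false, nil).equals(y)
	}
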
-// -//CONTINUE/STOP RULE
-// -//Continue if union of resolved alternative sets from non-conflicting and -// conflicting alternative subsets has more than one alternative. We are -// uncertain about which alternative to predict.
-// -//The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which -// alternatives are still in the running for the amount of input we've -// consumed at this point. The conflicting sets let us to strip away -// configurations that won't lead to more states because we resolve -// conflicts to the configuration with a minimum alternate for the -// conflicting set.
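
The continue/stop decision then reduces to one union. An in-package sketch using PredictionModeGetAlts, defined below:

	func moreLookaheadNeeded(altsets []*BitSet) bool {
		// Uncertain while more than one alternative is still in the running.
		return PredictionModeGetAlts(altsets).length() > 1
	}
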
-// -//CASES
-// -//EXACT AMBIGUITY DETECTION
-// -//If all states Report the same conflicting set of alternatives, then we -// know we have the exact ambiguity set.
-//
-// {@code |A_i|>1} and {@code A_i = A_j} for all i, j.
-//
-// In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
-// ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
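
An in-package sketch of that stopping test, built from the two helpers defined below:

	func canReportExactAmbiguity(altsets []*BitSet) bool {
		// Stop only when every subset conflicts and all subsets are identical.
		return PredictionModeallSubsetsConflict(altsets) &&
			PredictionModeallSubsetsEqual(altsets)
	}
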
-// -func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) -} - -// -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) -} - -// -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -// -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() == 1 { - return true - } - } - return false -} - -// -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() > 1 { - return true - } - } - return false -} - -// -// Determines if every alternative subset in {@code altsets} is equivalent. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -// -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if first == nil { - first = alts - } else if alts != first { - return false - } - } - - return true -} - -// -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. -// -// @param altsets a collection of alternative subsets -// -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) - if all.length() == 1 { - return all.minValue() - } - - return ATNInvalidAltNumber -} - -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. -// -// @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} -// -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() - for _, alts := range altsets { - all.or(alts) - } - return all -} - -// -// This func gets the conflicting alt subsets from a configuration set. 
-// For each configuration {@code c} in {@code configs}: -// -//-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not -// alt and not pred -//-// -func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := make(map[int]*BitSet) - - for _, c := range configs.GetItems() { - key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash() - - alts, ok := configToAlts[key] - if !ok { - alts = NewBitSet() - configToAlts[key] = alts - } - alts.add(c.GetAlt()) - } - - values := make([]*BitSet, 0, 10) - for _, v := range configToAlts { - values = append(values, v) - } - return values -} - -// -// Get a map from state to alt subset from a configuration set. For each -// configuration {@code c} in {@code configs}: -// -//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt} -//-// -func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { - m := NewAltDict() - - for _, c := range configs.GetItems() { - alts := m.Get(c.GetState().String()) - if alts == nil { - alts = NewBitSet() - m.put(c.GetState().String(), alts) - } - alts.(*BitSet).add(c.GetAlt()) - } - return m -} - -func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { - values := PredictionModeGetStateToAltMap(configs).values() - for i := 0; i < len(values); i++ { - if values[i].(*BitSet).length() == 1 { - return true - } - } - return false -} - -func PredictionModegetSingleViableAlt(altsets []*BitSet) int { - result := ATNInvalidAltNumber - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - minAlt := alts.minValue() - if result == ATNInvalidAltNumber { - result = minAlt - } else if result != minAlt { // more than 1 viable alt - return ATNInvalidAltNumber - } - } - return result -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go deleted file mode 100644 index d114800f..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strings" - - "strconv" -) - -type Recognizer interface { - GetLiteralNames() []string - GetSymbolicNames() []string - GetRuleNames() []string - - Sempred(RuleContext, int, int) bool - Precpred(RuleContext, int) bool - - GetState() int - SetState(int) - Action(RuleContext, int, int) - AddErrorListener(ErrorListener) - RemoveErrorListeners() - GetATN() *ATN - GetErrorListenerDispatch() ErrorListener -} - -type BaseRecognizer struct { - listeners []ErrorListener - state int - - RuleNames []string - LiteralNames []string - SymbolicNames []string - GrammarFileName string -} - -func NewBaseRecognizer() *BaseRecognizer { - rec := new(BaseRecognizer) - rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} - rec.state = -1 - return rec -} - -var tokenTypeMapCache = make(map[string]int) -var ruleIndexMapCache = make(map[string]int) - -func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.8" - if runtimeVersion != toolVersion { - fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) - } -} - -func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { - panic("action not implemented on Recognizer!") -} - -func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { - b.listeners = append(b.listeners, listener) -} - -func (b *BaseRecognizer) RemoveErrorListeners() { - b.listeners = make([]ErrorListener, 0) -} - -func (b *BaseRecognizer) GetRuleNames() []string { - return b.RuleNames -} - -func (b *BaseRecognizer) GetTokenNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetSymbolicNames() []string { - return b.SymbolicNames -} - -func (b *BaseRecognizer) GetLiteralNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetState() int { - return b.state -} - -func (b *BaseRecognizer) SetState(v int) { - b.state = v -} - -//func (b *Recognizer) GetTokenTypeMap() { -// var tokenNames = b.GetTokenNames() -// if (tokenNames==nil) { -// panic("The current 
recognizer does not provide a list of token names.") -// } -// var result = tokenTypeMapCache[tokenNames] -// if(result==nil) { -// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) -// result.EOF = TokenEOF -// tokenTypeMapCache[tokenNames] = result -// } -// return result -//} - -// Get a map from rule names to rule indexes. -// -//
Used for XPath and tree pattern compilation.
-// -func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { - - panic("Method not defined!") - // var ruleNames = b.GetRuleNames() - // if (ruleNames==nil) { - // panic("The current recognizer does not provide a list of rule names.") - // } - // - // var result = ruleIndexMapCache[ruleNames] - // if(result==nil) { - // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) - // ruleIndexMapCache[ruleNames] = result - // } - // return result -} - -func (b *BaseRecognizer) GetTokenType(tokenName string) int { - panic("Method not defined!") - // var ttype = b.GetTokenTypeMap()[tokenName] - // if (ttype !=nil) { - // return ttype - // } else { - // return TokenInvalidType - // } -} - -//func (b *Recognizer) GetTokenTypeMap() map[string]int { -// Vocabulary vocabulary = getVocabulary() -// -// Synchronized (tokenTypeMapCache) { -// Map-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. -// - -func (b *BaseRuleContext) GetParent() Tree { - return b.parentCtx -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go deleted file mode 100644 index 49205a16..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// A tree structure used to record the semantic context in which -// an ATN configuration is valid. It's either a single predicate, -// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. -// -//
I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of -// {@link SemanticContext} within the scope of this outer class.
-// - -type SemanticContext interface { - comparable - - evaluate(parser Recognizer, outerContext RuleContext) bool - evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext - - hash() int - String() string -} - -func SemanticContextandContext(a, b SemanticContext) SemanticContext { - if a == nil || a == SemanticContextNone { - return b - } - if b == nil || b == SemanticContextNone { - return a - } - result := NewAND(a, b) - if len(result.opnds) == 1 { - return result.opnds[0] - } - - return result -} - -func SemanticContextorContext(a, b SemanticContext) SemanticContext { - if a == nil { - return b - } - if b == nil { - return a - } - if a == SemanticContextNone || b == SemanticContextNone { - return SemanticContextNone - } - result := NewOR(a, b) - if len(result.opnds) == 1 { - return result.opnds[0] - } - - return result -} - -type Predicate struct { - ruleIndex int - predIndex int - isCtxDependent bool -} - -func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { - p := new(Predicate) - - p.ruleIndex = ruleIndex - p.predIndex = predIndex - p.isCtxDependent = isCtxDependent // e.g., $i ref in pred - return p -} - -//The default {@link SemanticContext}, which is semantically equivalent to -//a predicate of the form {@code {true}?}. - -var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false) - -func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - return p -} - -func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { - - var localctx RuleContext - - if p.isCtxDependent { - localctx = outerContext - } - - return parser.Sempred(localctx, p.ruleIndex, p.predIndex) -} - -func (p *Predicate) equals(other interface{}) bool { - if p == other { - return true - } else if _, ok := other.(*Predicate); !ok { - return false - } else { - return p.ruleIndex == other.(*Predicate).ruleIndex && - p.predIndex == other.(*Predicate).predIndex && - p.isCtxDependent == other.(*Predicate).isCtxDependent - } -} - -func (p *Predicate) hash() int { - return p.ruleIndex*43 + p.predIndex*47 -} - -func (p *Predicate) String() string { - return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" -} - -type PrecedencePredicate struct { - precedence int -} - -func NewPrecedencePredicate(precedence int) *PrecedencePredicate { - - p := new(PrecedencePredicate) - p.precedence = precedence - - return p -} - -func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { - return parser.Precpred(outerContext, p.precedence) -} - -func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - if parser.Precpred(outerContext, p.precedence) { - return SemanticContextNone - } - - return nil -} - -func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { - return p.precedence - other.precedence -} - -func (p *PrecedencePredicate) equals(other interface{}) bool { - if p == other { - return true - } else if _, ok := other.(*PrecedencePredicate); !ok { - return false - } else { - return p.precedence == other.(*PrecedencePredicate).precedence - } -} - -func (p *PrecedencePredicate) hash() int { - return p.precedence * 51 -} - -func (p *PrecedencePredicate) String() string { - return "{" + strconv.Itoa(p.precedence) + ">=prec}?" 
-} - -func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate { - result := make([]*PrecedencePredicate, 0) - - for _, v := range set.values() { - if c2, ok := v.(*PrecedencePredicate); ok { - result = append(result, c2) - } - } - - return result -} - -// A semantic context which is true whenever none of the contained contexts -// is false.` - -type AND struct { - opnds []SemanticContext -} - -func NewAND(a, b SemanticContext) *AND { - - operands := NewSet(nil, nil) - if aa, ok := a.(*AND); ok { - for _, o := range aa.opnds { - operands.add(o) - } - } else { - operands.add(a) - } - - if ba, ok := b.(*AND); ok { - for _, o := range ba.opnds { - operands.add(o) - } - } else { - operands.add(b) - } - precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) - if len(precedencePredicates) > 0 { - // interested in the transition with the lowest precedence - var reduced *PrecedencePredicate - - for _, p := range precedencePredicates { - if reduced == nil || p.precedence < reduced.precedence { - reduced = p - } - } - - operands.add(reduced) - } - - vs := operands.values() - opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } - - and := new(AND) - and.opnds = opnds - - return and -} - -func (a *AND) equals(other interface{}) bool { - if a == other { - return true - } else if _, ok := other.(*AND); !ok { - return false - } else { - for i, v := range other.(*AND).opnds { - if !a.opnds[i].equals(v) { - return false - } - } - return true - } -} - -// -// {@inheritDoc} -// -//-// The evaluation of predicates by a context is short-circuiting, but -// unordered.
-// -func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(a.opnds); i++ { - if !a.opnds[i].evaluate(parser, outerContext) { - return false - } - } - return true -} - -func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - differs := false - operands := make([]SemanticContext, 0) - - for i := 0; i < len(a.opnds); i++ { - context := a.opnds[i] - evaluated := context.evalPrecedence(parser, outerContext) - differs = differs || (evaluated != context) - if evaluated == nil { - // The AND context is false if any element is false - return nil - } else if evaluated != SemanticContextNone { - // Reduce the result by Skipping true elements - operands = append(operands, evaluated) - } - } - if !differs { - return a - } - - if len(operands) == 0 { - // all elements were true, so the AND context is true - return SemanticContextNone - } - - var result SemanticContext - - for _, o := range operands { - if result == nil { - result = o - } else { - result = SemanticContextandContext(result, o) - } - } - - return result -} - -func (a *AND) hash() int { - h := murmurInit(37) // Init with a value different from OR - for _, op := range a.opnds { - h = murmurUpdate(h, op.hash()) - } - return murmurFinish(h, len(a.opnds)) -} - -func (a *OR) hash() int { - h := murmurInit(41) // Init with a value different from AND - for _, op := range a.opnds { - h = murmurUpdate(h, op.hash()) - } - return murmurFinish(h, len(a.opnds)) -} - -func (a *AND) String() string { - s := "" - - for _, o := range a.opnds { - s += "&& " + fmt.Sprint(o) - } - - if len(s) > 3 { - return s[0:3] - } - - return s -} - -// -// A semantic context which is true whenever at least one of the contained -// contexts is true. -// - -type OR struct { - opnds []SemanticContext -} - -func NewOR(a, b SemanticContext) *OR { - - operands := NewSet(nil, nil) - if aa, ok := a.(*OR); ok { - for _, o := range aa.opnds { - operands.add(o) - } - } else { - operands.add(a) - } - - if ba, ok := b.(*OR); ok { - for _, o := range ba.opnds { - operands.add(o) - } - } else { - operands.add(b) - } - precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) - if len(precedencePredicates) > 0 { - // interested in the transition with the lowest precedence - var reduced *PrecedencePredicate - - for _, p := range precedencePredicates { - if reduced == nil || p.precedence > reduced.precedence { - reduced = p - } - } - - operands.add(reduced) - } - - vs := operands.values() - - opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } - - o := new(OR) - o.opnds = opnds - - return o -} - -func (o *OR) equals(other interface{}) bool { - if o == other { - return true - } else if _, ok := other.(*OR); !ok { - return false - } else { - for i, v := range other.(*OR).opnds { - if !o.opnds[i].equals(v) { - return false - } - } - return true - } -} - -//-// The evaluation of predicates by o context is short-circuiting, but -// unordered.
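
A short in-package sketch of combining contexts with the constructors above; the SemanticContextNone short-circuits are visible in andContext/orContext:

	func combinePreds() {
		p1 := NewPredicate(0, 0, false) // {p1}?
		p2 := NewPredicate(1, 1, false) // {p2}?
		and := SemanticContextandContext(p1, p2) // p1 && p2
		or := SemanticContextorContext(p1, SemanticContextNone)
		_ = and
		_ = or // == SemanticContextNone: {true}? || p1 is always true
	}
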
-// -func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(o.opnds); i++ { - if o.opnds[i].evaluate(parser, outerContext) { - return true - } - } - return false -} - -func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - differs := false - operands := make([]SemanticContext, 0) - for i := 0; i < len(o.opnds); i++ { - context := o.opnds[i] - evaluated := context.evalPrecedence(parser, outerContext) - differs = differs || (evaluated != context) - if evaluated == SemanticContextNone { - // The OR context is true if any element is true - return SemanticContextNone - } else if evaluated != nil { - // Reduce the result by Skipping false elements - operands = append(operands, evaluated) - } - } - if !differs { - return o - } - if len(operands) == 0 { - // all elements were false, so the OR context is false - return nil - } - var result SemanticContext - - for _, o := range operands { - if result == nil { - result = o - } else { - result = SemanticContextorContext(result, o) - } - } - - return result -} - -func (o *OR) String() string { - s := "" - - for _, o := range o.opnds { - s += "|| " + fmt.Sprint(o) - } - - if len(s) > 3 { - return s[0:3] - } - - return s -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go deleted file mode 100644 index 2d8e9909..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" - "strings" -) - -type TokenSourceCharStreamPair struct { - tokenSource TokenSource - charStream CharStream -} - -// A token has properties: text, type, line, character position in the line -// (so we can ignore tabs), token channel, index, and source from which -// we obtained this token. - -type Token interface { - GetSource() *TokenSourceCharStreamPair - GetTokenType() int - GetChannel() int - GetStart() int - GetStop() int - GetLine() int - GetColumn() int - - GetText() string - SetText(s string) - - GetTokenIndex() int - SetTokenIndex(v int) - - GetTokenSource() TokenSource - GetInputStream() CharStream -} - -type BaseToken struct { - source *TokenSourceCharStreamPair - tokenType int // token type of the token - channel int // The parser ignores everything not on DEFAULT_CHANNEL - start int // optional return -1 if not implemented. - stop int // optional return -1 if not implemented. - tokenIndex int // from 0..n-1 of the token object in the input stream - line int // line=1..n of the 1st character - column int // beginning of the line at which it occurs, 0..n-1 - text string // text of the token. - readOnly bool -} - -const ( - TokenInvalidType = 0 - - // During lookahead operations, this "token" signifies we hit rule end ATN state - // and did not follow it despite needing to. - TokenEpsilon = -2 - - TokenMinUserTokenType = 1 - - TokenEOF = -1 - - // All tokens go to the parser (unless Skip() is called in that rule) - // on a particular "channel". The parser tunes to a particular channel - // so that whitespace etc... can go to the parser on a "hidden" channel. - - TokenDefaultChannel = 0 - - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. 
- - TokenHiddenChannel = 1 -) - -func (b *BaseToken) GetChannel() int { - return b.channel -} - -func (b *BaseToken) GetStart() int { - return b.start -} - -func (b *BaseToken) GetStop() int { - return b.stop -} - -func (b *BaseToken) GetLine() int { - return b.line -} - -func (b *BaseToken) GetColumn() int { - return b.column -} - -func (b *BaseToken) GetTokenType() int { - return b.tokenType -} - -func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { - return b.source -} - -func (b *BaseToken) GetTokenIndex() int { - return b.tokenIndex -} - -func (b *BaseToken) SetTokenIndex(v int) { - b.tokenIndex = v -} - -func (b *BaseToken) GetTokenSource() TokenSource { - return b.source.tokenSource -} - -func (b *BaseToken) GetInputStream() CharStream { - return b.source.charStream -} - -type CommonToken struct { - *BaseToken -} - -func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - - t := new(CommonToken) - - t.BaseToken = new(BaseToken) - - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 - if t.source.tokenSource != nil { - t.line = source.tokenSource.GetLine() - t.column = source.tokenSource.GetCharPositionInLine() - } else { - t.column = -1 - } - return t -} - -// An empty {@link Pair} which is used as the default value of -// {@link //source} for tokens that do not have a source. - -//CommonToken.EMPTY_SOURCE = [ nil, nil ] - -// Constructs a New{@link CommonToken} as a copy of another {@link Token}. -// -//-// If {@code oldToken} is also a {@link CommonToken} instance, the newly -// constructed token will share a reference to the {@link //text} field and -// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will -// be assigned the result of calling {@link //GetText}, and {@link //source} -// will be constructed from the result of {@link Token//GetTokenSource} and -// {@link Token//GetInputStream}.
-// -// @param oldToken The token to copy. -// -func (c *CommonToken) clone() *CommonToken { - t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) - t.tokenIndex = c.GetTokenIndex() - t.line = c.GetLine() - t.column = c.GetColumn() - t.text = c.GetText() - return t -} - -func (c *CommonToken) GetText() string { - if c.text != "" { - return c.text - } - input := c.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if c.start < n && c.stop < n { - return input.GetTextFromInterval(NewInterval(c.start, c.stop)) - } - return "-// You can insert stuff, replace, and delete chunks. Note that the operations -// are done lazily--only if you convert the buffer to a {@link String} with -// {@link TokenStream#getText()}. This is very efficient because you are not -// moving data around all the time. As the buffer of tokens is converted to -// strings, the {@link #getText()} method(s) scan the input token stream and -// check to see if there is an operation at the current index. If so, the -// operation is done and then normal {@link String} rendering continues on the -// buffer. This is like having multiple Turing machine instruction streams -// (programs) operating on a single input tape. :)
-//- -// This rewriter makes no modifications to the token stream. It does not ask the -// stream to fill itself up nor does it advance the input cursor. The token -// stream {@link TokenStream#index()} will return the same value before and -// after any {@link #getText()} call.
- -//-// The rewriter only works on tokens that you have in the buffer and ignores the -// current input cursor. If you are buffering tokens on-demand, calling -// {@link #getText()} halfway through the input will only do rewrites for those -// tokens in the first half of the file.
- -//-// Since the operations are done lazily at {@link #getText}-time, operations do -// not screw up the token index values. That is, an insert operation at token -// index {@code i} does not change the index values for tokens -// {@code i}+1..n-1.
- -//-// Because operations never actually alter the buffer, you may always get the -// original token stream back without undoing anything. Since the instructions -// are queued up, you can easily simulate transactions and roll back any changes -// if there is an error just by removing instructions. For example,
- -//-// CharStream input = new ANTLRFileStream("input"); -// TLexer lex = new TLexer(input); -// CommonTokenStream tokens = new CommonTokenStream(lex); -// T parser = new T(tokens); -// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens); -// parser.startRule(); -//- -//
-// Then in the rules, you can execute (assuming rewriter is visible):
- -//-// Token t,u; -// ... -// rewriter.insertAfter(t, "text to put after t");} -// rewriter.insertAfter(u, "text after u");} -// System.out.println(rewriter.getText()); -//- -//
-// You can also have multiple "instruction streams" and get multiple rewrites -// from a single pass over the input. Just name the instruction streams and use -// that name again when printing the buffer. This could be useful for generating -// a C file and also its header file--all from the same buffer:
- -//-// rewriter.insertAfter("pass1", t, "text to put after t");} -// rewriter.insertAfter("pass2", u, "text after u");} -// System.out.println(rewriter.getText("pass1")); -// System.out.println(rewriter.getText("pass2")); -//- -//
-// If you don't use named rewrite streams, a "default" stream is used as the -// first example shows.
- - - -const( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 -) - -// Define the rewrite operation hierarchy - -type RewriteOperation interface { - // Execute the rewrite operation by possibly adding to the buffer. - // Return the index of the next token to operate on. - Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream - SetInstructionIndex(val int) - SetIndex(int) - SetText(string) - SetOpName(string) - SetTokens(TokenStream) -} - -type BaseRewriteOperation struct { - //Current index of rewrites list - instruction_index int - //Token buffer index - index int - //Substitution text - text string - //Actual operation name - op_name string - //Pointer to token steam - tokens TokenStream -} - -func (op *BaseRewriteOperation)GetInstructionIndex() int{ - return op.instruction_index -} - -func (op *BaseRewriteOperation)GetIndex() int{ - return op.index -} - -func (op *BaseRewriteOperation)GetText() string{ - return op.text -} - -func (op *BaseRewriteOperation)GetOpName() string{ - return op.op_name -} - -func (op *BaseRewriteOperation)GetTokens() TokenStream{ - return op.tokens -} - -func (op *BaseRewriteOperation)SetInstructionIndex(val int){ - op.instruction_index = val -} - -func (op *BaseRewriteOperation)SetIndex(val int) { - op.index = val -} - -func (op *BaseRewriteOperation)SetText(val string){ - op.text = val -} - -func (op *BaseRewriteOperation)SetOpName(val string){ - op.op_name = val -} - -func (op *BaseRewriteOperation)SetTokens(val TokenStream) { - op.tokens = val -} - - -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{ - return op.index -} - -func (op *BaseRewriteOperation) String() string { - return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, - op.tokens.Get(op.GetIndex()), - op.text, - ) - -} - - -type InsertBeforeOp struct { - BaseRewriteOperation -} - -func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{ - return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index, - text:text, - op_name:"InsertBeforeOp", - tokens:stream, - }} -} - -func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{ - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index+1 -} - -func (op *InsertBeforeOp) String() string { - return op.BaseRewriteOperation.String() -} - -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - -type InsertAfterOp struct { - BaseRewriteOperation -} - -func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{ - return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index+1, - text:text, - tokens:stream, - }} -} - -func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index+1 -} - -func (op *InsertAfterOp) String() string { - return op.BaseRewriteOperation.String() -} - -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp -// instructions. 
-type ReplaceOp struct{ - BaseRewriteOperation - LastIndex int -} - -func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp { - return &ReplaceOp{ - BaseRewriteOperation:BaseRewriteOperation{ - index:from, - text:text, - op_name:"ReplaceOp", - tokens:stream, - }, - LastIndex:to, - } -} - -func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{ - if op.text != ""{ - buffer.WriteString(op.text) - } - return op.LastIndex +1 -} - -func (op *ReplaceOp) String() string { - if op.text == "" { - return fmt.Sprintf("Since we never have to change the ATN transitions once we construct it, -// the states. We'll use the term Edge for the DFA to distinguish them from -// ATN transitions.
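
A small in-package sketch of how a simulator consults these objects; the vocabulary bounds only matter for the set-style transitions further down:

	func edgeConsumes(t Transition, symbol int) bool {
		if t.getIsEpsilon() {
			return false // rule/predicate/action/epsilon edges match no symbol
		}
		// 0x10FFFF (max Unicode code point) is a generous upper bound for the
		// set/wildcard variants that need min/max; see the Matches methods.
		return t.Matches(symbol, TokenMinUserTokenType, 0x10FFFF)
	}
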
- -type Transition interface { - getTarget() ATNState - setTarget(ATNState) - getIsEpsilon() bool - getLabel() *IntervalSet - getSerializationType() int - Matches(int, int, int) bool -} - -type BaseTransition struct { - target ATNState - isEpsilon bool - label int - intervalSet *IntervalSet - serializationType int -} - -func NewBaseTransition(target ATNState) *BaseTransition { - - if target == nil { - panic("target cannot be nil.") - } - - t := new(BaseTransition) - - t.target = target - // Are we epsilon, action, sempred? - t.isEpsilon = false - t.intervalSet = nil - - return t -} - -func (t *BaseTransition) getTarget() ATNState { - return t.target -} - -func (t *BaseTransition) setTarget(s ATNState) { - t.target = s -} - -func (t *BaseTransition) getIsEpsilon() bool { - return t.isEpsilon -} - -func (t *BaseTransition) getLabel() *IntervalSet { - return t.intervalSet -} - -func (t *BaseTransition) getSerializationType() int { - return t.serializationType -} - -func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - panic("Not implemented") -} - -const ( - TransitionEPSILON = 1 - TransitionRANGE = 2 - TransitionRULE = 3 - TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? - TransitionATOM = 5 - TransitionACTION = 6 - TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 - TransitionNOTSET = 8 - TransitionWILDCARD = 9 - TransitionPRECEDENCE = 10 -) - -var TransitionserializationNames = []string{ - "INVALID", - "EPSILON", - "RANGE", - "RULE", - "PREDICATE", - "ATOM", - "ACTION", - "SET", - "NOT_SET", - "WILDCARD", - "PRECEDENCE", -} - -//var TransitionserializationTypes struct { -// EpsilonTransition int -// RangeTransition int -// RuleTransition int -// PredicateTransition int -// AtomTransition int -// ActionTransition int -// SetTransition int -// NotSetTransition int -// WildcardTransition int -// PrecedencePredicateTransition int -//}{ -// TransitionEPSILON, -// TransitionRANGE, -// TransitionRULE, -// TransitionPREDICATE, -// TransitionATOM, -// TransitionACTION, -// TransitionSET, -// TransitionNOTSET, -// TransitionWILDCARD, -// TransitionPRECEDENCE -//} - -// TODO: make all transitions sets? no, should remove set edges -type AtomTransition struct { - *BaseTransition -} - -func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { - - t := new(AtomTransition) - t.BaseTransition = NewBaseTransition(target) - - t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
- t.intervalSet = t.makeLabel() - t.serializationType = TransitionATOM - - return t -} - -func (t *AtomTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addOne(t.label) - return s -} - -func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.label == symbol -} - -func (t *AtomTransition) String() string { - return strconv.Itoa(t.label) -} - -type RuleTransition struct { - *BaseTransition - - followState ATNState - ruleIndex, precedence int -} - -func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { - - t := new(RuleTransition) - t.BaseTransition = NewBaseTransition(ruleStart) - - t.ruleIndex = ruleIndex - t.precedence = precedence - t.followState = followState - t.serializationType = TransitionRULE - t.isEpsilon = true - - return t -} - -func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -type EpsilonTransition struct { - *BaseTransition - - outermostPrecedenceReturn int -} - -func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { - - t := new(EpsilonTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionEPSILON - t.isEpsilon = true - t.outermostPrecedenceReturn = outermostPrecedenceReturn - return t -} - -func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *EpsilonTransition) String() string { - return "epsilon" -} - -type RangeTransition struct { - *BaseTransition - - start, stop int -} - -func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { - - t := new(RangeTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionRANGE - t.start = start - t.stop = stop - t.intervalSet = t.makeLabel() - return t -} - -func (t *RangeTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addRange(t.start, t.stop) - return s -} - -func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= t.start && symbol <= t.stop -} - -func (t *RangeTransition) String() string { - return "'" + string(t.start) + "'..'" + string(t.stop) + "'" -} - -type AbstractPredicateTransition interface { - Transition - IAbstractPredicateTransitionFoo() -} - -type BaseAbstractPredicateTransition struct { - *BaseTransition -} - -func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { - - t := new(BaseAbstractPredicateTransition) - t.BaseTransition = NewBaseTransition(target) - - return t -} - -func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} - -type PredicateTransition struct { - *BaseAbstractPredicateTransition - - isCtxDependent bool - ruleIndex, predIndex int -} - -func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { - - t := new(PredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPREDICATE - t.ruleIndex = ruleIndex - t.predIndex = predIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PredicateTransition) getPredicate() *Predicate { - return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) -} - -func (t *PredicateTransition) String() string 
{ - return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) -} - -type ActionTransition struct { - *BaseTransition - - isCtxDependent bool - ruleIndex, actionIndex, predIndex int -} - -func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { - - t := new(ActionTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionACTION - t.ruleIndex = ruleIndex - t.actionIndex = actionIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *ActionTransition) String() string { - return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) -} - -type SetTransition struct { - *BaseTransition -} - -func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { - - t := new(SetTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionSET - if set != nil { - t.intervalSet = set - } else { - t.intervalSet = NewIntervalSet() - t.intervalSet.addOne(TokenInvalidType) - } - - return t -} - -func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.intervalSet.contains(symbol) -} - -func (t *SetTransition) String() string { - return t.intervalSet.String() -} - -type NotSetTransition struct { - *SetTransition -} - -func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { - - t := new(NotSetTransition) - - t.SetTransition = NewSetTransition(target, set) - - t.serializationType = TransitionNOTSET - - return t -} - -func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) -} - -func (t *NotSetTransition) String() string { - return "~" + t.intervalSet.String() -} - -type WildcardTransition struct { - *BaseTransition -} - -func NewWildcardTransition(target ATNState) *WildcardTransition { - - t := new(WildcardTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionWILDCARD - return t -} - -func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol -} - -func (t *WildcardTransition) String() string { - return "." 
-}
-
-type PrecedencePredicateTransition struct {
-	*BaseAbstractPredicateTransition
-
-	precedence int
-}
-
-func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
-
-	t := new(PrecedencePredicateTransition)
-	t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
-
-	t.serializationType = TransitionPRECEDENCE
-	t.precedence = precedence
-	t.isEpsilon = true
-
-	return t
-}
-
-func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
-	return false
-}
-
-func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
-	return NewPrecedencePredicate(t.precedence)
-}
-
-func (t *PrecedencePredicateTransition) String() string {
-	return fmt.Sprint(t.precedence) + " >= _p"
-}
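The Matches implementations deleted above are all small predicates over token intervals. A standalone sketch of the same logic (plain Go, deliberately not using the antlr package's types):

```go
package main

import "fmt"

// interval mirrors antlr's inclusive [start, stop] token ranges.
type interval struct{ start, stop int }

type intervalSet []interval

func (s intervalSet) contains(symbol int) bool {
	for _, iv := range s {
		if symbol >= iv.start && symbol <= iv.stop {
			return true
		}
	}
	return false
}

// matchesNotSet mirrors NotSetTransition.Matches: the symbol must lie inside
// the vocabulary bounds but outside the transition's interval set.
func matchesNotSet(s intervalSet, symbol, minVocab, maxVocab int) bool {
	return symbol >= minVocab && symbol <= maxVocab && !s.contains(symbol)
}

func main() {
	set := intervalSet{{start: 'a', stop: 'z'}}
	fmt.Println(set.contains('q'))                    // true: RangeTransition-style match
	fmt.Println(matchesNotSet(set, 'Q', 0, 0x10FFFF)) // true: in vocabulary, not in set
	fmt.Println(matchesNotSet(set, 'q', 0, 0x10FFFF)) // false: excluded by the set
}
```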
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
deleted file mode 100644
index bdeb6d78..00000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// The basic notion of a tree has a parent, a payload, and a list of children.
-// It is the most abstract interface for all the trees used by ANTLR.
-///
-
-var TreeInvalidInterval = NewInterval(-1, -2)
-
-type Tree interface {
-	GetParent() Tree
-	SetParent(Tree)
-	GetPayload() interface{}
-	GetChild(i int) Tree
-	GetChildCount() int
-	GetChildren() []Tree
-}
-
-type SyntaxTree interface {
-	Tree
-
-	GetSourceInterval() *Interval
-}
-
-type ParseTree interface {
-	SyntaxTree
-
-	Accept(Visitor ParseTreeVisitor) interface{}
-	GetText() string
-
-	ToStringTree([]string, Recognizer) string
-}
-
-type RuleNode interface {
-	ParseTree
-
-	GetRuleContext() RuleContext
-	GetBaseRuleContext() *BaseRuleContext
-}
-
-type TerminalNode interface {
-	ParseTree
-
-	GetSymbol() Token
-}
-
-type ErrorNode interface {
-	TerminalNode
-
-	errorNode()
-}
-
-type ParseTreeVisitor interface {
-	Visit(tree ParseTree) interface{}
-	VisitChildren(node RuleNode) interface{}
-	VisitTerminal(node TerminalNode) interface{}
-	VisitErrorNode(node ErrorNode) interface{}
-}
-
-type BaseParseTreeVisitor struct{}
-
-var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
-
-func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}            { return nil }
-func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{}     { return nil }
-func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{}   { return nil }
-
-// TODO
-//func (this ParseTreeVisitor) Visit(ctx) {
-//	if (Utils.isArray(ctx)) {
-//		self := this
-//		return ctx.map(function(child) { return VisitAtom(self, child)})
-//	} else {
-//		return VisitAtom(this, ctx)
-//	}
-//}
-//
-//func VisitAtom(Visitor, ctx) {
-//	if (ctx.parser == nil) { //is terminal
-//		return
-//	}
-//
-//	name := ctx.parser.ruleNames[ctx.ruleIndex]
-//	funcName := "Visit" + Utils.titleCase(name)
-//
-//	return Visitor[funcName](ctx)
-//}
-
-type ParseTreeListener interface {
-	VisitTerminal(node TerminalNode)
-	VisitErrorNode(node ErrorNode)
-	EnterEveryRule(ctx ParserRuleContext)
-	ExitEveryRule(ctx ParserRuleContext)
-}
-
-type BaseParseTreeListener struct{}
-
-var _ ParseTreeListener = &BaseParseTreeListener{}
-
-func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode)      {}
-func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode)        {}
-func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
-func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext)  {}
-
-type TerminalNodeImpl struct {
-	parentCtx RuleContext
-
-	symbol Token
-}
-
-var _ TerminalNode = &TerminalNodeImpl{}
-
-func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
-	tn := new(TerminalNodeImpl)
-
-	tn.parentCtx = nil
-	tn.symbol = symbol
-
-	return tn
-}
-
-func (t *TerminalNodeImpl) GetChild(i int) Tree {
-	return nil
-}
-
-func (t *TerminalNodeImpl) GetChildren() []Tree {
-	return nil
-}
-
-func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
-	panic("Cannot set children on terminal node")
-}
-
-func (t *TerminalNodeImpl) GetSymbol() Token {
-	return t.symbol
-}
-
-func (t *TerminalNodeImpl) GetParent() Tree {
-	return t.parentCtx
-}
-
-func (t *TerminalNodeImpl) SetParent(tree Tree) {
-	t.parentCtx = tree.(RuleContext)
-}
-
-func (t *TerminalNodeImpl) GetPayload() interface{} {
-	return t.symbol
-}
-
-func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
-	if t.symbol == nil {
-		return TreeInvalidInterval
-	}
-	tokenIndex := t.symbol.GetTokenIndex()
-	return NewInterval(tokenIndex, tokenIndex)
-}
-
-func (t *TerminalNodeImpl) GetChildCount() int {
-	return 0
-}
-
-func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
-	return v.VisitTerminal(t)
-}
-
-func (t *TerminalNodeImpl) GetText() string {
-	return t.symbol.GetText()
-}
-
-func (t *TerminalNodeImpl) String() string {
-	if t.symbol.GetTokenType() == TokenEOF {
-		return "<EOF>"
-	}
-
-	return t.symbol.GetText()
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/README.md b/vendor/github.com/caddyserver/caddy/v2/README.md
deleted file mode 100644
--- a/vendor/github.com/caddyserver/caddy/v2/README.md
+++ /dev/null
-Caddy is an extensible server platform that uses TLS by default.
-
-Releases · Documentation · Get Help
- - - -### Menu - -- [Features](#features) -- [Install](#install) -- [Build from source](#build-from-source) - - [For development](#for-development) - - [With version information and/or plugins](#with-version-information-andor-plugins) -- [Quick start](#quick-start) -- [Overview](#overview) -- [Full documentation](#full-documentation) -- [Getting help](#getting-help) -- [About](#about) - - - - -## [Features](https://caddyserver.com/v2) - -- **Easy configuration** with the [Caddyfile](https://caddyserver.com/docs/caddyfile) -- **Powerful configuration** with its [native JSON config](https://caddyserver.com/docs/json/) -- **Dynamic configuration** with the [JSON API](https://caddyserver.com/docs/api) -- [**Config adapters**](https://caddyserver.com/docs/config-adapters) if you don't like JSON -- **Automatic HTTPS** by default - - [ZeroSSL](https://zerossl.com) and [Let's Encrypt](https://letsencrypt.org) for public names - - Fully-managed local CA for internal names & IPs - - Can coordinate with other Caddy instances in a cluster - - Multi-issuer fallback -- **Stays up when other servers go down** due to TLS/OCSP/certificate-related issues -- **Production-ready** after serving trillions of requests and managing millions of TLS certificates -- **Scales to tens of thousands of sites** ... and probably more -- **HTTP/1.1, HTTP/2, and experimental HTTP/3** support -- **Highly extensible** [modular architecture](https://caddyserver.com/docs/architecture) lets Caddy do anything without bloat -- **Runs anywhere** with **no external dependencies** (not even libc) -- Written in Go, a language with higher **memory safety guarantees** than other servers -- Actually **fun to use** -- So, so much more to [discover](https://caddyserver.com/v2) - -## Install - -The simplest, cross-platform way is to download from [GitHub Releases](https://github.com/caddyserver/caddy/releases) and place the executable file in your PATH. - -For other install options, see https://caddyserver.com/docs/install. - -## Build from source - -Requirements: - -- [Go 1.16 or newer](https://golang.org/dl/) - -### For development - -_**Note:** These steps [will not embed proper version information](https://github.com/golang/go/issues/29228). For that, please follow the instructions in the next section._ - -```bash -$ git clone "https://github.com/caddyserver/caddy.git" -$ cd caddy/cmd/caddy/ -$ go build -``` - -When you run Caddy, it may try to bind to low ports unless otherwise specified in your config. If your OS requires elevated privileges for this, you will need to give your new binary permission to do so. On Linux, this can be done easily with: `sudo setcap cap_net_bind_service=+ep ./caddy` - -If you prefer to use `go run` which only creates temporary binaries, you can still do this with the included `setcap.sh` like so: - -```bash -$ go run -exec ./setcap.sh main.go -``` - -If you don't want to type your password for `setcap`, use `sudo visudo` to edit your sudoers file and allow your user account to run that command without a password, for example: - -``` -username ALL=(ALL:ALL) NOPASSWD: /usr/sbin/setcap -``` - -replacing `username` with your actual username. Please be careful and only do this if you know what you are doing! We are only qualified to document how to use Caddy, not Go tooling or your computer, and we are providing these instructions for convenience only; please learn how to use your own computer at your own risk and make any needful adjustments. 
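For orientation, the entrypoint compiled above is tiny: a sketch of the kind of `main.go` involved, based on the upstream layout (the plugin import is illustrative; any third-party module is added the same way):

```go
package main

import (
	caddycmd "github.com/caddyserver/caddy/v2/cmd"

	// Standard modules; third-party plugins are added the same way,
	// one blank import per plugin.
	_ "github.com/caddyserver/caddy/v2/modules/standard"
)

func main() {
	caddycmd.Main()
}
```

The xcaddy steps in the next section automate producing exactly this kind of file.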
- -### With version information and/or plugins - -Using [our builder tool, `xcaddy`](https://github.com/caddyserver/xcaddy)... - -``` -$ xcaddy build -``` - -...the following steps are automated: - -1. Create a new folder: `mkdir caddy` -2. Change into it: `cd caddy` -3. Copy [Caddy's main.go](https://github.com/caddyserver/caddy/blob/master/cmd/caddy/main.go) into the empty folder. Add imports for any custom plugins you want to add. -4. Initialize a Go module: `go mod init caddy` -5. (Optional) Pin Caddy version: `go get github.com/caddyserver/caddy/v2@version` replacing `version` with a git tag, commit, or branch name. -6. (Optional) Add plugins by adding their import: `_ "import/path/here"` -7. Compile: `go build` - - - - -## Quick start - -The [Caddy website](https://caddyserver.com/docs/) has documentation that includes tutorials, quick-start guides, reference, and more. - -**We recommend that all users -- regardless of experience level -- do our [Getting Started](https://caddyserver.com/docs/getting-started) guide to become familiar with using Caddy.** - -If you've only got a minute, [the website has several quick-start tutorials](https://caddyserver.com/docs/quick-starts) to choose from! However, after finishing a quick-start tutorial, please read more documentation to understand how the software works. 🙂 - - - - -## Overview - -Caddy is most often used as an HTTPS server, but it is suitable for any long-running Go program. First and foremost, it is a platform to run Go applications. Caddy "apps" are just Go programs that are implemented as Caddy modules. Two apps -- `tls` and `http` -- ship standard with Caddy. - -Caddy apps instantly benefit from [automated documentation](https://caddyserver.com/docs/json/), graceful on-line [config changes via API](https://caddyserver.com/docs/api), and unification with other Caddy apps. - -Although [JSON](https://caddyserver.com/docs/json/) is Caddy's native config language, Caddy can accept input from [config adapters](https://caddyserver.com/docs/config-adapters) which can essentially convert any config format of your choice into JSON: Caddyfile, JSON 5, YAML, TOML, NGINX config, and more. - -The primary way to configure Caddy is through [its API](https://caddyserver.com/docs/api), but if you prefer config files, the [command-line interface](https://caddyserver.com/docs/command-line) supports those too. - -Caddy exposes an unprecedented level of control compared to any web server in existence. In Caddy, you are usually setting the actual values of the initialized types in memory that power everything from your HTTP handlers and TLS handshakes to your storage medium. Caddy is also ridiculously extensible, with a powerful plugin system that makes vast improvements over other web servers. - -To wield the power of this design, you need to know how the config document is structured. Please see [our documentation site](https://caddyserver.com/docs/) for details about [Caddy's config structure](https://caddyserver.com/docs/json/). - -Nearly all of Caddy's configuration is contained in a single config document, rather than being scattered across CLI flags and env variables and a configuration file as with other web servers. This makes managing your server config more straightforward and reduces hidden variables/factors. - - -## Full documentation - -Our website has complete documentation: - -**https://caddyserver.com/docs/** - -The docs are also open source. 
You can contribute to them here: https://github.com/caddyserver/website - - - -## Getting help - -- We **strongly recommend** that all professionals or companies using Caddy get a support contract through [Ardan Labs](https://www.ardanlabs.com/my/contact-us?dd=caddy) before help is needed. - -- A [sponsorship](https://github.com/sponsors/mholt) goes a long way! If Caddy is benefitting your company, please consider a sponsorship! This not only helps fund full-time work to ensure the longevity of the project, it's also a great look for your company to your customers and potential customers! - -- Individuals can exchange help for free on our community forum at https://caddy.community. Remember that people give help out of their spare time and good will. The best way to get help is to give it first! - -Please use our [issue tracker](https://github.com/caddyserver/caddy/issues) only for bug reports and feature requests, i.e. actionable development items (support questions will usually be referred to the forums). - - - -## About - -**The name "Caddy" is trademarked.** The name of the software is "Caddy", not "Caddy Server" or "CaddyServer". Please call it "Caddy" or, if you wish to clarify, "the Caddy web server". Caddy is a registered trademark of Stack Holdings GmbH. - -- _Project on Twitter: [@caddyserver](https://twitter.com/caddyserver)_ -- _Author on Twitter: [@mholt6](https://twitter.com/mholt6)_ - -Caddy is a project of [ZeroSSL](https://zerossl.com), a Stack Holdings company. - -Debian package repository hosting is graciously provided by [Cloudsmith](https://cloudsmith.com). Cloudsmith is the only fully hosted, cloud-native, universal package management solution, that enables your organization to create, store and share packages in any format, to any place, with total confidence. \ No newline at end of file diff --git a/vendor/github.com/caddyserver/caddy/v2/admin.go b/vendor/github.com/caddyserver/caddy/v2/admin.go deleted file mode 100644 index fb451682..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/admin.go +++ /dev/null @@ -1,1245 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "bytes" - "context" - "crypto" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "expvar" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/pprof" - "net/url" - "os" - "path" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/caddyserver/caddy/v2/notify" - "github.com/caddyserver/certmagic" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -// AdminConfig configures Caddy's API endpoint, which is used -// to manage Caddy while it is running. -type AdminConfig struct { - // If true, the admin endpoint will be completely disabled. - // Note that this makes any runtime changes to the config - // impossible, since the interface to do so is through the - // admin endpoint. 
- Disabled bool `json:"disabled,omitempty"` - - // The address to which the admin endpoint's listener should - // bind itself. Can be any single network address that can be - // parsed by Caddy. Default: localhost:2019 - Listen string `json:"listen,omitempty"` - - // If true, CORS headers will be emitted, and requests to the - // API will be rejected if their `Host` and `Origin` headers - // do not match the expected value(s). Use `origins` to - // customize which origins/hosts are allowed. If `origins` is - // not set, the listen address is the only value allowed by - // default. Enforced only on local (plaintext) endpoint. - EnforceOrigin bool `json:"enforce_origin,omitempty"` - - // The list of allowed origins/hosts for API requests. Only needed - // if accessing the admin endpoint from a host different from the - // socket's network interface or if `enforce_origin` is true. If not - // set, the listener address will be the default value. If set but - // empty, no origins will be allowed. Enforced only on local - // (plaintext) endpoint. - Origins []string `json:"origins,omitempty"` - - // Options pertaining to configuration management. - Config *ConfigSettings `json:"config,omitempty"` - - // Options that establish this server's identity. Identity refers to - // credentials which can be used to uniquely identify and authenticate - // this server instance. This is required if remote administration is - // enabled (but does not require remote administration to be enabled). - // Default: no identity management. - Identity *IdentityConfig `json:"identity,omitempty"` - - // Options pertaining to remote administration. By default, remote - // administration is disabled. If enabled, identity management must - // also be configured, as that is how the endpoint is secured. - // See the neighboring "identity" object. - // - // EXPERIMENTAL: This feature is subject to change. - Remote *RemoteAdmin `json:"remote,omitempty"` -} - -// ConfigSettings configures the management of configuration. -type ConfigSettings struct { - // Whether to keep a copy of the active config on disk. Default is true. - // Note that "pulled" dynamic configs (using the neighboring "load" module) - // are not persisted; only configs that are pushed to Caddy get persisted. - Persist *bool `json:"persist,omitempty"` - - // Loads a configuration to use. This is helpful if your configs are - // managed elsewhere, and you want Caddy to pull its config dynamically - // when it starts. The pulled config completely replaces the current - // one, just like any other config load. It is an error if a pulled - // config is configured to pull another config. - // - // EXPERIMENTAL: Subject to change. - LoadRaw json.RawMessage `json:"load,omitempty" caddy:"namespace=caddy.config_loaders inline_key=module"` - - // The interval to pull config. With a non-zero value, will pull config - // from config loader (eg. a http loader) with given interval. - // - // EXPERIMENTAL: Subject to change. - LoadInterval Duration `json:"load_interval,omitempty"` -} - -// IdentityConfig configures management of this server's identity. An identity -// consists of credentials that uniquely verify this instance; for example, -// TLS certificates (public + private key pairs). -type IdentityConfig struct { - // List of names or IP addresses which refer to this server. - // Certificates will be obtained for these identifiers so - // secure TLS connections can be made using them. 
- Identifiers []string `json:"identifiers,omitempty"` - - // Issuers that can provide this admin endpoint its identity - // certificate(s). Default: ACME issuers configured for - // ZeroSSL and Let's Encrypt. Be sure to change this if you - // require credentials for private identifiers. - IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"` - - issuers []certmagic.Issuer -} - -// RemoteAdmin enables and configures remote administration. If enabled, -// a secure listener enforcing mutual TLS authentication will be started -// on a different port from the standard plaintext admin server. -// -// This endpoint is secured using identity management, which must be -// configured separately (because identity management does not depend -// on remote administration). See the admin/identity config struct. -// -// EXPERIMENTAL: Subject to change. -type RemoteAdmin struct { - // The address on which to start the secure listener. - // Default: :2021 - Listen string `json:"listen,omitempty"` - - // List of access controls for this secure admin endpoint. - // This configures TLS mutual authentication (i.e. authorized - // client certificates), but also application-layer permissions - // like which paths and methods each identity is authorized for. - AccessControl []*AdminAccess `json:"access_control,omitempty"` -} - -// AdminAccess specifies what permissions an identity or group -// of identities are granted. -type AdminAccess struct { - // Base64-encoded DER certificates containing public keys to accept. - // (The contents of PEM certificate blocks are base64-encoded DER.) - // Any of these public keys can appear in any part of a verified chain. - PublicKeys []string `json:"public_keys,omitempty"` - - // Limits what the associated identities are allowed to do. - // If unspecified, all permissions are granted. - Permissions []AdminPermissions `json:"permissions,omitempty"` - - publicKeys []crypto.PublicKey -} - -// AdminPermissions specifies what kinds of requests are allowed -// to be made to the admin endpoint. -type AdminPermissions struct { - // The API paths allowed. Paths are simple prefix matches. - // Any subpath of the specified paths will be allowed. - Paths []string `json:"paths,omitempty"` - - // The HTTP methods allowed for the given paths. - Methods []string `json:"methods,omitempty"` -} - -// newAdminHandler reads admin's config and returns an http.Handler suitable -// for use in an admin endpoint server, which will be listening on listenAddr. 
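Via their struct tags, these types map directly onto the `admin` object in Caddy's JSON config; a minimal hedged example (values illustrative, not required defaults):

```json
{
  "admin": {
    "listen": "localhost:2019",
    "enforce_origin": true,
    "origins": ["localhost:2019"]
  }
}
```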
-func (admin AdminConfig) newAdminHandler(addr NetworkAddress, remote bool) adminHandler { - muxWrap := adminHandler{mux: http.NewServeMux()} - - // secure the local or remote endpoint respectively - if remote { - muxWrap.remoteControl = admin.Remote - } else { - muxWrap.enforceHost = !addr.isWildcardInterface() - muxWrap.allowedOrigins = admin.allowedOrigins(addr) - } - - addRouteWithMetrics := func(pattern string, handlerLabel string, h http.Handler) { - labels := prometheus.Labels{"path": pattern, "handler": handlerLabel} - h = instrumentHandlerCounter( - adminMetrics.requestCount.MustCurryWith(labels), - h, - ) - muxWrap.mux.Handle(pattern, h) - } - // addRoute just calls muxWrap.mux.Handle after - // wrapping the handler with error handling - addRoute := func(pattern string, handlerLabel string, h AdminHandler) { - wrapper := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - err := h.ServeHTTP(w, r) - if err != nil { - labels := prometheus.Labels{ - "path": pattern, - "handler": handlerLabel, - "method": strings.ToUpper(r.Method), - } - adminMetrics.requestErrors.With(labels).Inc() - } - muxWrap.handleError(w, r, err) - }) - addRouteWithMetrics(pattern, handlerLabel, wrapper) - } - - const handlerLabel = "admin" - - // register standard config control endpoints - addRoute("/"+rawConfigKey+"/", handlerLabel, AdminHandlerFunc(handleConfig)) - addRoute("/id/", handlerLabel, AdminHandlerFunc(handleConfigID)) - addRoute("/stop", handlerLabel, AdminHandlerFunc(handleStop)) - - // register debugging endpoints - addRouteWithMetrics("/debug/pprof/", handlerLabel, http.HandlerFunc(pprof.Index)) - addRouteWithMetrics("/debug/pprof/cmdline", handlerLabel, http.HandlerFunc(pprof.Cmdline)) - addRouteWithMetrics("/debug/pprof/profile", handlerLabel, http.HandlerFunc(pprof.Profile)) - addRouteWithMetrics("/debug/pprof/symbol", handlerLabel, http.HandlerFunc(pprof.Symbol)) - addRouteWithMetrics("/debug/pprof/trace", handlerLabel, http.HandlerFunc(pprof.Trace)) - addRouteWithMetrics("/debug/vars", handlerLabel, expvar.Handler()) - - // register third-party module endpoints - for _, m := range GetModules("admin.api") { - router := m.New().(AdminRouter) - handlerLabel := m.ID.Name() - for _, route := range router.Routes() { - addRoute(route.Pattern, handlerLabel, route.Handler) - } - } - - return muxWrap -} - -// allowedOrigins returns a list of origins that are allowed. -// If admin.Origins is nil (null), the provided listen address -// will be used as the default origin. If admin.Origins is -// empty, no origins will be allowed, effectively bricking the -// endpoint for non-unix-socket endpoints, but whatever. -func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []string { - uniqueOrigins := make(map[string]struct{}) - for _, o := range admin.Origins { - uniqueOrigins[o] = struct{}{} - } - if admin.Origins == nil { - if addr.isLoopback() { - if addr.IsUnixNetwork() { - // RFC 2616, Section 14.26: - // "A client MUST include a Host header field in all HTTP/1.1 request - // messages. If the requested URI does not include an Internet host - // name for the service being requested, then the Host header field MUST - // be given with an empty value." 
- uniqueOrigins[""] = struct{}{} - } else { - uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{} - uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{} - uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{} - } - } - if !addr.IsUnixNetwork() { - uniqueOrigins[addr.JoinHostPort(0)] = struct{}{} - } - } - allowed := make([]string, 0, len(uniqueOrigins)) - for origin := range uniqueOrigins { - allowed = append(allowed, origin) - } - return allowed -} - -// replaceLocalAdminServer replaces the running local admin server -// according to the relevant configuration in cfg. If no configuration -// for the admin endpoint exists in cfg, a default one is used, so -// that there is always an admin server (unless it is explicitly -// configured to be disabled). -func replaceLocalAdminServer(cfg *Config) error { - // always be sure to close down the old admin endpoint - // as gracefully as possible, even if the new one is - // disabled -- careful to use reference to the current - // (old) admin endpoint since it will be different - // when the function returns - oldAdminServer := localAdminServer - defer func() { - // do the shutdown asynchronously so that any - // current API request gets a response; this - // goroutine may last a few seconds - if oldAdminServer != nil { - go func(oldAdminServer *http.Server) { - err := stopAdminServer(oldAdminServer) - if err != nil { - Log().Named("admin").Error("stopping current admin endpoint", zap.Error(err)) - } - }(oldAdminServer) - } - }() - - // always get a valid admin config - adminConfig := DefaultAdminConfig - if cfg != nil && cfg.Admin != nil { - adminConfig = cfg.Admin - } - - // if new admin endpoint is to be disabled, we're done - if adminConfig.Disabled { - Log().Named("admin").Warn("admin endpoint disabled") - return nil - } - - // extract a singular listener address - addr, err := parseAdminListenAddr(adminConfig.Listen, DefaultAdminListen) - if err != nil { - return err - } - - handler := adminConfig.newAdminHandler(addr, false) - - ln, err := Listen(addr.Network, addr.JoinHostPort(0)) - if err != nil { - return err - } - - serverMu.Lock() - localAdminServer = &http.Server{ - Addr: addr.String(), // for logging purposes only - Handler: handler, - ReadTimeout: 10 * time.Second, - ReadHeaderTimeout: 5 * time.Second, - IdleTimeout: 60 * time.Second, - MaxHeaderBytes: 1024 * 64, - } - serverMu.Unlock() - - adminLogger := Log().Named("admin") - go func() { - serverMu.Lock() - server := localAdminServer - serverMu.Unlock() - if err := server.Serve(ln); !errors.Is(err, http.ErrServerClosed) { - adminLogger.Error("admin server shutdown for unknown reason", zap.Error(err)) - } - }() - - adminLogger.Info("admin endpoint started", - zap.String("address", addr.String()), - zap.Bool("enforce_origin", adminConfig.EnforceOrigin), - zap.Strings("origins", handler.allowedOrigins)) - - if !handler.enforceHost { - adminLogger.Warn("admin endpoint on open interface; host checking disabled", - zap.String("address", addr.String())) - } - - return nil -} - -// manageIdentity sets up automated identity management for this server. 
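The server constructed in replaceLocalAdminServer above follows a conventional hardened shape; extracted as a standalone sketch (address and handler are placeholders, timeouts copied from the code above):

```go
package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/config/", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("{}"))
	})

	// Same timeout and header-size choices as the admin endpoint above.
	srv := &http.Server{
		Addr:              "localhost:2019",
		Handler:           mux,
		ReadTimeout:       10 * time.Second,
		ReadHeaderTimeout: 5 * time.Second,
		IdleTimeout:       60 * time.Second,
		MaxHeaderBytes:    1024 * 64,
	}
	log.Fatal(srv.ListenAndServe())
}
```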
-func manageIdentity(ctx Context, cfg *Config) error { - if cfg == nil || cfg.Admin == nil || cfg.Admin.Identity == nil { - return nil - } - - // set default issuers; this is pretty hacky because we can't - // import the caddytls package -- but it works - if cfg.Admin.Identity.IssuersRaw == nil { - cfg.Admin.Identity.IssuersRaw = []json.RawMessage{ - json.RawMessage(`{"module": "zerossl"}`), - json.RawMessage(`{"module": "acme"}`), - } - } - - // load and provision issuer modules - if cfg.Admin.Identity.IssuersRaw != nil { - val, err := ctx.LoadModule(cfg.Admin.Identity, "IssuersRaw") - if err != nil { - return fmt.Errorf("loading identity issuer modules: %s", err) - } - for _, issVal := range val.([]interface{}) { - cfg.Admin.Identity.issuers = append(cfg.Admin.Identity.issuers, issVal.(certmagic.Issuer)) - } - } - - // we'll make a new cache when we make the CertMagic config, so stop any previous cache - if identityCertCache != nil { - identityCertCache.Stop() - } - - logger := Log().Named("admin.identity") - cmCfg := cfg.Admin.Identity.certmagicConfig(logger, true) - - // issuers have circular dependencies with the configs because, - // as explained in the caddytls package, they need access to the - // correct storage and cache to solve ACME challenges - for _, issuer := range cfg.Admin.Identity.issuers { - // avoid import cycle with caddytls package, so manually duplicate the interface here, yuck - if annoying, ok := issuer.(interface{ SetConfig(cfg *certmagic.Config) }); ok { - annoying.SetConfig(cmCfg) - } - } - - // obtain and renew server identity certificate(s) - return cmCfg.ManageAsync(ctx, cfg.Admin.Identity.Identifiers) -} - -// replaceRemoteAdminServer replaces the running remote admin server -// according to the relevant configuration in cfg. It stops any previous -// remote admin server and only starts a new one if configured. 
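The remote endpoint set up below relies on TLS mutual authentication instead of Host/Origin checks. A sketch of the essential tls.Config wiring, assuming a client certificate has already been parsed (as decodeBase64DERCert does later in this file):

```go
package adminsketch

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
)

// newRemoteAdminServer mirrors the mutual-TLS setup: connections that do not
// present a client certificate chaining to pool are rejected at handshake time.
func newRemoteAdminServer(clientCert *x509.Certificate, mux http.Handler) *http.Server {
	pool := x509.NewCertPool()
	pool.AddCert(clientCert)

	return &http.Server{
		Addr:    ":2021", // DefaultRemoteAdminListen
		Handler: mux,
		TLSConfig: &tls.Config{
			ClientAuth: tls.RequireAndVerifyClientCert,
			ClientCAs:  pool,
		},
	}
}
```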
-func replaceRemoteAdminServer(ctx Context, cfg *Config) error { - if cfg == nil { - return nil - } - - remoteLogger := Log().Named("admin.remote") - - oldAdminServer := remoteAdminServer - defer func() { - if oldAdminServer != nil { - go func(oldAdminServer *http.Server) { - err := stopAdminServer(oldAdminServer) - if err != nil { - Log().Named("admin").Error("stopping current secure admin endpoint", zap.Error(err)) - } - }(oldAdminServer) - } - }() - - if cfg.Admin == nil || cfg.Admin.Remote == nil { - return nil - } - - addr, err := parseAdminListenAddr(cfg.Admin.Remote.Listen, DefaultRemoteAdminListen) - if err != nil { - return err - } - - // make the HTTP handler but disable Host/Origin enforcement - // because we are using TLS authentication instead - handler := cfg.Admin.newAdminHandler(addr, true) - - // create client certificate pool for TLS mutual auth, and extract public keys - // so that we can enforce access controls at the application layer - clientCertPool := x509.NewCertPool() - for i, accessControl := range cfg.Admin.Remote.AccessControl { - for j, certBase64 := range accessControl.PublicKeys { - cert, err := decodeBase64DERCert(certBase64) - if err != nil { - return fmt.Errorf("access control %d public key %d: parsing base64 certificate DER: %v", i, j, err) - } - accessControl.publicKeys = append(accessControl.publicKeys, cert.PublicKey) - clientCertPool.AddCert(cert) - } - } - - // create TLS config that will enforce mutual authentication - cmCfg := cfg.Admin.Identity.certmagicConfig(remoteLogger, false) - tlsConfig := cmCfg.TLSConfig() - tlsConfig.NextProtos = nil // this server does not solve ACME challenges - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = clientCertPool - - // convert logger to stdlib so it can be used by HTTP server - serverLogger, err := zap.NewStdLogAt(remoteLogger, zap.DebugLevel) - if err != nil { - return err - } - - serverMu.Lock() - // create secure HTTP server - remoteAdminServer = &http.Server{ - Addr: addr.String(), // for logging purposes only - Handler: handler, - TLSConfig: tlsConfig, - ReadTimeout: 10 * time.Second, - ReadHeaderTimeout: 5 * time.Second, - IdleTimeout: 60 * time.Second, - MaxHeaderBytes: 1024 * 64, - ErrorLog: serverLogger, - } - serverMu.Unlock() - - // start listener - ln, err := Listen(addr.Network, addr.JoinHostPort(0)) - if err != nil { - return err - } - ln = tls.NewListener(ln, tlsConfig) - - go func() { - serverMu.Lock() - server := remoteAdminServer - serverMu.Unlock() - if err := server.Serve(ln); !errors.Is(err, http.ErrServerClosed) { - remoteLogger.Error("admin remote server shutdown for unknown reason", zap.Error(err)) - } - }() - - remoteLogger.Info("secure admin remote control endpoint started", - zap.String("address", addr.String())) - - return nil -} - -func (ident *IdentityConfig) certmagicConfig(logger *zap.Logger, makeCache bool) *certmagic.Config { - if ident == nil { - // user might not have configured identity; that's OK, we can still make a - // certmagic config, although it'll be mostly useless for remote management - ident = new(IdentityConfig) - } - cmCfg := &certmagic.Config{ - Storage: DefaultStorage, // do not act as part of a cluster (this is for the server's local identity) - Logger: logger, - Issuers: ident.issuers, - } - if makeCache { - identityCertCache = certmagic.NewCache(certmagic.CacheOptions{ - GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) { - return cmCfg, nil - }, - }) - } - return certmagic.New(identityCertCache, 
*cmCfg) -} - -// IdentityCredentials returns this instance's configured, managed identity credentials -// that can be used in TLS client authentication. -func (ctx Context) IdentityCredentials(logger *zap.Logger) ([]tls.Certificate, error) { - if ctx.cfg == nil || ctx.cfg.Admin == nil || ctx.cfg.Admin.Identity == nil { - return nil, fmt.Errorf("no server identity configured") - } - ident := ctx.cfg.Admin.Identity - if len(ident.Identifiers) == 0 { - return nil, fmt.Errorf("no identifiers configured") - } - if logger == nil { - logger = Log() - } - magic := ident.certmagicConfig(logger, false) - return magic.ClientCredentials(ctx, ident.Identifiers) -} - -// enforceAccessControls enforces application-layer access controls for r based on remote. -// It expects that the TLS server has already established at least one verified chain of -// trust, and then looks for a matching, authorized public key that is allowed to access -// the defined path(s) using the defined method(s). -func (remote RemoteAdmin) enforceAccessControls(r *http.Request) error { - for _, chain := range r.TLS.VerifiedChains { - for _, peerCert := range chain { - for _, adminAccess := range remote.AccessControl { - for _, allowedKey := range adminAccess.publicKeys { - // see if we found a matching public key; the TLS server already verified the chain - // so we know the client possesses the associated private key; this handy interface - // doesn't appear to be defined anywhere in the std lib, but was implemented here: - // https://github.com/golang/go/commit/b5f2c0f50297fa5cd14af668ddd7fd923626cf8c - comparer, ok := peerCert.PublicKey.(interface{ Equal(crypto.PublicKey) bool }) - if !ok || !comparer.Equal(allowedKey) { - continue - } - - // key recognized; make sure its HTTP request is permitted - for _, accessPerm := range adminAccess.Permissions { - // verify method - methodFound := accessPerm.Methods == nil - for _, method := range accessPerm.Methods { - if method == r.Method { - methodFound = true - break - } - } - if !methodFound { - return APIError{ - HTTPStatus: http.StatusForbidden, - Message: "not authorized to use this method", - } - } - - // verify path - pathFound := accessPerm.Paths == nil - for _, allowedPath := range accessPerm.Paths { - if strings.HasPrefix(r.URL.Path, allowedPath) { - pathFound = true - break - } - } - if !pathFound { - return APIError{ - HTTPStatus: http.StatusForbidden, - Message: "not authorized to access this path", - } - } - } - - // public key authorized, method and path allowed - return nil - } - } - } - } - - // in theory, this should never happen; with an unverified chain, the TLS server - // should not accept the connection in the first place, and the acceptable cert - // pool is configured using the same list of public keys we verify against - return APIError{ - HTTPStatus: http.StatusUnauthorized, - Message: "client identity not authorized", - } -} - -func stopAdminServer(srv *http.Server) error { - if srv == nil { - return fmt.Errorf("no admin server") - } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - err := srv.Shutdown(ctx) - if err != nil { - return fmt.Errorf("shutting down admin server: %v", err) - } - Log().Named("admin").Info("stopped previous server", zap.String("address", srv.Addr)) - return nil -} - -// AdminRouter is a type which can return routes for the admin API. -type AdminRouter interface { - Routes() []AdminRoute -} - -// AdminRoute represents a route for the admin endpoint. 
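The duck-typed `Equal(crypto.PublicKey)` comparison used in enforceAccessControls above is satisfied by the modern std-lib key types; a runnable sketch:

```go
package main

import (
	"crypto"
	"crypto/ed25519"
	"fmt"
)

// keysEqual uses the same unnamed interface assertion as enforceAccessControls:
// the interface is not declared in the standard library, but ecdsa, rsa, and
// ed25519 public keys all provide an Equal method.
func keysEqual(a, b crypto.PublicKey) bool {
	comparer, ok := a.(interface{ Equal(crypto.PublicKey) bool })
	return ok && comparer.Equal(b)
}

func main() {
	pub1, _, _ := ed25519.GenerateKey(nil) // nil reader defaults to crypto/rand
	pub2, _, _ := ed25519.GenerateKey(nil)
	fmt.Println(keysEqual(pub1, pub1)) // true
	fmt.Println(keysEqual(pub1, pub2)) // false
}
```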
-type AdminRoute struct { - Pattern string - Handler AdminHandler -} - -type adminHandler struct { - mux *http.ServeMux - - // security for local/plaintext) endpoint, on by default - enforceOrigin bool - enforceHost bool - allowedOrigins []string - - // security for remote/encrypted endpoint - remoteControl *RemoteAdmin -} - -// ServeHTTP is the external entry point for API requests. -// It will only be called once per request. -func (h adminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - log := Log().Named("admin.api").With( - zap.String("method", r.Method), - zap.String("host", r.Host), - zap.String("uri", r.RequestURI), - zap.String("remote_addr", r.RemoteAddr), - zap.Reflect("headers", r.Header), - ) - if r.TLS != nil { - log = log.With( - zap.Bool("secure", true), - zap.Int("verified_chains", len(r.TLS.VerifiedChains)), - ) - } - if r.RequestURI == "/metrics" { - log.Debug("received request") - } else { - log.Info("received request") - } - h.serveHTTP(w, r) -} - -// serveHTTP is the internal entry point for API requests. It may -// be called more than once per request, for example if a request -// is rewritten (i.e. internal redirect). -func (h adminHandler) serveHTTP(w http.ResponseWriter, r *http.Request) { - if h.remoteControl != nil { - // enforce access controls on secure endpoint - if err := h.remoteControl.enforceAccessControls(r); err != nil { - h.handleError(w, r, err) - return - } - } - - if strings.Contains(r.Header.Get("Upgrade"), "websocket") { - // I've never been able demonstrate a vulnerability myself, but apparently - // WebSocket connections originating from browsers aren't subject to CORS - // restrictions, so we'll just be on the safe side - h.handleError(w, r, fmt.Errorf("websocket connections aren't allowed")) - return - } - - if h.enforceHost { - // DNS rebinding mitigation - err := h.checkHost(r) - if err != nil { - h.handleError(w, r, err) - return - } - } - - if h.enforceOrigin { - // cross-site mitigation - origin, err := h.checkOrigin(r) - if err != nil { - h.handleError(w, r, err) - return - } - - if r.Method == http.MethodOptions { - w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, GET, POST, PUT, PATCH, DELETE") - w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Cache-Control") - w.Header().Set("Access-Control-Allow-Credentials", "true") - } - w.Header().Set("Access-Control-Allow-Origin", origin) - } - - h.mux.ServeHTTP(w, r) -} - -func (h adminHandler) handleError(w http.ResponseWriter, r *http.Request, err error) { - if err == nil { - return - } - if err == errInternalRedir { - h.serveHTTP(w, r) - return - } - - apiErr, ok := err.(APIError) - if !ok { - apiErr = APIError{ - HTTPStatus: http.StatusInternalServerError, - Err: err, - } - } - if apiErr.HTTPStatus == 0 { - apiErr.HTTPStatus = http.StatusInternalServerError - } - if apiErr.Message == "" && apiErr.Err != nil { - apiErr.Message = apiErr.Err.Error() - } - - Log().Named("admin.api").Error("request error", - zap.Error(err), - zap.Int("status_code", apiErr.HTTPStatus), - ) - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(apiErr.HTTPStatus) - encErr := json.NewEncoder(w).Encode(apiErr) - if encErr != nil { - Log().Named("admin.api").Error("failed to encode error response", zap.Error(encErr)) - } -} - -// checkHost returns a handler that wraps next such that -// it will only be called if the request's Host header matches -// a trustworthy/expected value. This helps to mitigate DNS -// rebinding attacks. 
-func (h adminHandler) checkHost(r *http.Request) error { - var allowed bool - for _, allowedHost := range h.allowedOrigins { - if r.Host == allowedHost { - allowed = true - break - } - } - if !allowed { - return APIError{ - HTTPStatus: http.StatusForbidden, - Err: fmt.Errorf("host not allowed: %s", r.Host), - } - } - return nil -} - -// checkOrigin ensures that the Origin header, if -// set, matches the intended target; prevents arbitrary -// sites from issuing requests to our listener. It -// returns the origin that was obtained from r. -func (h adminHandler) checkOrigin(r *http.Request) (string, error) { - origin := h.getOriginHost(r) - if origin == "" { - return origin, APIError{ - HTTPStatus: http.StatusForbidden, - Err: fmt.Errorf("missing required Origin header"), - } - } - if !h.originAllowed(origin) { - return origin, APIError{ - HTTPStatus: http.StatusForbidden, - Err: fmt.Errorf("client is not allowed to access from origin %s", origin), - } - } - return origin, nil -} - -func (h adminHandler) getOriginHost(r *http.Request) string { - origin := r.Header.Get("Origin") - if origin == "" { - origin = r.Header.Get("Referer") - } - originURL, err := url.Parse(origin) - if err == nil && originURL.Host != "" { - origin = originURL.Host - } - return origin -} - -func (h adminHandler) originAllowed(origin string) bool { - for _, allowedOrigin := range h.allowedOrigins { - originCopy := origin - if !strings.Contains(allowedOrigin, "://") { - // no scheme specified, so allow both - originCopy = strings.TrimPrefix(originCopy, "http://") - originCopy = strings.TrimPrefix(originCopy, "https://") - } - if originCopy == allowedOrigin { - return true - } - } - return false -} - -func handleConfig(w http.ResponseWriter, r *http.Request) error { - switch r.Method { - case http.MethodGet: - w.Header().Set("Content-Type", "application/json") - - err := readConfig(r.URL.Path, w) - if err != nil { - return APIError{HTTPStatus: http.StatusBadRequest, Err: err} - } - - return nil - - case http.MethodPost, - http.MethodPut, - http.MethodPatch, - http.MethodDelete: - - // DELETE does not use a body, but the others do - var body []byte - if r.Method != http.MethodDelete { - if ct := r.Header.Get("Content-Type"); !strings.Contains(ct, "/json") { - return APIError{ - HTTPStatus: http.StatusBadRequest, - Err: fmt.Errorf("unacceptable content-type: %v; 'application/json' required", ct), - } - } - - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - - _, err := io.Copy(buf, r.Body) - if err != nil { - return APIError{ - HTTPStatus: http.StatusBadRequest, - Err: fmt.Errorf("reading request body: %v", err), - } - } - body = buf.Bytes() - } - - forceReload := r.Header.Get("Cache-Control") == "must-revalidate" - - err := changeConfig(r.Method, r.URL.Path, body, forceReload) - if err != nil { - return err - } - - default: - return APIError{ - HTTPStatus: http.StatusMethodNotAllowed, - Err: fmt.Errorf("method %s not allowed", r.Method), - } - } - - return nil -} - -func handleConfigID(w http.ResponseWriter, r *http.Request) error { - idPath := r.URL.Path - - parts := strings.Split(idPath, "/") - if len(parts) < 3 || parts[2] == "" { - return fmt.Errorf("request path is missing object ID") - } - if parts[0] != "" || parts[1] != "id" { - return fmt.Errorf("malformed object path") - } - id := parts[2] - - // map the ID to the expanded path - currentCfgMu.RLock() - expanded, ok := rawCfgIndex[id] - defer currentCfgMu.RUnlock() - if !ok { - return fmt.Errorf("unknown object ID '%s'", id) - } - - 
// piece the full URL path back together - parts = append([]string{expanded}, parts[3:]...) - r.URL.Path = path.Join(parts...) - - return errInternalRedir -} - -func handleStop(w http.ResponseWriter, r *http.Request) error { - if r.Method != http.MethodPost { - return APIError{ - HTTPStatus: http.StatusMethodNotAllowed, - Err: fmt.Errorf("method not allowed"), - } - } - - if err := notify.NotifyStopping(); err != nil { - Log().Error("unable to notify stopping to service manager", zap.Error(err)) - } - - exitProcess(Log().Named("admin.api")) - return nil -} - -// unsyncedConfigAccess traverses into the current config and performs -// the operation at path according to method, using body and out as -// needed. This is a low-level, unsynchronized function; most callers -// will want to use changeConfig or readConfig instead. This requires a -// read or write lock on currentCfgMu, depending on method (GET needs -// only a read lock; all others need a write lock). -func unsyncedConfigAccess(method, path string, body []byte, out io.Writer) error { - var err error - var val interface{} - - // if there is a request body, decode it into the - // variable that will be set in the config according - // to method and path - if len(body) > 0 { - err = json.Unmarshal(body, &val) - if err != nil { - return fmt.Errorf("decoding request body: %v", err) - } - } - - enc := json.NewEncoder(out) - - cleanPath := strings.Trim(path, "/") - if cleanPath == "" { - return fmt.Errorf("no traversable path") - } - - parts := strings.Split(cleanPath, "/") - if len(parts) == 0 { - return fmt.Errorf("path missing") - } - - // A path that ends with "..." implies: - // 1) the part before it is an array - // 2) the payload is an array - // and means that the user wants to expand the elements - // in the payload array and append each one into the - // destination array, like so: - // array = append(array, elems...) - // This special case is handled below. - ellipses := parts[len(parts)-1] == "..." - if ellipses { - parts = parts[:len(parts)-1] - } - - var ptr interface{} = rawCfg - -traverseLoop: - for i, part := range parts { - switch v := ptr.(type) { - case map[string]interface{}: - // if the next part enters a slice, and the slice is our destination, - // handle it specially (because appending to the slice copies the slice - // header, which does not replace the original one like we want) - if arr, ok := v[part].([]interface{}); ok && i == len(parts)-2 { - var idx int - if method != http.MethodPost { - idxStr := parts[len(parts)-1] - idx, err = strconv.Atoi(idxStr) - if err != nil { - return fmt.Errorf("[%s] invalid array index '%s': %v", - path, idxStr, err) - } - if idx < 0 || idx >= len(arr) { - return fmt.Errorf("[%s] array index out of bounds: %s", path, idxStr) - } - } - - switch method { - case http.MethodGet: - err = enc.Encode(arr[idx]) - if err != nil { - return fmt.Errorf("encoding config: %v", err) - } - case http.MethodPost: - if ellipses { - valArray, ok := val.([]interface{}) - if !ok { - return fmt.Errorf("final element is not an array") - } - v[part] = append(arr, valArray...) - } else { - v[part] = append(arr, val) - } - case http.MethodPut: - // avoid creation of new slice and a second copy (see - // https://github.com/golang/go/wiki/SliceTricks#insert) - arr = append(arr, nil) - copy(arr[idx+1:], arr[idx:]) - arr[idx] = val - v[part] = arr - case http.MethodPatch: - arr[idx] = val - case http.MethodDelete: - v[part] = append(arr[:idx], arr[idx+1:]...) 
- default: - return fmt.Errorf("unrecognized method %s", method) - } - break traverseLoop - } - - if i == len(parts)-1 { - switch method { - case http.MethodGet: - err = enc.Encode(v[part]) - if err != nil { - return fmt.Errorf("encoding config: %v", err) - } - case http.MethodPost: - // if the part is an existing list, POST appends to - // it, otherwise it just sets or creates the value - if arr, ok := v[part].([]interface{}); ok { - if ellipses { - valArray, ok := val.([]interface{}) - if !ok { - return fmt.Errorf("final element is not an array") - } - v[part] = append(arr, valArray...) - } else { - v[part] = append(arr, val) - } - } else { - v[part] = val - } - case http.MethodPut: - if _, ok := v[part]; ok { - return fmt.Errorf("[%s] key already exists: %s", path, part) - } - v[part] = val - case http.MethodPatch: - if _, ok := v[part]; !ok { - return fmt.Errorf("[%s] key does not exist: %s", path, part) - } - v[part] = val - case http.MethodDelete: - delete(v, part) - default: - return fmt.Errorf("unrecognized method %s", method) - } - } else { - // if we are "PUTting" a new resource, the key(s) in its path - // might not exist yet; that's OK but we need to make them as - // we go, while we still have a pointer from the level above - if v[part] == nil && method == http.MethodPut { - v[part] = make(map[string]interface{}) - } - ptr = v[part] - } - - case []interface{}: - partInt, err := strconv.Atoi(part) - if err != nil { - return fmt.Errorf("[/%s] invalid array index '%s': %v", - strings.Join(parts[:i+1], "/"), part, err) - } - if partInt < 0 || partInt >= len(v) { - return fmt.Errorf("[/%s] array index out of bounds: %s", - strings.Join(parts[:i+1], "/"), part) - } - ptr = v[partInt] - - default: - return fmt.Errorf("invalid traversal path at: %s", strings.Join(parts[:i+1], "/")) - } - } - - return nil -} - -// RemoveMetaFields removes meta fields like "@id" from a JSON message -// by using a simple regular expression. (An alternate way to do this -// would be to delete them from the raw, map[string]interface{} -// representation as they are indexed, then iterate the index we made -// and add them back after encoding as JSON, but this is simpler.) -func RemoveMetaFields(rawJSON []byte) []byte { - return idRegexp.ReplaceAllFunc(rawJSON, func(in []byte) []byte { - // matches with a comma on both sides (when "@id" property is - // not the first or last in the object) need to keep exactly - // one comma for correct JSON syntax - comma := []byte{','} - if bytes.HasPrefix(in, comma) && bytes.HasSuffix(in, comma) { - return comma - } - return []byte{} - }) -} - -// AdminHandler is like http.Handler except ServeHTTP may return an error. -// -// If any handler encounters an error, it should be returned for proper -// handling. -type AdminHandler interface { - ServeHTTP(http.ResponseWriter, *http.Request) error -} - -// AdminHandlerFunc is a convenience type like http.HandlerFunc. -type AdminHandlerFunc func(http.ResponseWriter, *http.Request) error - -// ServeHTTP implements the Handler interface. -func (f AdminHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error { - return f(w, r) -} - -// APIError is a structured error that every API -// handler should return for consistency in logging -// and client responses. If Message is unset, then -// Err.Error() will be serialized in its place. 
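Together, unsyncedConfigAccess and handleConfigID give the admin API its path semantics. Hedged usage examples (the paths are illustrative; the `/...` append and `@id` lookups follow from the code above):

```bash
# read a sub-tree of the running config
curl localhost:2019/config/apps/http/servers/example/routes/0

# a POST to an array path ending in /... expands the payload array,
# i.e. append(arr, elems...) rather than appending one element
curl -X POST -H "Content-Type: application/json" \
  -d '[{"handle":[]},{"handle":[]}]' \
  localhost:2019/config/apps/http/servers/example/routes/...

# any object tagged with "@id": "my_route" is addressable directly
curl localhost:2019/id/my_route
```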
-type APIError struct { - HTTPStatus int `json:"-"` - Err error `json:"-"` - Message string `json:"error"` -} - -func (e APIError) Error() string { - if e.Err != nil { - return e.Err.Error() - } - return e.Message -} - -// parseAdminListenAddr extracts a singular listen address from either addr -// or defaultAddr, returning the network and the address of the listener. -func parseAdminListenAddr(addr string, defaultAddr string) (NetworkAddress, error) { - input := addr - if input == "" { - input = defaultAddr - } - listenAddr, err := ParseNetworkAddress(input) - if err != nil { - return NetworkAddress{}, fmt.Errorf("parsing listener address: %v", err) - } - if listenAddr.PortRangeSize() != 1 { - return NetworkAddress{}, fmt.Errorf("must be exactly one listener address; cannot listen on: %s", listenAddr) - } - return listenAddr, nil -} - -// decodeBase64DERCert base64-decodes, then DER-decodes, certStr. -func decodeBase64DERCert(certStr string) (*x509.Certificate, error) { - derBytes, err := base64.StdEncoding.DecodeString(certStr) - if err != nil { - return nil, err - } - return x509.ParseCertificate(derBytes) -} - -var ( - // DefaultAdminListen is the address for the local admin - // listener, if none is specified at startup. - DefaultAdminListen = "localhost:2019" - - // DefaultRemoteAdminListen is the address for the remote - // (TLS-authenticated) admin listener, if enabled and not - // specified otherwise. - DefaultRemoteAdminListen = ":2021" - - // DefaultAdminConfig is the default configuration - // for the local administration endpoint. - DefaultAdminConfig = &AdminConfig{ - Listen: DefaultAdminListen, - } -) - -// PIDFile writes a pidfile to the file at filename. It -// will get deleted before the process gracefully exits. -func PIDFile(filename string) error { - pid := []byte(strconv.Itoa(os.Getpid()) + "\n") - err := ioutil.WriteFile(filename, pid, 0600) - if err != nil { - return err - } - pidfile = filename - return nil -} - -// idRegexp is used to match ID fields and their associated values -// in the config. It also matches adjacent commas so that syntax -// can be preserved no matter where in the object the field appears. -// It supports string and most numeric values. -var idRegexp = regexp.MustCompile(`(?m),?\s*"` + idKey + `"\s*:\s*(-?[0-9]+(\.[0-9]+)?|(?U)".*")\s*,?`) - -// pidfile is the name of the pidfile, if any. -var pidfile string - -// errInternalRedir indicates an internal redirect -// and is useful when admin API handlers rewrite -// the request; in that case, authentication and -// authorization needs to happen again for the -// rewritten request. -var errInternalRedir = fmt.Errorf("internal redirect; re-authorization required") - -const ( - rawConfigKey = "config" - idKey = "@id" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -// keep a reference to admin endpoint singletons while they're active -var ( - serverMu sync.Mutex - localAdminServer, remoteAdminServer *http.Server - identityCertCache *certmagic.Cache -) diff --git a/vendor/github.com/caddyserver/caddy/v2/caddy.go b/vendor/github.com/caddyserver/caddy/v2/caddy.go deleted file mode 100644 index ba025b18..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddy.go +++ /dev/null @@ -1,779 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "path" - "path/filepath" - "runtime/debug" - "strconv" - "strings" - "sync" - "time" - - "github.com/caddyserver/caddy/v2/notify" - "github.com/caddyserver/certmagic" - "github.com/google/uuid" - "go.uber.org/zap" -) - -// Config is the top (or beginning) of the Caddy configuration structure. -// Caddy config is expressed natively as a JSON document. If you prefer -// not to work with JSON directly, there are [many config adapters](/docs/config-adapters) -// available that can convert various inputs into Caddy JSON. -// -// Many parts of this config are extensible through the use of Caddy modules. -// Fields which have a json.RawMessage type and which appear as dots (•••) in -// the online docs can be fulfilled by modules in a certain module -// namespace. The docs show which modules can be used in a given place. -// -// Whenever a module is used, its name must be given either inline as part of -// the module, or as the key to the module's value. The docs will make it clear -// which to use. -// -// Generally, all config settings are optional, as it is Caddy convention to -// have good, documented default values. If a parameter is required, the docs -// should say so. -// -// Go programs which are directly building a Config struct value should take -// care to populate the JSON-encodable fields of the struct (i.e. the fields -// with `json` struct tags) if employing the module lifecycle (e.g. Provision -// method calls). -type Config struct { - Admin *AdminConfig `json:"admin,omitempty"` - Logging *Logging `json:"logging,omitempty"` - - // StorageRaw is a storage module that defines how/where Caddy - // stores assets (such as TLS certificates). The default storage - // module is `caddy.storage.file_system` (the local file system), - // and the default path - // [depends on the OS and environment](/docs/conventions#data-directory). - StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"` - - // AppsRaw are the apps that Caddy will load and run. The - // app module name is the key, and the app's config is the - // associated value. - AppsRaw ModuleMap `json:"apps,omitempty" caddy:"namespace="` - - apps map[string]App - storage certmagic.Storage - - cancelFunc context.CancelFunc -} - -// App is a thing that Caddy runs. -type App interface { - Start() error - Stop() error -} - -// Run runs the given config, replacing any existing config. -func Run(cfg *Config) error { - cfgJSON, err := json.Marshal(cfg) - if err != nil { - return err - } - return Load(cfgJSON, true) -} - -// Load loads the given config JSON and runs it only -// if it is different from the current config or -// forceReload is true. 
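Given that App is only Start/Stop plus module registration, a third-party app plugs into AppsRaw with very little code. A hypothetical sketch (the "hello" module name, its field, and its behavior are invented; RegisterModule, ModuleInfo, and Context.Logger are the real extension points, assuming the API version vendored here):

    package hello

    import (
    	"github.com/caddyserver/caddy/v2"
    	"go.uber.org/zap"
    )

    func init() {
    	caddy.RegisterModule(Hello{})
    }

    // Hello is a hypothetical app module used purely for illustration.
    type Hello struct {
    	Greeting string `json:"greeting,omitempty"`

    	logger *zap.Logger
    }

    // CaddyModule returns the Caddy module information.
    func (Hello) CaddyModule() caddy.ModuleInfo {
    	return caddy.ModuleInfo{
    		ID:  "hello", // app modules live in the root namespace
    		New: func() caddy.Module { return new(Hello) },
    	}
    }

    // Provision sets up the module before Start is called.
    func (h *Hello) Provision(ctx caddy.Context) error {
    	h.logger = ctx.Logger(h)
    	return nil
    }

    // Start satisfies the caddy.App interface.
    func (h *Hello) Start() error {
    	h.logger.Info("starting", zap.String("greeting", h.Greeting))
    	return nil
    }

    // Stop satisfies the caddy.App interface.
    func (h *Hello) Stop() error {
    	h.logger.Info("stopping")
    	return nil
    }

    // Interface guards
    var (
    	_ caddy.App         = (*Hello)(nil)
    	_ caddy.Provisioner = (*Hello)(nil)
    )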
-func Load(cfgJSON []byte, forceReload bool) error { - if err := notify.NotifyReloading(); err != nil { - Log().Error("unable to notify reloading to service manager", zap.Error(err)) - } - - defer func() { - if err := notify.NotifyReadiness(); err != nil { - Log().Error("unable to notify readiness to service manager", zap.Error(err)) - } - }() - - return changeConfig(http.MethodPost, "/"+rawConfigKey, cfgJSON, forceReload) -} - -// changeConfig changes the current config (rawCfg) according to the -// method, traversed via the given path, and uses the given input as -// the new value (if applicable; i.e. "DELETE" doesn't have an input). -// If the resulting config is the same as the previous, no reload will -// occur unless forceReload is true. This function is safe for -// concurrent use. -func changeConfig(method, path string, input []byte, forceReload bool) error { - switch method { - case http.MethodGet, - http.MethodHead, - http.MethodOptions, - http.MethodConnect, - http.MethodTrace: - return fmt.Errorf("method not allowed") - } - - currentCfgMu.Lock() - defer currentCfgMu.Unlock() - - err := unsyncedConfigAccess(method, path, input, nil) - if err != nil { - return err - } - - // the mutation is complete, so encode the entire config as JSON - newCfg, err := json.Marshal(rawCfg[rawConfigKey]) - if err != nil { - return APIError{ - HTTPStatus: http.StatusBadRequest, - Err: fmt.Errorf("encoding new config: %v", err), - } - } - - // if nothing changed, no need to do a whole reload unless the client forces it - if !forceReload && bytes.Equal(rawCfgJSON, newCfg) { - Log().Named("admin.api").Info("config is unchanged") - return nil - } - - // find any IDs in this config and index them - idx := make(map[string]string) - err = indexConfigObjects(rawCfg[rawConfigKey], "/"+rawConfigKey, idx) - if err != nil { - return APIError{ - HTTPStatus: http.StatusInternalServerError, - Err: fmt.Errorf("indexing config: %v", err), - } - } - - // load this new config; if it fails, we need to revert to - // our old representation of caddy's actual config - err = unsyncedDecodeAndRun(newCfg, true) - if err != nil { - if len(rawCfgJSON) > 0 { - // restore old config state to keep it consistent - // with what caddy is still running; we need to - // unmarshal it again because it's likely that - // pointers deep in our rawCfg map were modified - var oldCfg interface{} - err2 := json.Unmarshal(rawCfgJSON, &oldCfg) - if err2 != nil { - err = fmt.Errorf("%v; additionally, restoring old config: %v", err, err2) - } - rawCfg[rawConfigKey] = oldCfg - } - - return fmt.Errorf("loading new config: %v", err) - } - - // success, so update our stored copy of the encoded - // config to keep it consistent with what caddy is now - // running (storing an encoded copy is not strictly - // necessary, but avoids an extra json.Marshal for - // each config change) - rawCfgJSON = newCfg - rawCfgIndex = idx - - return nil -} - -// readConfig traverses the current config to path -// and writes its JSON encoding to out. -func readConfig(path string, out io.Writer) error { - currentCfgMu.RLock() - defer currentCfgMu.RUnlock() - return unsyncedConfigAccess(http.MethodGet, path, nil, out) -} - -// indexConfigObjects recursively searches ptr for object fields named -// "@id" and maps that ID value to the full configPath in the index. -// This function is NOT safe for concurrent access; obtain a write lock -// on currentCfgMu. 
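One detail that makes the bytes.Equal short-circuit in changeConfig sound: encoding/json marshals map keys in sorted order, so re-encoding an unchanged config yields byte-identical output. A small sketch of that compare-before-reload pattern, separate from the vendored implementation:

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    )

    // reloadIfChanged re-encodes cfg and reports whether a reload is
    // needed, mirroring the no-op check in changeConfig.
    func reloadIfChanged(prev []byte, cfg map[string]interface{}) ([]byte, bool, error) {
    	next, err := json.Marshal(cfg)
    	if err != nil {
    		return prev, false, err
    	}
    	if bytes.Equal(prev, next) {
    		return prev, false, nil // config is unchanged; skip the reload
    	}
    	return next, true, nil
    }

    func main() {
    	cfg := map[string]interface{}{"apps": map[string]interface{}{}}
    	prev, _ := json.Marshal(cfg)
    	_, changed, _ := reloadIfChanged(prev, cfg)
    	fmt.Println("reload needed:", changed) // false
    }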
-func indexConfigObjects(ptr interface{}, configPath string, index map[string]string) error { - switch val := ptr.(type) { - case map[string]interface{}: - for k, v := range val { - if k == idKey { - switch idVal := v.(type) { - case string: - index[idVal] = configPath - case float64: // all JSON numbers decode as float64 - index[fmt.Sprintf("%v", idVal)] = configPath - default: - return fmt.Errorf("%s: %s field must be a string or number", configPath, idKey) - } - continue - } - // traverse this object property recursively - err := indexConfigObjects(val[k], path.Join(configPath, k), index) - if err != nil { - return err - } - } - case []interface{}: - // traverse each element of the array recursively - for i := range val { - err := indexConfigObjects(val[i], path.Join(configPath, strconv.Itoa(i)), index) - if err != nil { - return err - } - } - } - - return nil -} - -// unsyncedDecodeAndRun removes any meta fields (like @id tags) -// from cfgJSON, decodes the result into a *Config, and runs -// it as the new config, replacing any other current config. -// It does NOT update the raw config state, as this is a -// lower-level function; most callers will want to use Load -// instead. A write lock on currentCfgMu is required! If -// allowPersist is false, it will not be persisted to disk, -// even if it is configured to. -func unsyncedDecodeAndRun(cfgJSON []byte, allowPersist bool) error { - // remove any @id fields from the JSON, which would cause - // loading to break since the field wouldn't be recognized - strippedCfgJSON := RemoveMetaFields(cfgJSON) - - var newCfg *Config - err := strictUnmarshalJSON(strippedCfgJSON, &newCfg) - if err != nil { - return err - } - - // prevent recursive config loads; that is a user error, and - // although frequent config loads should be safe, we cannot - // guarantee that in the presence of third party plugins, nor - // do we want this error to go unnoticed (we assume it was a - // pulled config if we're not allowed to persist it) - if !allowPersist && - newCfg != nil && - newCfg.Admin != nil && - newCfg.Admin.Config != nil && - newCfg.Admin.Config.LoadRaw != nil && - newCfg.Admin.Config.LoadInterval <= 0 { - return fmt.Errorf("recursive config loading detected: pulled configs cannot pull other configs without positive load_interval") - } - - // run the new config and start all its apps - err = run(newCfg, true) - if err != nil { - return err - } - - // swap old config with the new one - oldCfg := currentCfg - currentCfg = newCfg - - // Stop, Cleanup each old app - unsyncedStop(oldCfg) - - // autosave a non-nil config, if not disabled - if allowPersist && - newCfg != nil && - (newCfg.Admin == nil || - newCfg.Admin.Config == nil || - newCfg.Admin.Config.Persist == nil || - *newCfg.Admin.Config.Persist) { - dir := filepath.Dir(ConfigAutosavePath) - err := os.MkdirAll(dir, 0700) - if err != nil { - Log().Error("unable to create folder for config autosave", - zap.String("dir", dir), - zap.Error(err)) - } else { - err := ioutil.WriteFile(ConfigAutosavePath, cfgJSON, 0600) - if err == nil { - Log().Info("autosaved config (load with --resume flag)", zap.String("file", ConfigAutosavePath)) - } else { - Log().Error("unable to autosave config", - zap.String("file", ConfigAutosavePath), - zap.Error(err)) - } - } - } - - return nil -} - -// run runs newCfg and starts all its apps if -// start is true. 
If any errors happen, cleanup -// is performed if any modules were provisioned; -// apps that were started already will be stopped, -// so this function should not leak resources if -// an error is returned. However, if no error is -// returned and start == false, you should cancel -// the config if you are not going to start it, -// so that each provisioned module will be -// cleaned up. -// -// This is a low-level function; most callers -// will want to use Run instead, which also -// updates the config's raw state. -func run(newCfg *Config, start bool) error { - // because we will need to roll back any state - // modifications if this function errors, we - // keep a single error value and scope all - // sub-operations to their own functions to - // ensure this error value does not get - // overridden or missed when it should have - // been set by a short assignment - var err error - - if newCfg == nil { - newCfg = new(Config) - } - - // create a context within which to load - // modules - essentially our new config's - // execution environment; be sure that - // cleanup occurs when we return if there - // was an error; if no error, it will get - // cleaned up on next config cycle - ctx, cancel := NewContext(Context{Context: context.Background(), cfg: newCfg}) - defer func() { - if err != nil { - // if there were any errors during startup, - // we should cancel the new context we created - // since the associated config won't be used; - // this will cause all modules that were newly - // provisioned to clean themselves up - cancel() - - // also undo any other state changes we made - if currentCfg != nil { - certmagic.Default.Storage = currentCfg.storage - } - } - }() - newCfg.cancelFunc = cancel // clean up later - - // set up logging before anything bad happens - if newCfg.Logging == nil { - newCfg.Logging = new(Logging) - } - err = newCfg.Logging.openLogs(ctx) - if err != nil { - return err - } - - // start the admin endpoint (and stop any prior one) - if start { - err = replaceLocalAdminServer(newCfg) - if err != nil { - return fmt.Errorf("starting caddy administration endpoint: %v", err) - } - } - - // prepare the new config for use - newCfg.apps = make(map[string]App) - - // set up global storage and make it CertMagic's default storage, too - err = func() error { - if newCfg.StorageRaw != nil { - val, err := ctx.LoadModule(newCfg, "StorageRaw") - if err != nil { - return fmt.Errorf("loading storage module: %v", err) - } - stor, err := val.(StorageConverter).CertMagicStorage() - if err != nil { - return fmt.Errorf("creating storage value: %v", err) - } - newCfg.storage = stor - } - - if newCfg.storage == nil { - newCfg.storage = DefaultStorage - } - certmagic.Default.Storage = newCfg.storage - - return nil - }() - if err != nil { - return err - } - - // Load and Provision each app and their submodules - err = func() error { - for appName := range newCfg.AppsRaw { - if _, err := ctx.App(appName); err != nil { - return err - } - } - return nil - }() - if err != nil { - return err - } - - if !start { - return nil - } - - // Start - err = func() error { - var started []string - for name, a := range newCfg.apps { - err := a.Start() - if err != nil { - // an app failed to start, so we need to stop - // all other apps that were already started - for _, otherAppName := range started { - err2 := newCfg.apps[otherAppName].Stop() - if err2 != nil { - err = fmt.Errorf("%v; additionally, aborting app %s: %v", - err, otherAppName, err2) - } - } - return fmt.Errorf("%s app module: start: %v", name, 
err) - } - started = append(started, name) - } - return nil - }() - if err != nil { - return err - } - - // now that the user's config is running, finish setting up anything else, - // such as remote admin endpoint, config loader, etc. - return finishSettingUp(ctx, newCfg) -} - -// finishSettingUp should be run after all apps have successfully started. -func finishSettingUp(ctx Context, cfg *Config) error { - // establish this server's identity (only after apps are loaded - // so that cert management of this endpoint doesn't prevent user's - // servers from starting which likely also use HTTP/HTTPS ports; - // but before remote management which may depend on these creds) - err := manageIdentity(ctx, cfg) - if err != nil { - return fmt.Errorf("provisioning remote admin endpoint: %v", err) - } - - // replace any remote admin endpoint - err = replaceRemoteAdminServer(ctx, cfg) - if err != nil { - return fmt.Errorf("provisioning remote admin endpoint: %v", err) - } - - // if dynamic config is requested, set that up and run it - if cfg != nil && cfg.Admin != nil && cfg.Admin.Config != nil && cfg.Admin.Config.LoadRaw != nil { - val, err := ctx.LoadModule(cfg.Admin.Config, "LoadRaw") - if err != nil { - return fmt.Errorf("loading config loader module: %s", err) - } - runLoadedConfig := func(config []byte) { - Log().Info("applying dynamically-loaded config", zap.String("loader_module", val.(Module).CaddyModule().ID.Name()), zap.Int("pull_interval", int(cfg.Admin.Config.LoadInterval))) - currentCfgMu.Lock() - err := unsyncedDecodeAndRun(config, false) - currentCfgMu.Unlock() - if err == nil { - Log().Info("dynamically-loaded config applied successfully") - } else { - Log().Error("running dynamically-loaded config failed", zap.Error(err)) - } - } - if cfg.Admin.Config.LoadInterval > 0 { - go func() { - select { - // if LoadInterval is positive, will wait for the interval and then run with new config - case <-time.After(time.Duration(cfg.Admin.Config.LoadInterval)): - loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx) - if err != nil { - Log().Error("loading dynamic config failed", zap.Error(err)) - return - } - runLoadedConfig(loadedConfig) - case <-ctx.Done(): - return - } - }() - } else { - // if no LoadInterval is provided, will load config synchronously - loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx) - if err != nil { - return fmt.Errorf("loading dynamic config from %T: %v", val, err) - } - // do this in a goroutine so current config can finish being loaded; otherwise deadlock - go runLoadedConfig(loadedConfig) - } - - } - - return nil -} - -// ConfigLoader is a type that can load a Caddy config. The -// returned config must be valid Caddy JSON. -type ConfigLoader interface { - LoadConfig(Context) ([]byte, error) -} - -// Stop stops running the current configuration. -// It is the antithesis of Run(). This function -// will log any errors that occur during the -// stopping of individual apps and continue to -// stop the others. Stop should only be called -// if not replacing with a new config. -func Stop() error { - currentCfgMu.Lock() - defer currentCfgMu.Unlock() - unsyncedStop(currentCfg) - currentCfg = nil - rawCfgJSON = nil - rawCfgIndex = nil - rawCfg[rawConfigKey] = nil - return nil -} - -// unsyncedStop stops cfg from running, but has -// no locking around cfg. It is a no-op if cfg is -// nil. If any app returns an error when stopping, -// it is logged and the function continues stopping -// the next app. 
This function assumes all apps in -// cfg were successfully started first. -func unsyncedStop(cfg *Config) { - if cfg == nil { - return - } - - // stop each app - for name, a := range cfg.apps { - err := a.Stop() - if err != nil { - log.Printf("[ERROR] stop %s: %v", name, err) - } - } - - // clean up all modules - cfg.cancelFunc() -} - -// Validate loads, provisions, and validates -// cfg, but does not start running it. -func Validate(cfg *Config) error { - err := run(cfg, false) - if err == nil { - cfg.cancelFunc() // call Cleanup on all modules - } - return err -} - -// exitProcess exits the process as gracefully as possible, -// but it always exits, even if there are errors doing so. -// It stops all apps, cleans up external locks, removes any -// PID file, and shuts down admin endpoint(s) in a goroutine. -// Errors are logged along the way, and an appropriate exit -// code is emitted. -func exitProcess(logger *zap.Logger) { - if logger == nil { - logger = Log() - } - logger.Warn("exiting; byeee!! 👋") - - exitCode := ExitCodeSuccess - - // stop all apps - if err := Stop(); err != nil { - logger.Error("failed to stop apps", zap.Error(err)) - exitCode = ExitCodeFailedQuit - } - - // clean up certmagic locks - certmagic.CleanUpOwnLocks(logger) - - // remove pidfile - if pidfile != "" { - err := os.Remove(pidfile) - if err != nil { - logger.Error("cleaning up PID file:", - zap.String("pidfile", pidfile), - zap.Error(err)) - exitCode = ExitCodeFailedQuit - } - } - - // shut down admin endpoint(s) in goroutines so that - // if this function was called from an admin handler, - // it has a chance to return gracefully - // use goroutine so that we can finish responding to API request - go func() { - defer func() { - logger = logger.With(zap.Int("exit_code", exitCode)) - if exitCode == ExitCodeSuccess { - logger.Info("shutdown complete") - } else { - logger.Error("unclean shutdown") - } - os.Exit(exitCode) - }() - - if remoteAdminServer != nil { - err := stopAdminServer(remoteAdminServer) - if err != nil { - exitCode = ExitCodeFailedQuit - logger.Error("failed to stop remote admin server gracefully", zap.Error(err)) - } - } - if localAdminServer != nil { - err := stopAdminServer(localAdminServer) - if err != nil { - exitCode = ExitCodeFailedQuit - logger.Error("failed to stop local admin server gracefully", zap.Error(err)) - } - } - }() -} - -// Duration can be an integer or a string. An integer is -// interpreted as nanoseconds. If a string, it is a Go -// time.Duration value such as `300ms`, `1.5h`, or `2h45m`; -// valid units are `ns`, `us`/`µs`, `ms`, `s`, `m`, `h`, and `d`. -type Duration time.Duration - -// UnmarshalJSON satisfies json.Unmarshaler. -func (d *Duration) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return io.EOF - } - var dur time.Duration - var err error - if b[0] == byte('"') && b[len(b)-1] == byte('"') { - dur, err = ParseDuration(strings.Trim(string(b), `"`)) - } else { - err = json.Unmarshal(b, &dur) - } - *d = Duration(dur) - return err -} - -// ParseDuration parses a duration string, adding -// support for the "d" unit meaning number of days, -// where a day is assumed to be 24h. 
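The Duration type above accepts either a quoted duration string or a bare integer of nanoseconds. A standalone sketch of just that dual decoding (simplified: well-formed quotes are assumed, and the extra "d" unit is omitted since it needs the ParseDuration defined next):

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"
    )

    // Duration mirrors the dual string/integer decoding shown above.
    type Duration time.Duration

    func (d *Duration) UnmarshalJSON(b []byte) error {
    	if len(b) == 0 {
    		return nil
    	}
    	if b[0] == '"' {
    		// quoted: a Go duration string like "300ms" or "1.5h"
    		dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
    		*d = Duration(dur)
    		return err
    	}
    	var ns int64
    	if err := json.Unmarshal(b, &ns); err != nil {
    		return err
    	}
    	*d = Duration(ns) // bare integers are nanoseconds
    	return nil
    }

    func main() {
    	var cfg struct {
    		Timeout Duration `json:"timeout"`
    	}
    	_ = json.Unmarshal([]byte(`{"timeout":"1.5h"}`), &cfg)
    	fmt.Println(time.Duration(cfg.Timeout)) // 1h30m0s

    	_ = json.Unmarshal([]byte(`{"timeout":5000000000}`), &cfg)
    	fmt.Println(time.Duration(cfg.Timeout)) // 5s
    }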
-func ParseDuration(s string) (time.Duration, error) { - var inNumber bool - var numStart int - for i := 0; i < len(s); i++ { - ch := s[i] - if ch == 'd' { - daysStr := s[numStart:i] - days, err := strconv.ParseFloat(daysStr, 64) - if err != nil { - return 0, err - } - hours := days * 24.0 - hoursStr := strconv.FormatFloat(hours, 'f', -1, 64) - s = s[:numStart] + hoursStr + "h" + s[i+1:] - i-- - continue - } - if !inNumber { - numStart = i - } - inNumber = (ch >= '0' && ch <= '9') || ch == '.' || ch == '-' || ch == '+' - } - return time.ParseDuration(s) -} - -// InstanceID returns the UUID for this instance, and generates one if it -// does not already exist. The UUID is stored in the local data directory, -// regardless of storage configuration, since each instance is intended to -// have its own unique ID. -func InstanceID() (uuid.UUID, error) { - uuidFilePath := filepath.Join(AppDataDir(), "instance.uuid") - uuidFileBytes, err := ioutil.ReadFile(uuidFilePath) - if os.IsNotExist(err) { - uuid, err := uuid.NewRandom() - if err != nil { - return uuid, err - } - err = ioutil.WriteFile(uuidFilePath, []byte(uuid.String()), 0600) - return uuid, err - } else if err != nil { - return [16]byte{}, err - } - return uuid.ParseBytes(uuidFileBytes) -} - -// GoModule returns the build info of this Caddy -// build from debug.BuildInfo (requires Go modules). -// If no version information is available, a non-nil -// value will still be returned, but with an -// unknown version. -func GoModule() *debug.Module { - var mod debug.Module - return goModule(&mod) -} - -// goModule holds the actual implementation of GoModule. -// Allocating debug.Module in GoModule() and passing a -// reference to goModule enables mid-stack inlining. -func goModule(mod *debug.Module) *debug.Module { - mod.Version = "unknown" - bi, ok := debug.ReadBuildInfo() - if ok { - mod.Path = bi.Main.Path - // The recommended way to build Caddy involves - // creating a separate main module, which - // TODO: track related Go issue: https://github.com/golang/go/issues/29228 - // once that issue is fixed, we should just be able to use bi.Main... hopefully. - for _, dep := range bi.Deps { - if dep.Path == ImportPath { - return dep - } - } - return &bi.Main - } - return mod -} - -// CtxKey is a value type for use with context.WithValue. -type CtxKey string - -// This group of variables pertains to the current configuration. -var ( - // currentCfgMu protects everything in this var block. - currentCfgMu sync.RWMutex - - // currentCfg is the currently-running configuration. - currentCfg *Config - - // rawCfg is the current, generic-decoded configuration; - // we initialize it as a map with one field ("config") - // to maintain parity with the API endpoint and to avoid - // the special case of having to access/mutate the variable - // directly without traversing into it. - rawCfg = map[string]interface{}{ - rawConfigKey: nil, - } - - // rawCfgJSON is the JSON-encoded form of rawCfg. Keeping - // this around avoids an extra Marshal call during changes. - rawCfgJSON []byte - - // rawCfgIndex is the map of user-assigned ID to expanded - // path, for converting /id/ paths to /config/ paths. - rawCfgIndex map[string]string -) - -// ImportPath is the package import path for Caddy core. 
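Because ParseDuration rewrites each day count into hours in place, repeated units are fine afterwards (time.ParseDuration accepts "48h12h"). A usage sketch, assuming the vendored caddy package is importable:

    package main

    import (
    	"fmt"

    	"github.com/caddyserver/caddy/v2"
    )

    func main() {
    	// "2d12h" becomes "48h12h" internally, i.e. 60 hours; fractional
    	// day counts such as "1.5d" also work because the day count is
    	// parsed as a float before the *24 conversion.
    	d, err := caddy.ParseDuration("2d12h")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(d) // 60h0m0s
    }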
-const ImportPath = "github.com/caddyserver/caddy/v2" diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/adapter.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/adapter.go deleted file mode 100644 index 5b80df3f..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/adapter.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "bytes" - "encoding/json" - "fmt" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" -) - -// Adapter adapts Caddyfile to Caddy JSON. -type Adapter struct { - ServerType ServerType -} - -// Adapt converts the Caddyfile config in body to Caddy JSON. -func (a Adapter) Adapt(body []byte, options map[string]interface{}) ([]byte, []caddyconfig.Warning, error) { - if a.ServerType == nil { - return nil, nil, fmt.Errorf("no server type") - } - if options == nil { - options = make(map[string]interface{}) - } - - filename, _ := options["filename"].(string) - if filename == "" { - filename = "Caddyfile" - } - - serverBlocks, err := Parse(filename, body) - if err != nil { - return nil, nil, err - } - - cfg, warnings, err := a.ServerType.Setup(serverBlocks, options) - if err != nil { - return nil, warnings, err - } - - // lint check: see if input was properly formatted; sometimes messy files files parse - // successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry) - if warning, different := formattingDifference(filename, body); different { - warnings = append(warnings, warning) - } - - result, err := json.Marshal(cfg) - - return result, warnings, err -} - -// formattingDifference returns a warning and true if the formatted version -// is any different from the input; empty warning and false otherwise. -// TODO: also perform this check on imported files -func formattingDifference(filename string, body []byte) (caddyconfig.Warning, bool) { - // replace windows-style newlines to normalize comparison - normalizedBody := bytes.Replace(body, []byte("\r\n"), []byte("\n"), -1) - - formatted := Format(normalizedBody) - if bytes.Equal(formatted, normalizedBody) { - return caddyconfig.Warning{}, false - } - - // find where the difference is - line := 1 - for i, ch := range normalizedBody { - if i >= len(formatted) || ch != formatted[i] { - break - } - if ch == '\n' { - line++ - } - } - return caddyconfig.Warning{ - File: filename, - Line: line, - Message: "input is not formatted with 'caddy fmt'", - }, true -} - -// Unmarshaler is a type that can unmarshal -// Caddyfile tokens to set itself up for a -// JSON encoding. The goal of an unmarshaler -// is not to set itself up for actual use, -// but to set itself up for being marshaled -// into JSON. Caddyfile-unmarshaled values -// will not be used directly; they will be -// encoded as JSON and then used from that. 
-// Implementations must be able to support -// multiple segments (instances of their -// directive or batch of tokens); typically -// this means wrapping all token logic in -// a loop: `for d.Next() { ... }`. -type Unmarshaler interface { - UnmarshalCaddyfile(d *Dispenser) error -} - -// ServerType is a type that can evaluate a Caddyfile and set up a caddy config. -type ServerType interface { - // Setup takes the server blocks which - // contain tokens, as well as options - // (e.g. CLI flags) and creates a Caddy - // config, along with any warnings or - // an error. - Setup([]ServerBlock, map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error) -} - -// UnmarshalModule instantiates a module with the given ID and invokes -// UnmarshalCaddyfile on the new value using the immediate next segment -// of d as input. In other words, d's next token should be the first -// token of the module's Caddyfile input. -// -// This function is used when the next segment of Caddyfile tokens -// belongs to another Caddy module. The returned value is often -// type-asserted to the module's associated type for practical use -// when setting up a config. -func UnmarshalModule(d *Dispenser, moduleID string) (Unmarshaler, error) { - mod, err := caddy.GetModule(moduleID) - if err != nil { - return nil, d.Errf("getting module named '%s': %v", moduleID, err) - } - inst := mod.New() - unm, ok := inst.(Unmarshaler) - if !ok { - return nil, d.Errf("module %s is not a Caddyfile unmarshaler; is %T", mod.ID, inst) - } - err = unm.UnmarshalCaddyfile(d.NewFromNextSegment()) - if err != nil { - return nil, err - } - return unm, nil -} - -// Interface guard -var _ caddyconfig.Adapter = (*Adapter)(nil) diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/dispenser.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/dispenser.go deleted file mode 100644 index fa7f5e75..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/dispenser.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "errors" - "fmt" - "io" - "log" - "strings" -) - -// Dispenser is a type that dispenses tokens, similarly to a lexer, -// except that it can do so with some notion of structure. An empty -// Dispenser is invalid; call NewDispenser to make a proper instance. -type Dispenser struct { - tokens []Token - cursor int - nesting int -} - -// NewDispenser returns a Dispenser filled with the given tokens. -func NewDispenser(tokens []Token) *Dispenser { - return &Dispenser{ - tokens: tokens, - cursor: -1, - } -} - -// NewTestDispenser parses input into tokens and creates a new -// Dispenser for test purposes only; any errors are fatal. 
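A typical Unmarshaler wraps its token logic in `for d.Next() { ... }` as the comment above prescribes, so repeated instances of its directive all land in one value. A hypothetical sketch (the Gizmo type and its directive syntax are invented; the Dispenser methods used are the ones defined below):

    package gizmo

    import (
    	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
    )

    // Gizmo is a hypothetical module configured as:
    //
    //	gizmo <name> {
    //	    retries <n>
    //	}
    type Gizmo struct {
    	Name    string `json:"name,omitempty"`
    	Retries string `json:"retries,omitempty"`
    }

    // UnmarshalCaddyfile sets up Gizmo from Caddyfile tokens.
    func (g *Gizmo) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
    	for d.Next() { // consume directive name; loop supports multiple segments
    		if !d.Args(&g.Name) {
    			return d.ArgErr()
    		}
    		for nesting := d.Nesting(); d.NextBlock(nesting); {
    			switch d.Val() {
    			case "retries":
    				if !d.Args(&g.Retries) {
    					return d.ArgErr()
    				}
    			default:
    				return d.Errf("unrecognized subdirective '%s'", d.Val())
    			}
    		}
    	}
    	return nil
    }

    // Interface guard
    var _ caddyfile.Unmarshaler = (*Gizmo)(nil)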
-func NewTestDispenser(input string) *Dispenser { - tokens, err := allTokens("Testfile", []byte(input)) - if err != nil && err != io.EOF { - log.Fatalf("getting all tokens from input: %v", err) - } - return NewDispenser(tokens) -} - -// Next loads the next token. Returns true if a token -// was loaded; false otherwise. If false, all tokens -// have been consumed. -func (d *Dispenser) Next() bool { - if d.cursor < len(d.tokens)-1 { - d.cursor++ - return true - } - return false -} - -// Prev moves to the previous token. It does the inverse -// of Next(), except this function may decrement the cursor -// to -1 so that the next call to Next() points to the -// first token; this allows dispensing to "start over". This -// method returns true if the cursor ends up pointing to a -// valid token. -func (d *Dispenser) Prev() bool { - if d.cursor > -1 { - d.cursor-- - return d.cursor > -1 - } - return false -} - -// NextArg loads the next token if it is on the same -// line and if it is not a block opening (open curly -// brace). Returns true if an argument token was -// loaded; false otherwise. If false, all tokens on -// the line have been consumed except for potentially -// a block opening. It handles imported tokens -// correctly. -func (d *Dispenser) NextArg() bool { - if !d.nextOnSameLine() { - return false - } - if d.Val() == "{" { - // roll back; a block opening is not an argument - d.cursor-- - return false - } - return true -} - -// nextOnSameLine advances the cursor if the next -// token is on the same line of the same file. -func (d *Dispenser) nextOnSameLine() bool { - if d.cursor < 0 { - d.cursor++ - return true - } - if d.cursor >= len(d.tokens) { - return false - } - if d.cursor < len(d.tokens)-1 && - d.tokens[d.cursor].File == d.tokens[d.cursor+1].File && - d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) == d.tokens[d.cursor+1].Line { - d.cursor++ - return true - } - return false -} - -// NextLine loads the next token only if it is not on the same -// line as the current token, and returns true if a token was -// loaded; false otherwise. If false, there is not another token -// or it is on the same line. It handles imported tokens correctly. -func (d *Dispenser) NextLine() bool { - if d.cursor < 0 { - d.cursor++ - return true - } - if d.cursor >= len(d.tokens) { - return false - } - if d.cursor < len(d.tokens)-1 && - (d.tokens[d.cursor].File != d.tokens[d.cursor+1].File || - d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) < d.tokens[d.cursor+1].Line) { - d.cursor++ - return true - } - return false -} - -// NextBlock can be used as the condition of a for loop -// to load the next token as long as it opens a block or -// is already in a block nested more than initialNestingLevel. -// In other words, a loop over NextBlock() will iterate -// all tokens in the block assuming the next token is an -// open curly brace, until the matching closing brace. -// The open and closing brace tokens for the outer-most -// block will be consumed internally and omitted from -// the iteration. -// -// Proper use of this method looks like this: -// -// for nesting := d.Nesting(); d.NextBlock(nesting); { -// } -// -// However, in simple cases where it is known that the -// Dispenser is new and has not already traversed state -// by a loop over NextBlock(), this will do: -// -// for d.NextBlock(0) { -// } -// -// As with other token parsing logic, a loop over -// NextBlock() should be contained within a loop over -// Next(), as it is usually prudent to skip the initial -// token. 
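The cursor semantics are easiest to see with NewTestDispenser: Next crosses line boundaries, while NextArg (and therefore RemainingArgs) stops at the end of the current line. A quick sketch using that test-only helper:

    package main

    import (
    	"fmt"

    	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
    )

    func main() {
    	d := caddyfile.NewTestDispenser("root * /var/www\nencode gzip zstd")

    	for d.Next() { // one iteration per directive line in this input
    		directive := d.Val()
    		args := d.RemainingArgs() // same-line args; stops at '{' or EOL
    		fmt.Println(directive, args)
    	}
    	// Output:
    	// root [* /var/www]
    	// encode [gzip zstd]
    }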
-func (d *Dispenser) NextBlock(initialNestingLevel int) bool { - if d.nesting > initialNestingLevel { - if !d.Next() { - return false // should be EOF error - } - if d.Val() == "}" && !d.nextOnSameLine() { - d.nesting-- - } else if d.Val() == "{" && !d.nextOnSameLine() { - d.nesting++ - } - return d.nesting > initialNestingLevel - } - if !d.nextOnSameLine() { // block must open on same line - return false - } - if d.Val() != "{" { - d.cursor-- // roll back if not opening brace - return false - } - d.Next() // consume open curly brace - if d.Val() == "}" { - return false // open and then closed right away - } - d.nesting++ - return true -} - -// Nesting returns the current nesting level. Necessary -// if using NextBlock() -func (d *Dispenser) Nesting() int { - return d.nesting -} - -// Val gets the text of the current token. If there is no token -// loaded, it returns empty string. -func (d *Dispenser) Val() string { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return "" - } - return d.tokens[d.cursor].Text -} - -// Line gets the line number of the current token. -// If there is no token loaded, it returns 0. -func (d *Dispenser) Line() int { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return 0 - } - return d.tokens[d.cursor].Line -} - -// File gets the filename where the current token originated. -func (d *Dispenser) File() string { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return "" - } - return d.tokens[d.cursor].File -} - -// Args is a convenience function that loads the next arguments -// (tokens on the same line) into an arbitrary number of strings -// pointed to in targets. If there are not enough argument tokens -// available to fill targets, false is returned and the remaining -// targets are left unchanged. If all the targets are filled, -// then true is returned. -func (d *Dispenser) Args(targets ...*string) bool { - for i := 0; i < len(targets); i++ { - if !d.NextArg() { - return false - } - *targets[i] = d.Val() - } - return true -} - -// AllArgs is like Args, but if there are more argument tokens -// available than there are targets, false is returned. The -// number of available argument tokens must match the number of -// targets exactly to return true. -func (d *Dispenser) AllArgs(targets ...*string) bool { - if !d.Args(targets...) { - return false - } - if d.NextArg() { - d.Prev() - return false - } - return true -} - -// RemainingArgs loads any more arguments (tokens on the same line) -// into a slice and returns them. Open curly brace tokens also indicate -// the end of arguments, and the curly brace is not included in -// the return value nor is it loaded. -func (d *Dispenser) RemainingArgs() []string { - var args []string - for d.NextArg() { - args = append(args, d.Val()) - } - return args -} - -// NewFromNextSegment returns a new dispenser with a copy of -// the tokens from the current token until the end of the -// "directive" whether that be to the end of the line or -// the end of a block that starts at the end of the line; -// in other words, until the end of the segment. -func (d *Dispenser) NewFromNextSegment() *Dispenser { - return NewDispenser(d.NextSegment()) -} - -// NextSegment returns a copy of the tokens from the current -// token until the end of the line or block that starts at -// the end of the line. 
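And the prescribed nesting-aware loop in action: it yields every token inside the braces without including the outer braces themselves. A sketch:

    package main

    import (
    	"fmt"

    	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
    )

    func main() {
    	d := caddyfile.NewTestDispenser("log {\noutput stderr\nlevel DEBUG\n}")

    	for d.Next() { // loads "log"
    		for nesting := d.Nesting(); d.NextBlock(nesting); {
    			fmt.Println(d.Val()) // output, stderr, level, DEBUG
    		}
    	}
    }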
-func (d *Dispenser) NextSegment() Segment { - tkns := Segment{d.Token()} - for d.NextArg() { - tkns = append(tkns, d.Token()) - } - var openedBlock bool - for nesting := d.Nesting(); d.NextBlock(nesting); { - if !openedBlock { - // because NextBlock() consumes the initial open - // curly brace, we rewind here to append it, since - // our case is special in that we want the new - // dispenser to have all the tokens including - // surrounding curly braces - d.Prev() - tkns = append(tkns, d.Token()) - d.Next() - openedBlock = true - } - tkns = append(tkns, d.Token()) - } - if openedBlock { - // include closing brace - tkns = append(tkns, d.Token()) - - // do not consume the closing curly brace; the - // next iteration of the enclosing loop will - // call Next() and consume it - } - return tkns -} - -// Token returns the current token. -func (d *Dispenser) Token() Token { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return Token{} - } - return d.tokens[d.cursor] -} - -// Reset sets d's cursor to the beginning, as -// if this was a new and unused dispenser. -func (d *Dispenser) Reset() { - d.cursor = -1 - d.nesting = 0 -} - -// ArgErr returns an argument error, meaning that another -// argument was expected but not found. In other words, -// a line break or open curly brace was encountered instead of -// an argument. -func (d *Dispenser) ArgErr() error { - if d.Val() == "{" { - return d.Err("Unexpected token '{', expecting argument") - } - return d.Errf("Wrong argument count or unexpected line ending after '%s'", d.Val()) -} - -// SyntaxErr creates a generic syntax error which explains what was -// found and what was expected. -func (d *Dispenser) SyntaxErr(expected string) error { - msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', expecting '%s'", d.File(), d.Line(), d.Val(), expected) - return errors.New(msg) -} - -// EOFErr returns an error indicating that the dispenser reached -// the end of the input when searching for the next token. -func (d *Dispenser) EOFErr() error { - return d.Errf("Unexpected EOF") -} - -// Err generates a custom parse-time error with a message of msg. -func (d *Dispenser) Err(msg string) error { - return d.Errf(msg) -} - -// Errf is like Err, but for formatted error messages -func (d *Dispenser) Errf(format string, args ...interface{}) error { - err := fmt.Errorf(format, args...) - return fmt.Errorf("%s:%d - Error during parsing: %w", d.File(), d.Line(), err) -} - -// Delete deletes the current token and returns the updated slice -// of tokens. The cursor is not advanced to the next token. -// Because deletion modifies the underlying slice, this method -// should only be called if you have access to the original slice -// of tokens and/or are using the slice of tokens outside this -// Dispenser instance. If you do not re-assign the slice with the -// return value of this method, inconsistencies in the token -// array will become apparent (or worse, hide from you like they -// did me for 3 and a half freaking hours late one night). -func (d *Dispenser) Delete() []Token { - if d.cursor >= 0 && d.cursor <= len(d.tokens)-1 { - d.tokens = append(d.tokens[:d.cursor], d.tokens[d.cursor+1:]...) - d.cursor-- - } - return d.tokens -} - -// numLineBreaks counts how many line breaks are in the token -// value given by the token index tknIdx. It returns 0 if the -// token does not exist or there are no line breaks. 
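Errf (and the helpers built on it) prefix errors with the current token's file and line, which is what makes parse errors locatable. A small sketch of the message shape, assuming a fresh test dispenser:

    package main

    import (
    	"fmt"

    	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
    )

    func main() {
    	d := caddyfile.NewTestDispenser("root")
    	d.Next() // load the "root" token (file "Testfile", line 1)

    	// Prints: Testfile:1 - Error during parsing: expected an argument
    	fmt.Println(d.Errf("expected an argument"))
    }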
-func (d *Dispenser) numLineBreaks(tknIdx int) int { - if tknIdx < 0 || tknIdx >= len(d.tokens) { - return 0 - } - return strings.Count(d.tokens[tknIdx].Text, "\n") -} - -// isNewLine determines whether the current token is on a different -// line (higher line number) than the previous token. It handles imported -// tokens correctly. If there isn't a previous token, it returns true. -func (d *Dispenser) isNewLine() bool { - if d.cursor < 1 { - return true - } - if d.cursor > len(d.tokens)-1 { - return false - } - return d.tokens[d.cursor-1].File != d.tokens[d.cursor].File || - d.tokens[d.cursor-1].Line+d.numLineBreaks(d.cursor-1) < d.tokens[d.cursor].Line -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter.go deleted file mode 100644 index cb0033f7..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "bytes" - "io" - "unicode" -) - -// Format formats the input Caddyfile to a standard, nice-looking -// appearance. It works by reading each rune of the input and taking -// control over all the bracing and whitespace that is written; otherwise, -// words, comments, placeholders, and escaped characters are all treated -// literally and written as they appear in the input. 
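A usage sketch for Format, assuming the vendored package is importable; the expected output in the comment is my reading of the state machine below (tab indentation, normalized spacing, trailing newline), not a verbatim fixture:

    package main

    import (
    	"fmt"

    	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
    )

    func main() {
    	messy := []byte("example.com   {\n\t\t respond  \"hi\"\n}")
    	fmt.Printf("%s", caddyfile.Format(messy))
    	// example.com {
    	// 	respond "hi"
    	// }
    }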
-func Format(input []byte) []byte { - input = bytes.TrimSpace(input) - - out := new(bytes.Buffer) - rdr := bytes.NewReader(input) - - var ( - last rune // the last character that was written to the result - - space = true // whether current/previous character was whitespace (beginning of input counts as space) - beginningOfLine = true // whether we are at beginning of line - - openBrace bool // whether current word/token is or started with open curly brace - openBraceWritten bool // if openBrace, whether that brace was written or not - openBraceSpace bool // whether there was a non-newline space before open brace - - newLines int // count of newlines consumed - - comment bool // whether we're in a comment - quoted bool // whether we're in a quoted segment - escaped bool // whether current char is escaped - - nesting int // indentation level - ) - - write := func(ch rune) { - out.WriteRune(ch) - last = ch - } - - indent := func() { - for tabs := nesting; tabs > 0; tabs-- { - write('\t') - } - } - - nextLine := func() { - write('\n') - beginningOfLine = true - } - - for { - ch, _, err := rdr.ReadRune() - if err != nil { - if err == io.EOF { - break - } - panic(err) - } - - if comment { - if ch == '\n' { - comment = false - space = true - nextLine() - continue - } else { - write(ch) - continue - } - } - - if !escaped && ch == '\\' { - if space { - write(' ') - space = false - } - write(ch) - escaped = true - continue - } - - if escaped { - write(ch) - escaped = false - continue - } - - if quoted { - if ch == '"' { - quoted = false - } - write(ch) - continue - } - - if space && ch == '"' { - quoted = true - } - - if unicode.IsSpace(ch) { - space = true - if ch == '\n' { - newLines++ - } - continue - } - spacePrior := space - space = false - - ////////////////////////////////////////////////////////// - // I find it helpful to think of the formatting loop in two - // main sections; by the time we reach this point, we - // know we are in a "regular" part of the file: we know - // the character is not a space, not in a literal segment - // like a comment or quoted, it's not escaped, etc. - ////////////////////////////////////////////////////////// - - if ch == '#' { - comment = true - } - - if openBrace && spacePrior && !openBraceWritten { - if nesting == 0 && last == '}' { - nextLine() - nextLine() - } - - openBrace = false - if beginningOfLine { - indent() - } else if !openBraceSpace { - write(' ') - } - write('{') - openBraceWritten = true - nextLine() - newLines = 0 - nesting++ - } - - switch { - case ch == '{': - openBrace = true - openBraceWritten = false - openBraceSpace = spacePrior && !beginningOfLine - if openBraceSpace { - write(' ') - } - continue - - case ch == '}' && (spacePrior || !openBrace): - if last != '\n' { - nextLine() - } - if nesting > 0 { - nesting-- - } - indent() - write('}') - newLines = 0 - continue - } - - if newLines > 2 { - newLines = 2 - } - for i := 0; i < newLines; i++ { - nextLine() - } - newLines = 0 - if beginningOfLine { - indent() - } - if nesting == 0 && last == '}' && beginningOfLine { - nextLine() - nextLine() - } - - if !beginningOfLine && spacePrior { - write(' ') - } - - if openBrace && !openBraceWritten { - write('{') - openBraceWritten = true - } - write(ch) - - beginningOfLine = false - } - - // the Caddyfile does not need any leading or trailing spaces, but... 
-	trimmedResult := bytes.TrimSpace(out.Bytes())
-
-	// ...Caddyfiles should, however, end with a newline because
-	// newlines are significant to the syntax of the file
-	return append(trimmedResult, '\n')
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter_fuzz.go
deleted file mode 100644
index b1bbd84a..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter_fuzz.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build gofuzz
-
-package caddyfile
-
-import "bytes"
-
-func FuzzFormat(input []byte) int {
-	formatted := Format(input)
-	if bytes.Equal(formatted, Format(formatted)) {
-		return 1
-	}
-	return 0
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/importgraph.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/importgraph.go
deleted file mode 100644
index 659c3680..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/importgraph.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package caddyfile - -import ( - "fmt" -) - -type adjacency map[string][]string - -type importGraph struct { - nodes map[string]bool - edges adjacency -} - -func (i *importGraph) addNode(name string) { - if i.nodes == nil { - i.nodes = make(map[string]bool) - } - if _, exists := i.nodes[name]; exists { - return - } - i.nodes[name] = true -} -func (i *importGraph) addNodes(names []string) { - for _, name := range names { - i.addNode(name) - } -} - -func (i *importGraph) removeNode(name string) { - delete(i.nodes, name) -} -func (i *importGraph) removeNodes(names []string) { - for _, name := range names { - i.removeNode(name) - } -} - -func (i *importGraph) addEdge(from, to string) error { - if !i.exists(from) || !i.exists(to) { - return fmt.Errorf("one of the nodes does not exist") - } - - if i.willCycle(to, from) { - return fmt.Errorf("a cycle of imports exists between %s and %s", from, to) - } - - if i.areConnected(from, to) { - // if connected, there's nothing to do - return nil - } - - if i.nodes == nil { - i.nodes = make(map[string]bool) - } - if i.edges == nil { - i.edges = make(adjacency) - } - - i.edges[from] = append(i.edges[from], to) - return nil -} -func (i *importGraph) addEdges(from string, tos []string) error { - for _, to := range tos { - err := i.addEdge(from, to) - if err != nil { - return err - } - } - return nil -} - -func (i *importGraph) areConnected(from, to string) bool { - al, ok := i.edges[from] - if !ok { - return false - } - for _, v := range al { - if v == to { - return true - } - } - return false -} - -func (i *importGraph) willCycle(from, to string) bool { - collector := make(map[string]bool) - - var visit func(string) - visit = func(start string) { - if !collector[start] { - collector[start] = true - for _, v := range i.edges[start] { - visit(v) - } - } - } - - for _, v := range i.edges[from] { - visit(v) - } - for k := range collector { - if to == k { - return true - } - } - - return false -} - -func (i *importGraph) exists(key string) bool { - _, exists := i.nodes[key] - return exists -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer.go deleted file mode 100644 index f4da2391..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "bufio" - "bytes" - "io" - "unicode" -) - -type ( - // lexer is a utility which can get values, token by - // token, from a Reader. A token is a word, and tokens - // are separated by whitespace. A word can be enclosed - // in quotes if it contains whitespace. - lexer struct { - reader *bufio.Reader - token Token - line int - skippedLines int - } - - // Token represents a single parsable unit. 
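The guard in addEdge asks, via willCycle, whether the new edge's source is already reachable from its destination; the walk is a plain depth-first search over the adjacency map. A generic standalone sketch of the same check:

    package main

    import "fmt"

    // reachable reports whether target can be reached from start by
    // following edges; willCycle runs this walk with start=to and
    // target=from before an edge from->to is accepted.
    func reachable(edges map[string][]string, start, target string) bool {
    	seen := make(map[string]bool)
    	var visit func(string) bool
    	visit = func(node string) bool {
    		if node == target {
    			return true
    		}
    		if seen[node] {
    			return false
    		}
    		seen[node] = true
    		for _, next := range edges[node] {
    			if visit(next) {
    				return true
    			}
    		}
    		return false
    	}
    	return visit(start)
    }

    func main() {
    	edges := map[string][]string{
    		"site.conf":   {"common.conf"},
    		"common.conf": {"tls.conf"},
    	}
    	// Adding tls.conf -> site.conf would close an import cycle,
    	// because site.conf already (transitively) imports tls.conf:
    	fmt.Println(reachable(edges, "site.conf", "tls.conf")) // true, so reject
    }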
- Token struct { - File string - Line int - Text string - inSnippet bool - snippetName string - } -) - -// load prepares the lexer to scan an input for tokens. -// It discards any leading byte order mark. -func (l *lexer) load(input io.Reader) error { - l.reader = bufio.NewReader(input) - l.line = 1 - - // discard byte order mark, if present - firstCh, _, err := l.reader.ReadRune() - if err != nil { - return err - } - if firstCh != 0xFEFF { - err := l.reader.UnreadRune() - if err != nil { - return err - } - } - - return nil -} - -// next loads the next token into the lexer. -// A token is delimited by whitespace, unless -// the token starts with a quotes character (") -// in which case the token goes until the closing -// quotes (the enclosing quotes are not included). -// Inside quoted strings, quotes may be escaped -// with a preceding \ character. No other chars -// may be escaped. The rest of the line is skipped -// if a "#" character is read in. Returns true if -// a token was loaded; false otherwise. -func (l *lexer) next() bool { - var val []rune - var comment, quoted, btQuoted, escaped bool - - makeToken := func() bool { - l.token.Text = string(val) - return true - } - - for { - ch, _, err := l.reader.ReadRune() - if err != nil { - if len(val) > 0 { - return makeToken() - } - if err == io.EOF { - return false - } - panic(err) - } - - if !escaped && !btQuoted && ch == '\\' { - escaped = true - continue - } - - if quoted || btQuoted { - if quoted && escaped { - // all is literal in quoted area, - // so only escape quotes - if ch != '"' { - val = append(val, '\\') - } - escaped = false - } else { - if quoted && ch == '"' { - return makeToken() - } - if btQuoted && ch == '`' { - return makeToken() - } - } - if ch == '\n' { - l.line += 1 + l.skippedLines - l.skippedLines = 0 - } - val = append(val, ch) - continue - } - - if unicode.IsSpace(ch) { - if ch == '\r' { - continue - } - if ch == '\n' { - if escaped { - l.skippedLines++ - escaped = false - } else { - l.line += 1 + l.skippedLines - l.skippedLines = 0 - } - comment = false - } - if len(val) > 0 { - return makeToken() - } - continue - } - - if ch == '#' && len(val) == 0 { - comment = true - } - if comment { - continue - } - - if len(val) == 0 { - l.token = Token{Line: l.line} - if ch == '"' { - quoted = true - continue - } - if ch == '`' { - btQuoted = true - continue - } - } - - if escaped { - val = append(val, '\\') - escaped = false - } - - val = append(val, ch) - } -} - -// Tokenize takes bytes as input and lexes it into -// a list of tokens that can be parsed as a Caddyfile. -// Also takes a filename to fill the token's File as -// the source of the tokens, which is important to -// determine relative paths for `import` directives. -func Tokenize(input []byte, filename string) ([]Token, error) { - l := lexer{} - if err := l.load(bytes.NewReader(input)); err != nil { - return nil, err - } - var tokens []Token - for l.next() { - l.token.File = filename - tokens = append(tokens, l.token) - } - return tokens, nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer_fuzz.go deleted file mode 100644 index 29348446..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer_fuzz.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build gofuzz
-
-package caddyfile
-
-func FuzzTokenize(input []byte) int {
-	tokens, err := Tokenize(input, "Caddyfile")
-	if err != nil {
-		return 0
-	}
-	if len(tokens) == 0 {
-		return -1
-	}
-	return 1
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/parse.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/parse.go
deleted file mode 100644
index c0f60794..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/parse.go
+++ /dev/null
@@ -1,616 +0,0 @@
-// Copyright 2015 Light Code Labs, LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyfile
-
-import (
-	"bytes"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	"github.com/caddyserver/caddy/v2"
-)
-
-// Parse parses the input just enough to group tokens, in
-// order, by server block. No further parsing is performed.
-// Server blocks are returned in the order in which they appear.
-// Directives that do not appear in validDirectives will cause
-// an error. If you do not want to check for valid directives,
-// pass in nil instead.
-//
-// Environment variables in {$ENVIRONMENT_VARIABLE} notation
-// will be replaced before parsing begins.
-func Parse(filename string, input []byte) ([]ServerBlock, error) {
-	tokens, err := allTokens(filename, input)
-	if err != nil {
-		return nil, err
-	}
-	p := parser{
-		Dispenser: NewDispenser(tokens),
-		importGraph: importGraph{
-			nodes: make(map[string]bool),
-			edges: make(adjacency),
-		},
-	}
-	return p.parseAll()
-}
-
-// replaceEnvVars replaces all occurrences of environment variables.
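The `{$VAR}` syntax parsed below supports a `:`-delimited fallback, e.g. `{$PORT:8080}`. A standalone sketch of the lookup rule for a single span body (the environment value wins; the default applies only when the variable is unset, and the example assumes PORT is not set when this runs):

    package main

    import (
    	"fmt"
    	"os"
    	"strings"
    )

    // expandOne resolves a single {$NAME} or {$NAME:default} span body,
    // mirroring the lookup logic in replaceEnvVars.
    func expandOne(envString string) string {
    	parts := strings.SplitN(envString, ":", 2)
    	value, found := os.LookupEnv(parts[0])
    	if !found && len(parts) == 2 {
    		value = parts[1] // unset (not merely empty): use the default
    	}
    	return value
    }

    func main() {
    	os.Setenv("HOST", "example.com")
    	fmt.Println(expandOne("HOST:localhost")) // example.com (env wins)
    	fmt.Println(expandOne("PORT:8080"))      // 8080 (default used)
    }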
-func replaceEnvVars(input []byte) ([]byte, error) { - var offset int - for { - begin := bytes.Index(input[offset:], spanOpen) - if begin < 0 { - break - } - begin += offset // make beginning relative to input, not offset - end := bytes.Index(input[begin+len(spanOpen):], spanClose) - if end < 0 { - break - } - end += begin + len(spanOpen) // make end relative to input, not begin - - // get the name; if there is no name, skip it - envString := input[begin+len(spanOpen) : end] - if len(envString) == 0 { - offset = end + len(spanClose) - continue - } - - // split the string into a key and an optional default - envParts := strings.SplitN(string(envString), envVarDefaultDelimiter, 2) - - // do a lookup for the env var, replace with the default if not found - envVarValue, found := os.LookupEnv(envParts[0]) - if !found && len(envParts) == 2 { - envVarValue = envParts[1] - } - - // get the value of the environment variable - // note that this causes one-level deep chaining - envVarBytes := []byte(envVarValue) - - // splice in the value - input = append(input[:begin], - append(envVarBytes, input[end+len(spanClose):]...)...) - - // continue at the end of the replacement - offset = begin + len(envVarBytes) - } - return input, nil -} - -// allTokens lexes the entire input, but does not parse it. -// It returns all the tokens from the input, unstructured -// and in order. -func allTokens(filename string, input []byte) ([]Token, error) { - input, err := replaceEnvVars(input) - if err != nil { - return nil, err - } - tokens, err := Tokenize(input, filename) - if err != nil { - return nil, err - } - return tokens, nil -} - -type parser struct { - *Dispenser - block ServerBlock // current server block being parsed - eof bool // if we encounter a valid EOF in a hard place - definedSnippets map[string][]Token - nesting int - importGraph importGraph -} - -func (p *parser) parseAll() ([]ServerBlock, error) { - var blocks []ServerBlock - - for p.Next() { - err := p.parseOne() - if err != nil { - return blocks, err - } - if len(p.block.Keys) > 0 || len(p.block.Segments) > 0 { - blocks = append(blocks, p.block) - } - if p.nesting > 0 { - return blocks, p.EOFErr() - } - } - - return blocks, nil -} - -func (p *parser) parseOne() error { - p.block = ServerBlock{} - return p.begin() -} - -func (p *parser) begin() error { - if len(p.tokens) == 0 { - return nil - } - - err := p.addresses() - - if err != nil { - return err - } - - if p.eof { - // this happens if the Caddyfile consists of only - // a line of addresses and nothing else - return nil - } - - if ok, name := p.isSnippet(); ok { - if p.definedSnippets == nil { - p.definedSnippets = map[string][]Token{} - } - if _, found := p.definedSnippets[name]; found { - return p.Errf("redeclaration of previously declared snippet %s", name) - } - // consume all tokens til matched close brace - tokens, err := p.snippetTokens() - if err != nil { - return err - } - // Just as we need to track which file the token comes from, we need to - // keep track of which snippets do the tokens come from. This is helpful - // in tracking import cycles across files/snippets by namespacing them. Without - // this we end up with false-positives in cycle-detection. - for k, v := range tokens { - v.inSnippet = true - v.snippetName = name - tokens[k] = v - } - p.definedSnippets[name] = tokens - // empty block keys so we don't save this block as a real server. 
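The `{$VAR}` / `{$VAR:default}` expansion above can be emulated in a few lines; this simplified stand-in (an assumption for illustration, not the vendored byte-splicing implementation) shows the lookup-with-default semantics:

```go
package main

import (
	"fmt"
	"os"
	"regexp"
	"strings"
)

// expandCaddyEnv is a hypothetical, regexp-based stand-in for the
// unexported replaceEnvVars above: it expands {$NAME} and {$NAME:default}.
func expandCaddyEnv(in string) string {
	re := regexp.MustCompile(`\{\$([^}]+)\}`)
	return re.ReplaceAllStringFunc(in, func(m string) string {
		name, def, _ := strings.Cut(m[2:len(m)-1], ":")
		if v, ok := os.LookupEnv(name); ok {
			return v
		}
		return def // empty if no default was given
	})
}

func main() {
	os.Unsetenv("PORT")
	fmt.Println(expandCaddyEnv("localhost:{$PORT:8080}")) // localhost:8080
}
```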
-
-type parser struct {
-	*Dispenser
-	block           ServerBlock // current server block being parsed
-	eof             bool        // if we encounter a valid EOF in a hard place
-	definedSnippets map[string][]Token
-	nesting         int
-	importGraph     importGraph
-}
-
-func (p *parser) parseAll() ([]ServerBlock, error) {
-	var blocks []ServerBlock
-
-	for p.Next() {
-		err := p.parseOne()
-		if err != nil {
-			return blocks, err
-		}
-		if len(p.block.Keys) > 0 || len(p.block.Segments) > 0 {
-			blocks = append(blocks, p.block)
-		}
-		if p.nesting > 0 {
-			return blocks, p.EOFErr()
-		}
-	}
-
-	return blocks, nil
-}
-
-func (p *parser) parseOne() error {
-	p.block = ServerBlock{}
-	return p.begin()
-}
-
-func (p *parser) begin() error {
-	if len(p.tokens) == 0 {
-		return nil
-	}
-
-	err := p.addresses()
-
-	if err != nil {
-		return err
-	}
-
-	if p.eof {
-		// this happens if the Caddyfile consists of only
-		// a line of addresses and nothing else
-		return nil
-	}
-
-	if ok, name := p.isSnippet(); ok {
-		if p.definedSnippets == nil {
-			p.definedSnippets = map[string][]Token{}
-		}
-		if _, found := p.definedSnippets[name]; found {
-			return p.Errf("redeclaration of previously declared snippet %s", name)
-		}
-		// consume all tokens until matched close brace
-		tokens, err := p.snippetTokens()
-		if err != nil {
-			return err
-		}
-		// Just as we need to track which file the token comes from, we need to
-		// keep track of which snippets the tokens come from. This is helpful
-		// in tracking import cycles across files/snippets by namespacing them.
-		// Without this we end up with false positives in cycle detection.
-		for k, v := range tokens {
-			v.inSnippet = true
-			v.snippetName = name
-			tokens[k] = v
-		}
-		p.definedSnippets[name] = tokens
-		// empty block keys so we don't save this block as a real server
-		p.block.Keys = nil
-		return nil
-	}
-
-	return p.blockContents()
-}
-
-func (p *parser) addresses() error {
-	var expectingAnother bool
-
-	for {
-		tkn := p.Val()
-
-		// special case: import directive replaces tokens during parse-time
-		if tkn == "import" && p.isNewLine() {
-			err := p.doImport()
-			if err != nil {
-				return err
-			}
-			continue
-		}
-
-		// Open brace definitely indicates end of addresses
-		if tkn == "{" {
-			if expectingAnother {
-				return p.Errf("Expected another address but had '%s' - check for extra comma", tkn)
-			}
-			// Mark this server block as being defined with braces.
-			// This is used to provide a better error message when
-			// the user may have tried to define two server blocks
-			// without having used braces, which are required in
-			// that case.
-			p.block.HasBraces = true
-			break
-		}
-
-		// Users commonly forget to place a space between the address and the '{'
-		if strings.HasSuffix(tkn, "{") {
-			return p.Errf("Site addresses cannot end with a curly brace: '%s' - put a space between the token and the brace", tkn)
-		}
-
-		if tkn != "" { // empty token possible if user typed ""
-			// Trailing comma indicates another address will follow, which
-			// may possibly be on the next line
-			if tkn[len(tkn)-1] == ',' {
-				tkn = tkn[:len(tkn)-1]
-				expectingAnother = true
-			} else {
-				expectingAnother = false // but we may still see another one on this line
-			}
-
-			// If there's a comma here, it's probably because they didn't use a space
-			// between their two domains, e.g. "foo.com,bar.com", which would not be
-			// parsed as two separate site addresses.
-			if strings.Contains(tkn, ",") {
-				return p.Errf("Site addresses cannot contain a comma ',': '%s' - put a space after the comma to separate site addresses", tkn)
-			}
-
-			p.block.Keys = append(p.block.Keys, tkn)
-		}
-
-		// Advance token and possibly break out of loop or return error
-		hasNext := p.Next()
-		if expectingAnother && !hasNext {
-			return p.EOFErr()
-		}
-		if !hasNext {
-			p.eof = true
-			break // EOF
-		}
-		if !expectingAnother && p.isNewLine() {
-			break
-		}
-	}
-
-	return nil
-}
-
-func (p *parser) blockContents() error {
-	errOpenCurlyBrace := p.openCurlyBrace()
-	if errOpenCurlyBrace != nil {
-		// single-server configs don't need curly braces
-		p.cursor--
-	}
-
-	err := p.directives()
-	if err != nil {
-		return err
-	}
-
-	// only look for close curly brace if there was an opening
-	if errOpenCurlyBrace == nil {
-		err = p.closeCurlyBrace()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// directives parses through all the lines for directives
-// and it expects the next token to be the first
-// directive. It goes until EOF or closing curly brace
-// which ends the server block.
-func (p *parser) directives() error {
-	for p.Next() {
-		// end of server block
-		if p.Val() == "}" {
-			// p.nesting has already been decremented
-			break
-		}
-
-		// special case: import directive replaces tokens during parse-time
-		if p.Val() == "import" {
-			err := p.doImport()
-			if err != nil {
-				return err
-			}
-			p.cursor-- // cursor is advanced when we continue, so roll back one more
-			continue
-		}
-
-		// normal case: parse a directive as a new segment
-		// (a "segment" is a line which starts with a directive
-		// and which ends at the end of the line or at the end of
-		// the block that is opened at the end of the line)
-		if err := p.directive(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// doImport swaps out the import directive and its argument
-// (a total of 2 tokens) with the tokens in the specified file
-// or globbing pattern. When the function returns, the cursor
-// is on the token before where the import directive was. In
-// other words, call Next() to access the first token that was
-// imported.
-func (p *parser) doImport() error {
-	// syntax checks
-	if !p.NextArg() {
-		return p.ArgErr()
-	}
-	importPattern := p.Val()
-	if importPattern == "" {
-		return p.Err("Import requires a non-empty filepath")
-	}
-
-	// grab remaining args as placeholder replacements
-	args := p.RemainingArgs()
-
-	// add args to the replacer
-	repl := caddy.NewEmptyReplacer()
-	for index, arg := range args {
-		repl.Set("args."+strconv.Itoa(index), arg)
-	}
-
-	// splice out the import directive and its arguments
-	// (2 tokens, plus the length of args)
-	tokensBefore := p.tokens[:p.cursor-1-len(args)]
-	tokensAfter := p.tokens[p.cursor+1:]
-	var importedTokens []Token
-	var nodes []string
-
-	// first check snippets. That is a simple, non-recursive replacement
-	if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil {
-		importedTokens = p.definedSnippets[importPattern]
-		if len(importedTokens) > 0 {
-			// just grab the first one
-			nodes = append(nodes, fmt.Sprintf("%s:%s", importedTokens[0].File, importedTokens[0].snippetName))
-		}
-	} else {
-		// make path relative to the file of the _token_ being processed rather
-		// than current working directory (issue #867) and then use glob to get
-		// list of matching filenames
-		absFile, err := filepath.Abs(p.Dispenser.File())
-		if err != nil {
-			return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.File(), err)
-		}
-
-		var matches []string
-		var globPattern string
-		if !filepath.IsAbs(importPattern) {
-			globPattern = filepath.Join(filepath.Dir(absFile), importPattern)
-		} else {
-			globPattern = importPattern
-		}
-		if strings.Count(globPattern, "*") > 1 || strings.Count(globPattern, "?") > 1 ||
-			(strings.Contains(globPattern, "[") && strings.Contains(globPattern, "]")) {
-			// See issue #2096 - a pattern with many glob expansions can hang for too long
-			return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern)
-		}
-		matches, err = filepath.Glob(globPattern)
-
-		if err != nil {
-			return p.Errf("Failed to use import pattern %s: %v", importPattern, err)
-		}
-		if len(matches) == 0 {
-			if strings.ContainsAny(globPattern, "*?[]") {
-				log.Printf("[WARNING] No files matching import glob pattern: %s", importPattern)
-			} else {
-				return p.Errf("File to import not found: %s", importPattern)
-			}
-		}
-
-		// collect all the imported tokens
-		for _, importFile := range matches {
-			newTokens, err := p.doSingleImport(importFile)
-			if err != nil {
-				return err
-			}
-			importedTokens = append(importedTokens, newTokens...)
-		}
-		nodes = matches
-	}
-
-	nodeName := p.File()
-	if p.Token().inSnippet {
-		nodeName += fmt.Sprintf(":%s", p.Token().snippetName)
-	}
-	p.importGraph.addNode(nodeName)
-	p.importGraph.addNodes(nodes)
-	if err := p.importGraph.addEdges(nodeName, nodes); err != nil {
-		p.importGraph.removeNodes(nodes)
-		return err
-	}
-
-	// copy the tokens so we don't overwrite p.definedSnippets
-	tokensCopy := make([]Token, len(importedTokens))
-	copy(tokensCopy, importedTokens)
-
-	// run the argument replacer on the tokens
-	for index, token := range tokensCopy {
-		token.Text = repl.ReplaceKnown(token.Text, "")
-		tokensCopy[index] = token
-	}
-
-	// splice the imported tokens in the place of the import statement
-	// and rewind cursor so Next() will land on first imported token
-	p.tokens = append(tokensBefore, append(tokensCopy, tokensAfter...)...)
-	p.cursor -= len(args) + 1
-
-	return nil
-}
-
-// doSingleImport lexes the individual file at importFile and returns
-// its tokens or an error, if any.
-func (p *parser) doSingleImport(importFile string) ([]Token, error) {
-	file, err := os.Open(importFile)
-	if err != nil {
-		return nil, p.Errf("Could not import %s: %v", importFile, err)
-	}
-	defer file.Close()
-
-	if info, err := file.Stat(); err != nil {
-		return nil, p.Errf("Could not import %s: %v", importFile, err)
-	} else if info.IsDir() {
-		return nil, p.Errf("Could not import %s: is a directory", importFile)
-	}
-
-	input, err := ioutil.ReadAll(file)
-	if err != nil {
-		return nil, p.Errf("Could not read imported file %s: %v", importFile, err)
-	}
-
-	importedTokens, err := allTokens(importFile, input)
-	if err != nil {
-		return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err)
-	}
-
-	// Tack the file path onto these tokens so errors show the imported file's name
-	// (we use the full, absolute path to avoid bugs: issue #1892)
-	filename, err := filepath.Abs(importFile)
-	if err != nil {
-		return nil, p.Errf("Failed to get absolute path of file: %s: %v", importFile, err)
-	}
-	for i := 0; i < len(importedTokens); i++ {
-		importedTokens[i].File = filename
-	}
-
-	return importedTokens, nil
-}
-
-// directive collects tokens until the directive's scope
-// closes (either end of line or end of curly brace block).
-// It expects the currently-loaded token to be a directive
-// (or } that ends a server block). The collected tokens
-// are loaded into the current server block for later use
-// by directive setup functions.
-func (p *parser) directive() error {
-
-	// a segment is a list of tokens associated with this directive
-	var segment Segment
-
-	// the directive itself is appended as a relevant token
-	segment = append(segment, p.Token())
-
-	for p.Next() {
-		if p.Val() == "{" {
-			p.nesting++
-		} else if p.isNewLine() && p.nesting == 0 {
-			p.cursor-- // read too far
-			break
-		} else if p.Val() == "}" && p.nesting > 0 {
-			p.nesting--
-		} else if p.Val() == "}" && p.nesting == 0 {
-			return p.Err("Unexpected '}' because no matching opening brace")
-		} else if p.Val() == "import" && p.isNewLine() {
-			if err := p.doImport(); err != nil {
-				return err
-			}
-			p.cursor-- // cursor is advanced when we continue, so roll back one more
-			continue
-		}
-
-		segment = append(segment, p.Token())
-	}
-
-	p.block.Segments = append(p.block.Segments, segment)
-
-	if p.nesting > 0 {
-		return p.EOFErr()
-	}
-
-	return nil
-}
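A hedged sketch of snippet import with arguments, per the `args.N` replacer logic above: `{args.0}` inside the snippet body is replaced by the first argument given to `import`.

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func main() {
	cf := []byte(`(redirect-to) {
	redir {args.0} permanent
}

example.com {
	import redirect-to https://example.net
}
`)
	blocks, err := caddyfile.Parse("Caddyfile", cf)
	if err != nil {
		panic(err)
	}
	// The snippet block itself is not returned as a server block;
	// example.com gets one segment: "redir https://example.net permanent".
	for _, b := range blocks {
		fmt.Println(b.Keys, len(b.Segments))
	}
}
```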
-
-// openCurlyBrace expects the current token to be an
-// opening curly brace. This acts like an assertion
-// because it returns an error if the token is not
-// an opening curly brace. It does NOT advance the token.
-func (p *parser) openCurlyBrace() error {
-	if p.Val() != "{" {
-		return p.SyntaxErr("{")
-	}
-	return nil
-}
-
-// closeCurlyBrace expects the current token to be
-// a closing curly brace. This acts like an assertion
-// because it returns an error if the token is not
-// a closing curly brace. It does NOT advance the token.
-func (p *parser) closeCurlyBrace() error {
-	if p.Val() != "}" {
-		return p.SyntaxErr("}")
-	}
-	return nil
-}
-
-func (p *parser) isSnippet() (bool, string) {
-	keys := p.block.Keys
-	// A snippet block is a single key with parens. Nothing else qualifies.
-	if len(keys) == 1 && strings.HasPrefix(keys[0], "(") && strings.HasSuffix(keys[0], ")") {
-		return true, strings.TrimSuffix(keys[0][1:], ")")
-	}
-	return false, ""
-}
-
-// read and store everything in a block for later replay.
-func (p *parser) snippetTokens() ([]Token, error) {
-	// snippet must have curlies.
-	err := p.openCurlyBrace()
-	if err != nil {
-		return nil, err
-	}
-	nesting := 1 // count our own nesting in snippets
-	tokens := []Token{}
-	for p.Next() {
-		if p.Val() == "}" {
-			nesting--
-			if nesting == 0 {
-				break
-			}
-		}
-		if p.Val() == "{" {
-			nesting++
-		}
-		tokens = append(tokens, p.tokens[p.cursor])
-	}
-	// make sure we're matched up
-	if nesting != 0 {
-		return nil, p.SyntaxErr("}")
-	}
-	return tokens, nil
-}
-
-// ServerBlock associates any number of keys from the
-// head of the server block with tokens, which are
-// grouped by segments.
-type ServerBlock struct {
-	HasBraces bool
-	Keys      []string
-	Segments  []Segment
-}
-
-// DispenseDirective returns a dispenser that contains
-// all the tokens in the server block.
-func (sb ServerBlock) DispenseDirective(dir string) *Dispenser {
-	var tokens []Token
-	for _, seg := range sb.Segments {
-		if len(seg) > 0 && seg[0].Text == dir {
-			tokens = append(tokens, seg...)
-		}
-	}
-	return NewDispenser(tokens)
-}
-
-// Segment is a list of tokens which begins with a directive
-// and ends at the end of the directive (either at the end of
-// the line, or at the end of a block it opens).
-type Segment []Token
-
-// Directive returns the directive name for the segment.
-// The directive name is the text of the first token.
-func (s Segment) Directive() string {
-	if len(s) > 0 {
-		return s[0].Text
-	}
-	return ""
-}
-
-// spanOpen and spanClose are used to bound spans that
-// contain the name of an environment variable.
-var (
-	spanOpen, spanClose    = []byte{'{', '$'}, []byte{'}'}
-	envVarDefaultDelimiter = ":"
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/configadapters.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/configadapters.go
deleted file mode 100644
index ccac5f88..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/configadapters.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyconfig
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/caddyserver/caddy/v2"
-)
-
-// Adapter is a type which can adapt a configuration to Caddy JSON.
-// It returns the results and any warnings, or an error.
-type Adapter interface {
-	Adapt(body []byte, options map[string]interface{}) ([]byte, []Warning, error)
-}
-
-// Warning represents a warning or notice related to conversion.
-type Warning struct {
-	File      string `json:"file,omitempty"`
-	Line      int    `json:"line,omitempty"`
-	Directive string `json:"directive,omitempty"`
-	Message   string `json:"message,omitempty"`
-}
-
-func (w Warning) String() string {
-	var directive string
-	if w.Directive != "" {
-		directive = fmt.Sprintf(" (%s)", w.Directive)
-	}
-	return fmt.Sprintf("%s:%d%s: %s", w.File, w.Line, directive, w.Message)
-}
-
-// JSON encodes val as JSON, returning it as a json.RawMessage. Any
-// marshaling errors (which are highly unlikely with correct code)
-// are converted to warnings. This is convenient when filling config
-// structs that require a json.RawMessage, without having to worry
-// about errors.
-func JSON(val interface{}, warnings *[]Warning) json.RawMessage {
-	b, err := json.Marshal(val)
-	if err != nil {
-		if warnings != nil {
-			*warnings = append(*warnings, Warning{Message: err.Error()})
-		}
-		return nil
-	}
-	return b
-}
-
-// JSONModuleObject is like JSON(), except it marshals val into a JSON object
-// with an added key named fieldName with the value fieldVal. This is useful
-// for encoding module values where the module name has to be described within
-// the object by a certain key; for example, `"handler": "file_server"` for a
-// file server HTTP handler (fieldName="handler" and fieldVal="file_server").
-// The val parameter must encode into a map[string]interface{} (i.e. it must be
-// a struct or map). Any errors are converted into warnings.
-func JSONModuleObject(val interface{}, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage {
-	// encode to a JSON object first
-	enc, err := json.Marshal(val)
-	if err != nil {
-		if warnings != nil {
-			*warnings = append(*warnings, Warning{Message: err.Error()})
-		}
-		return nil
-	}
-
-	// then decode the object
-	var tmp map[string]interface{}
-	err = json.Unmarshal(enc, &tmp)
-	if err != nil {
-		if warnings != nil {
-			*warnings = append(*warnings, Warning{Message: err.Error()})
-		}
-		return nil
-	}
-
-	// so we can easily add the module's field with its appointed value
-	tmp[fieldName] = fieldVal
-
-	// then re-marshal as JSON
-	result, err := json.Marshal(tmp)
-	if err != nil {
-		if warnings != nil {
-			*warnings = append(*warnings, Warning{Message: err.Error()})
-		}
-		return nil
-	}
-
-	return result
-}
-
-// RegisterAdapter registers a config adapter with the given name.
-// This should usually be done at init-time. It panics if the
-// adapter cannot be registered successfully.
-func RegisterAdapter(name string, adapter Adapter) {
-	if _, ok := configAdapters[name]; ok {
-		panic(fmt.Errorf("%s: already registered", name))
-	}
-	configAdapters[name] = adapter
-	caddy.RegisterModule(adapterModule{name, adapter})
-}
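A small sketch of what JSONModuleObject produces for a hypothetical file_server-style value (the anonymous struct here is illustrative, not the real module type):

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig"
)

func main() {
	handler := struct {
		Root string `json:"root,omitempty"`
	}{Root: "/srv"}

	var warnings []caddyconfig.Warning
	raw := caddyconfig.JSONModuleObject(handler, "handler", "file_server", &warnings)
	fmt.Println(string(raw)) // {"handler":"file_server","root":"/srv"}
}
```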
-
-// GetAdapter returns the adapter with the given name,
-// or nil if one with that name is not registered.
-func GetAdapter(name string) Adapter {
-	return configAdapters[name]
-}
-
-// adapterModule is a wrapper type that can turn any config
-// adapter into a Caddy module, which has the benefit of being
-// counted with other modules, even though they do not
-// technically extend the Caddy configuration structure.
-// See caddyserver/caddy#3132.
-type adapterModule struct {
-	name string
-	Adapter
-}
-
-func (am adapterModule) CaddyModule() caddy.ModuleInfo {
-	return caddy.ModuleInfo{
-		ID:  caddy.ModuleID("caddy.adapters." + am.name),
-		New: func() caddy.Module { return am },
-	}
-}
-
-var configAdapters = make(map[string]Adapter)
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses.go
deleted file mode 100644
index 71053204..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpcaddyfile
-
-import (
-	"fmt"
-	"net"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-	"unicode"
-
-	"github.com/caddyserver/caddy/v2"
-	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
-	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
-	"github.com/caddyserver/certmagic"
-)
-
-// mapAddressToServerBlocks returns a map of listener address to list of server
-// blocks that will be served on that address. To do this, each server block is
-// expanded so that each one is considered individually, although keys of a
-// server block that share the same address stay grouped together so the config
-// isn't repeated unnecessarily. For example, this Caddyfile:
-//
-//     example.com {
-//         bind 127.0.0.1
-//     }
-//     www.example.com, example.net/path, localhost:9999 {
-//         bind 127.0.0.1 1.2.3.4
-//     }
-//
-// has two server blocks to start with. But expressed in this Caddyfile are
-// actually 4 listener addresses: 127.0.0.1:443, 1.2.3.4:443, 127.0.0.1:9999,
-// and 1.2.3.4:9999. This is because the bind directive is applied to each
-// key of its server block (specifying the host part), and each key may have
-// a different port. And we definitely need to be sure that a site which is
-// bound to be served on a specific interface is not served on others just
-// because that is more convenient: it would be a potential security risk
-// if the difference between interfaces means private vs. public.
-//
-// So what this function does for the example above is iterate each server
-// block, and for each server block, iterate its keys. For the first, it
-// finds one key (example.com) and determines its listener address
-// (127.0.0.1:443 - because of 'bind' and automatic HTTPS). It then adds
-// the listener address to the map value returned by this function, with
-// the first server block as one of its associations.
-//
-// It then iterates each key on the second server block and associates them
-// with one or more listener addresses. Indeed, each key in this block has
-// two listener addresses because of the 'bind' directive. Once we know
-// which addresses serve which keys, we can create a new server block for
-// each address containing the contents of the server block and only those
-// specific keys of the server block which use that address.
-//
-// It is possible and even likely that some keys in the returned map have
-// the exact same list of server blocks (i.e. they are identical). This
-// happens when multiple hosts are declared with a 'bind' directive and
-// the resulting listener addresses are not shared by any other server
-// block (or the other server blocks are exactly identical in their token
-// contents). This happens with our example above because 1.2.3.4:443
-// and 1.2.3.4:9999 are used exclusively with the second server block. This
-// repetition may be undesirable, so call consolidateAddrMappings() to map
-// multiple addresses to the same lists of server blocks (a many:many mapping).
-// (Doing this is essentially a map-reduce technique.)
-func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBlock,
-	options map[string]interface{}) (map[string][]serverBlock, error) {
-	sbmap := make(map[string][]serverBlock)
-
-	for i, sblock := range originalServerBlocks {
-		// within a server block, we need to map all the listener addresses
-		// implied by the server block to the keys of the server block which
-		// will be served by them; this has the effect of treating each
-		// key of a server block as its own, but without having to repeat its
-		// contents in cases where multiple keys really can be served together
-		addrToKeys := make(map[string][]string)
-		for j, key := range sblock.block.Keys {
-			// a key can have multiple listener addresses if there are multiple
-			// arguments to the 'bind' directive (although they will all have
-			// the same port, since the port is defined by the key or is implicit
-			// through automatic HTTPS)
-			addrs, err := st.listenerAddrsForServerBlockKey(sblock, key, options)
-			if err != nil {
-				return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key, err)
-			}
-
-			// associate this key with each listener address it is served on
-			for _, addr := range addrs {
-				addrToKeys[addr] = append(addrToKeys[addr], key)
-			}
-		}
-
-		// now that we know which addresses serve which keys of this
-		// server block, we iterate that mapping and create a list of
-		// new server blocks for each address where the keys of the
-		// server block are only the ones which use the address; but
-		// the contents (tokens) are of course the same
-		for addr, keys := range addrToKeys {
-			// parse keys so that we only have to do it once
-			parsedKeys := make([]Address, 0, len(keys))
-			for _, key := range keys {
-				addr, err := ParseAddress(key)
-				if err != nil {
-					return nil, fmt.Errorf("parsing key '%s': %v", key, err)
-				}
-				parsedKeys = append(parsedKeys, addr.Normalize())
-			}
-			sbmap[addr] = append(sbmap[addr], serverBlock{
-				block: caddyfile.ServerBlock{
-					Keys:     keys,
-					Segments: sblock.block.Segments,
-				},
-				pile: sblock.pile,
-				keys: parsedKeys,
-			})
-		}
-	}
-
-	return sbmap, nil
-}
-
-// consolidateAddrMappings eliminates repetition of identical server blocks in a mapping of
-// single listener addresses to lists of server blocks. Since multiple addresses may serve
-// identical sites (server block contents), this function turns a 1:many mapping into a
-// many:many mapping. Server block contents (tokens) must be exactly identical so that
-// reflect.DeepEqual returns true in order for the addresses to be combined. Identical
-// entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each
-// association from multiple addresses to multiple server blocks; i.e. each element of
-// the returned slice) becomes a server definition in the output JSON.
-func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]serverBlock) []sbAddrAssociation {
-	sbaddrs := make([]sbAddrAssociation, 0, len(addrToServerBlocks))
-	for addr, sblocks := range addrToServerBlocks {
-		// we start with knowing that at least this address
-		// maps to these server blocks
-		a := sbAddrAssociation{
-			addresses:    []string{addr},
-			serverBlocks: sblocks,
-		}
-
-		// now find other addresses that map to identical
-		// server blocks and add them to our list of
-		// addresses, while removing them from the map
-		for otherAddr, otherSblocks := range addrToServerBlocks {
-			if addr == otherAddr {
-				continue
-			}
-			if reflect.DeepEqual(sblocks, otherSblocks) {
-				a.addresses = append(a.addresses, otherAddr)
-				delete(addrToServerBlocks, otherAddr)
-			}
-		}
-
-		sbaddrs = append(sbaddrs, a)
-	}
-
-	// sort them by their first address (we know there will always be at least one)
-	// to avoid problems with non-deterministic ordering (makes tests flaky)
-	sort.Slice(sbaddrs, func(i, j int) bool {
-		return sbaddrs[i].addresses[0] < sbaddrs[j].addresses[0]
-	})
-
-	return sbaddrs
-}
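A toy, self-contained rendition (an assumption, not the vendored types) of that consolidation step: addresses whose server-block lists compare deeply equal collapse into one association.

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

func main() {
	addrToBlocks := map[string][]string{
		"127.0.0.1:443": {"blockA"},
		"1.2.3.4:443":   {"blockA"},
		"127.0.0.1:80":  {"blockB"},
	}
	type assoc struct{ addrs, blocks []string }
	var out []assoc
	for addr, blocks := range addrToBlocks {
		a := assoc{addrs: []string{addr}, blocks: blocks}
		for other, ob := range addrToBlocks {
			if other != addr && reflect.DeepEqual(blocks, ob) {
				a.addrs = append(a.addrs, other)
				delete(addrToBlocks, other) // same map-shrinking trick as above
			}
		}
		sort.Strings(a.addrs)
		out = append(out, a)
	}
	sort.Slice(out, func(i, j int) bool { return out[i].addrs[0] < out[j].addrs[0] })
	fmt.Println(out) // [{[1.2.3.4:443 127.0.0.1:443] [blockA]} {[127.0.0.1:80] [blockB]}]
}
```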
-
-func (st *ServerType) listenerAddrsForServerBlockKey(sblock serverBlock, key string,
-	options map[string]interface{}) ([]string, error) {
-	addr, err := ParseAddress(key)
-	if err != nil {
-		return nil, fmt.Errorf("parsing key: %v", err)
-	}
-	addr = addr.Normalize()
-
-	// figure out the HTTP and HTTPS ports; either
-	// use defaults, or override with user config
-	httpPort, httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPPort), strconv.Itoa(caddyhttp.DefaultHTTPSPort)
-	if hport, ok := options["http_port"]; ok {
-		httpPort = strconv.Itoa(hport.(int))
-	}
-	if hsport, ok := options["https_port"]; ok {
-		httpsPort = strconv.Itoa(hsport.(int))
-	}
-
-	// default port is the HTTPS port
-	lnPort := httpsPort
-	if addr.Port != "" {
-		// port explicitly defined
-		lnPort = addr.Port
-	} else if addr.Scheme == "http" {
-		// port inferred from scheme
-		lnPort = httpPort
-	}
-
-	// error if scheme and port combination violate convention
-	if (addr.Scheme == "http" && lnPort == httpsPort) || (addr.Scheme == "https" && lnPort == httpPort) {
-		return nil, fmt.Errorf("[%s] scheme and port violate convention", key)
-	}
-
-	// the bind directive specifies hosts, but is optional
-	lnHosts := make([]string, 0, len(sblock.pile))
-	for _, cfgVal := range sblock.pile["bind"] {
-		lnHosts = append(lnHosts, cfgVal.Value.([]string)...)
-	}
-	if len(lnHosts) == 0 {
-		lnHosts = []string{""}
-	}
-
-	// use a map to prevent duplication
-	listeners := make(map[string]struct{})
-	for _, host := range lnHosts {
-		addr, err := caddy.ParseNetworkAddress(host)
-		if err == nil && addr.IsUnixNetwork() {
-			listeners[host] = struct{}{}
-		} else {
-			listeners[net.JoinHostPort(host, lnPort)] = struct{}{}
-		}
-	}
-
-	// now turn map into list
-	listenersList := make([]string, 0, len(listeners))
-	for lnStr := range listeners {
-		listenersList = append(listenersList, lnStr)
-	}
-
-	return listenersList, nil
-}
-
-// Address represents a site address. It contains
-// the original input value, and the component
-// parts of an address. The component parts may be
-// updated to the correct values as setup proceeds,
-// but the original value should never be changed.
-//
-// The Host field must be in a normalized form.
-type Address struct {
-	Original, Scheme, Host, Port, Path string
-}
-
-// ParseAddress parses an address string into a structured format with separate
-// scheme, host, port, and path portions, as well as the original input string.
-func ParseAddress(str string) (Address, error) {
-	const maxLen = 4096
-	if len(str) > maxLen {
-		str = str[:maxLen]
-	}
-	remaining := strings.TrimSpace(str)
-	a := Address{Original: remaining}
-
-	// extract scheme
-	splitScheme := strings.SplitN(remaining, "://", 2)
-	switch len(splitScheme) {
-	case 0:
-		return a, nil
-	case 1:
-		remaining = splitScheme[0]
-	case 2:
-		a.Scheme = splitScheme[0]
-		remaining = splitScheme[1]
-	}
-
-	// extract host and port
-	hostSplit := strings.SplitN(remaining, "/", 2)
-	if len(hostSplit) > 0 {
-		host, port, err := net.SplitHostPort(hostSplit[0])
-		if err != nil {
-			host, port, err = net.SplitHostPort(hostSplit[0] + ":")
-			if err != nil {
-				host = hostSplit[0]
-			}
-		}
-		a.Host = host
-		a.Port = port
-	}
-	if len(hostSplit) == 2 {
-		// all that remains is the path
-		a.Path = "/" + hostSplit[1]
-	}
-
-	// make sure port is valid
-	if a.Port != "" {
-		if portNum, err := strconv.Atoi(a.Port); err != nil {
-			return Address{}, fmt.Errorf("invalid port '%s': %v", a.Port, err)
-		} else if portNum < 0 || portNum > 65535 {
-			return Address{}, fmt.Errorf("port %d is out of range", portNum)
-		}
-	}
-
-	return a, nil
-}
-
-// String returns a human-readable form of a. It will
-// be a cleaned-up and filled-out URL string.
-func (a Address) String() string {
-	if a.Host == "" && a.Port == "" {
-		return ""
-	}
-	scheme := a.Scheme
-	if scheme == "" {
-		if a.Port == strconv.Itoa(certmagic.HTTPSPort) {
-			scheme = "https"
-		} else {
-			scheme = "http"
-		}
-	}
-	s := scheme
-	if s != "" {
-		s += "://"
-	}
-	if a.Port != "" &&
-		((scheme == "https" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort)) ||
-			(scheme == "http" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort))) {
-		s += net.JoinHostPort(a.Host, a.Port)
-	} else {
-		s += a.Host
-	}
-	if a.Path != "" {
-		s += a.Path
-	}
-	return s
-}
-
-// Normalize returns a normalized version of a.
-func (a Address) Normalize() Address {
-	path := a.Path
-
-	// ensure host is normalized if it's an IP address
-	host := strings.TrimSpace(a.Host)
-	if ip := net.ParseIP(host); ip != nil {
-		host = ip.String()
-	}
-
-	return Address{
-		Original: a.Original,
-		Scheme:   lowerExceptPlaceholders(a.Scheme),
-		Host:     lowerExceptPlaceholders(host),
-		Port:     a.Port,
-		Path:     path,
-	}
-}
-
-// Key returns a string form of a, much like String() does, but this
-// method doesn't add anything default that wasn't in the original.
-func (a Address) Key() string {
-	res := ""
-	if a.Scheme != "" {
-		res += a.Scheme + "://"
-	}
-	if a.Host != "" {
-		res += a.Host
-	}
-	// insert port only if the original has its own explicit port
-	if a.Port != "" &&
-		len(a.Original) >= len(res) &&
-		strings.HasPrefix(a.Original[len(res):], ":"+a.Port) {
-		res += ":" + a.Port
-	}
-	if a.Path != "" {
-		res += a.Path
-	}
-	return res
-}
-
-// lowerExceptPlaceholders lowercases s except within
-// placeholders (substrings in non-escaped '{ }' spans).
-// See https://github.com/caddyserver/caddy/issues/3264
-func lowerExceptPlaceholders(s string) string {
-	var sb strings.Builder
-	var escaped, inPlaceholder bool
-	for _, ch := range s {
-		if ch == '\\' && !escaped {
-			escaped = true
-			sb.WriteRune(ch)
-			continue
-		}
-		if ch == '{' && !escaped {
-			inPlaceholder = true
-		}
-		if ch == '}' && inPlaceholder && !escaped {
-			inPlaceholder = false
-		}
-		if inPlaceholder {
-			sb.WriteRune(ch)
-		} else {
-			sb.WriteRune(unicode.ToLower(ch))
-		}
-		escaped = false
-	}
-	return sb.String()
-}
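A quick sketch of ParseAddress plus Normalize on a typical site key (assuming the vendored package as it existed before this removal):

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
)

func main() {
	addr, err := httpcaddyfile.ParseAddress("HTTPS://Example.com:8443/api")
	if err != nil {
		panic(err)
	}
	addr = addr.Normalize()
	fmt.Println(addr.Scheme, addr.Host, addr.Port, addr.Path)
	// https example.com 8443 /api
}
```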
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses_fuzz.go
deleted file mode 100644
index 4ab62984..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses_fuzz.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build gofuzz
-
-package httpcaddyfile
-
-func FuzzParseAddress(data []byte) int {
-	addr, err := ParseAddress(string(data))
-	if err != nil {
-		if addr == (Address{}) {
-			return 1
-		}
-		return 0
-	}
-	return 1
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/builtins.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/builtins.go
deleted file mode 100644
index d52c5ef8..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/builtins.go
+++ /dev/null
@@ -1,789 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpcaddyfile
-
-import (
-	"encoding/base64"
-	"encoding/pem"
-	"fmt"
-	"html"
-	"io/ioutil"
-	"net/http"
-	"reflect"
-	"strconv"
-	"strings"
-
-	"github.com/caddyserver/caddy/v2"
-	"github.com/caddyserver/caddy/v2/caddyconfig"
-	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
-	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
-	"github.com/caddyserver/caddy/v2/modules/caddytls"
-	"github.com/caddyserver/certmagic"
-	"github.com/mholt/acmez/acme"
-	"go.uber.org/zap/zapcore"
-)
-
-func init() {
-	RegisterDirective("bind", parseBind)
-	RegisterDirective("tls", parseTLS)
-	RegisterHandlerDirective("root", parseRoot)
-	RegisterHandlerDirective("redir", parseRedir)
-	RegisterHandlerDirective("respond", parseRespond)
-	RegisterHandlerDirective("abort", parseAbort)
-	RegisterHandlerDirective("error", parseError)
-	RegisterHandlerDirective("route", parseRoute)
-	RegisterHandlerDirective("handle", parseHandle)
-	RegisterDirective("handle_errors", parseHandleErrors)
-	RegisterDirective("log", parseLog)
-}
-
-// parseBind parses the bind directive. Syntax:
-//
-//     bind <addresses...>
-//
-func parseBind(h Helper) ([]ConfigValue, error) {
-	var lnHosts []string
-	for h.Next() {
-		lnHosts = append(lnHosts, h.RemainingArgs()...)
-	}
-	return h.NewBindAddresses(lnHosts), nil
-}
-//
-func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
- if !h.Next() {
- return nil, h.ArgErr()
- }
-
- if !h.NextArg() {
- return nil, h.ArgErr()
- }
- to := h.Val()
-
- var code string
- if h.NextArg() {
- code = h.Val()
- }
-
- var body string
- switch code {
- case "permanent":
- code = "301"
- case "temporary", "":
- code = "302"
- case "html":
- // Script tag comes first since that will better imitate a redirect in the browser's
- // history, but the meta tag is a fallback for most non-JS clients.
- const metaRedir = `<!DOCTYPE html>
-<html>
- <head>
- <title>Redirecting...</title>
- <script>window.location.replace("%s");</script>
- <meta http-equiv="refresh" content="0; URL='%s'">
- </head>
- <body>
- Redirecting to <a href="%s">%s</a>...
- </body>
-</html>
-`
- safeTo := html.EscapeString(to)
- body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
- code = "302"
- default:
- codeInt, err := strconv.Atoi(code)
- if err != nil {
- return nil, h.Errf("Not a supported redir code type or not valid integer: '%s'", code)
- }
- if codeInt < 300 || codeInt > 399 {
- return nil, h.Errf("Redir code not in the 3xx range: '%v'", codeInt)
- }
- }
-
- return caddyhttp.StaticResponse{
- StatusCode: caddyhttp.WeakString(code),
- Headers: http.Header{"Location": []string{to}},
- Body: body,
- }, nil
-}
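A hypothetical helper mirroring the switch above, to make the accepted codes concrete (redirCode is illustrative, not part of the vendored API):

```go
package main

import (
	"fmt"
	"strconv"
)

func redirCode(arg string) (string, error) {
	switch arg {
	case "permanent":
		return "301", nil
	case "temporary", "":
		return "302", nil
	case "html":
		return "302", nil // the body then carries the JS/meta redirect
	default:
		n, err := strconv.Atoi(arg)
		if err != nil || n < 300 || n > 399 {
			return "", fmt.Errorf("not a valid 3xx redir code: %q", arg)
		}
		return arg, nil
	}
}

func main() {
	for _, c := range []string{"permanent", "", "307", "404"} {
		code, err := redirCode(c)
		fmt.Println(c, "->", code, err)
	}
}
```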
-
-// parseRespond parses the respond directive.
-func parseRespond(h Helper) (caddyhttp.MiddlewareHandler, error) {
- sr := new(caddyhttp.StaticResponse)
- err := sr.UnmarshalCaddyfile(h.Dispenser)
- if err != nil {
- return nil, err
- }
- return sr, nil
-}
-
-// parseAbort parses the abort directive.
-func parseAbort(h Helper) (caddyhttp.MiddlewareHandler, error) {
- h.Next() // consume directive
- for h.Next() || h.NextBlock(0) {
- return nil, h.ArgErr()
- }
- return &caddyhttp.StaticResponse{Abort: true}, nil
-}
-
-// parseError parses the error directive.
-func parseError(h Helper) (caddyhttp.MiddlewareHandler, error) {
- se := new(caddyhttp.StaticError)
- err := se.UnmarshalCaddyfile(h.Dispenser)
- if err != nil {
- return nil, err
- }
- return se, nil
-}
-
-// parseRoute parses the route directive.
-func parseRoute(h Helper) (caddyhttp.MiddlewareHandler, error) {
- sr := new(caddyhttp.Subroute)
-
- allResults, err := parseSegmentAsConfig(h)
- if err != nil {
- return nil, err
- }
-
- for _, result := range allResults {
- switch handler := result.Value.(type) {
- case caddyhttp.Route:
- sr.Routes = append(sr.Routes, handler)
- case caddyhttp.Subroute:
- // directives which return a literal subroute instead of a route
- // mean they intend to keep those handlers together without
- // them being reordered; we're doing that anyway since we're in
- // the route directive, so just append its handlers
- sr.Routes = append(sr.Routes, handler.Routes...)
- default:
- return nil, h.Errf("%s directive returned something other than an HTTP route or subroute: %#v (only handler directives can be used in routes)", result.directive, result.Value)
- }
- }
-
- return sr, nil
-}
-
-func parseHandle(h Helper) (caddyhttp.MiddlewareHandler, error) {
- return ParseSegmentAsSubroute(h)
-}
-
-func parseHandleErrors(h Helper) ([]ConfigValue, error) {
- subroute, err := ParseSegmentAsSubroute(h)
- if err != nil {
- return nil, err
- }
- return []ConfigValue{
- {
- Class: "error_route",
- Value: subroute,
- },
- }, nil
-}
-
-// parseLog parses the log directive. Syntax:
-//
-// log {
-//     output <writer_module> ...
-//     format <encoder_module> ...
-//     level  <level>
-// }
-//
-func parseLog(h Helper) ([]ConfigValue, error) {
- return parseLogHelper(h, nil)
-}
-
-// parseLogHelper is used both for the parseLog directive within Server Blocks,
-// as well as the global "log" option for configuring loggers at the global
-// level. The parseAsGlobalOption parameter is used to distinguish any differing logic
-// between the two.
-func parseLogHelper(h Helper, globalLogNames map[string]struct{}) ([]ConfigValue, error) {
- // When the globalLogNames parameter is passed in, we make
- // modifications to the parsing behavior.
- parseAsGlobalOption := globalLogNames != nil
-
- var configValues []ConfigValue
- for h.Next() {
- // Logic below expects that a name is always present when a
- // global option is being parsed.
- var globalLogName string
- if parseAsGlobalOption {
- if h.NextArg() {
- globalLogName = h.Val()
-
- // Only a single argument is supported.
- if h.NextArg() {
- return nil, h.ArgErr()
- }
- } else {
- // If there is no log name specified, we
- // reference the default logger. See the
- // setupNewDefault function in the logging
- // package for where this is configured.
- globalLogName = "default"
- }
-
- // Verify this name is unused.
- _, used := globalLogNames[globalLogName]
- if used {
- return nil, h.Err("duplicate global log option for: " + globalLogName)
- }
- globalLogNames[globalLogName] = struct{}{}
- } else {
- // No arguments are supported for the server block log directive
- if h.NextArg() {
- return nil, h.ArgErr()
- }
- }
-
- cl := new(caddy.CustomLog)
-
- for h.NextBlock(0) {
- switch h.Val() {
- case "output":
- if !h.NextArg() {
- return nil, h.ArgErr()
- }
- moduleName := h.Val()
-
- // can't use the usual caddyfile.Unmarshaler flow with the
- // standard writers because they are in the caddy package
- // (because they are the default) and implementing that
- // interface there would unfortunately create circular import
- var wo caddy.WriterOpener
- switch moduleName {
- case "stdout":
- wo = caddy.StdoutWriter{}
- case "stderr":
- wo = caddy.StderrWriter{}
- case "discard":
- wo = caddy.DiscardWriter{}
- default:
- modID := "caddy.logging.writers." + moduleName
- unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
- if err != nil {
- return nil, err
- }
- var ok bool
- wo, ok = unm.(caddy.WriterOpener)
- if !ok {
- return nil, h.Errf("module %s (%T) is not a WriterOpener", modID, unm)
- }
- }
- cl.WriterRaw = caddyconfig.JSONModuleObject(wo, "output", moduleName, h.warnings)
-
- case "format":
- if !h.NextArg() {
- return nil, h.ArgErr()
- }
- moduleName := h.Val()
- moduleID := "caddy.logging.encoders." + moduleName
- unm, err := caddyfile.UnmarshalModule(h.Dispenser, moduleID)
- if err != nil {
- return nil, err
- }
- enc, ok := unm.(zapcore.Encoder)
- if !ok {
- return nil, h.Errf("module %s (%T) is not a zapcore.Encoder", moduleID, unm)
- }
- cl.EncoderRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, h.warnings)
-
- case "level":
- if !h.NextArg() {
- return nil, h.ArgErr()
- }
- cl.Level = h.Val()
- if h.NextArg() {
- return nil, h.ArgErr()
- }
-
- case "include":
- // This configuration is only allowed in the global options
- if !parseAsGlobalOption {
- return nil, h.ArgErr()
- }
- for h.NextArg() {
- cl.Include = append(cl.Include, h.Val())
- }
-
- case "exclude":
- // This configuration is only allowed in the global options
- if !parseAsGlobalOption {
- return nil, h.ArgErr()
- }
- for h.NextArg() {
- cl.Exclude = append(cl.Exclude, h.Val())
- }
-
- default:
- return nil, h.Errf("unrecognized subdirective: %s", h.Val())
- }
- }
-
- var val namedCustomLog
- // Skip handling of empty logging configs
- if !reflect.DeepEqual(cl, new(caddy.CustomLog)) {
- if parseAsGlobalOption {
- // Use indicated name for global log options
- val.name = globalLogName
- val.log = cl
- } else {
- // Construct a log name for server log streams
- logCounter, ok := h.State["logCounter"].(int)
- if !ok {
- logCounter = 0
- }
- val.name = fmt.Sprintf("log%d", logCounter)
- cl.Include = []string{"http.log.access." + val.name}
- val.log = cl
- logCounter++
- h.State["logCounter"] = logCounter
- }
- }
- configValues = append(configValues, ConfigValue{
- Class: "custom_log",
- Value: val,
- })
- }
- return configValues, nil
-}
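A toy rendition (an assumption, reduced from the logic above) of the access-log naming: each server-block log directive takes the next "logN" name and includes only its own stream.

```go
package main

import "fmt"

func main() {
	state := map[string]interface{}{} // stands in for h.State
	for i := 0; i < 2; i++ {
		n, _ := state["logCounter"].(int) // zero the first time, as above
		name := fmt.Sprintf("log%d", n)
		include := []string{"http.log.access." + name}
		state["logCounter"] = n + 1
		fmt.Println(name, include)
	}
	// log0 [http.log.access.log0]
	// log1 [http.log.access.log1]
}
```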
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/directives.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/directives.go
deleted file mode 100644
index 360f91e7..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/directives.go
+++ /dev/null
@@ -1,539 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpcaddyfile
-
-import (
- "encoding/json"
- "net"
- "sort"
- "strconv"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
-)
-
-// directiveOrder specifies the order
-// to apply directives in HTTP routes.
-//
-// The root directive goes first in case rewrites or
-// redirects depend on existence of files, i.e. the
-// file matcher, which must know the root first.
-//
-// The header directive goes second so that headers
-// can be manipulated before doing redirects.
-var directiveOrder = []string{
- "map",
- "root",
-
- "header",
- "request_body",
-
- "redir",
-
- // URI manipulation
- "rewrite",
- "uri",
- "try_files",
-
- // middleware handlers; some wrap responses
- "basicauth",
- "request_header",
- "encode",
- "push",
- "templates",
-
- // special routing & dispatching directives
- "handle",
- "handle_path",
- "route",
-
- // handlers that typically respond to requests
- "abort",
- "error",
- "respond",
- "metrics",
- "reverse_proxy",
- "php_fastcgi",
- "file_server",
- "acme_server",
-}
-
-// directiveIsOrdered returns true if dir is
-// a known, ordered (sorted) directive.
-func directiveIsOrdered(dir string) bool {
- for _, d := range directiveOrder {
- if d == dir {
- return true
- }
- }
- return false
-}
-
-// RegisterDirective registers a unique directive dir with an
-// associated unmarshaling (setup) function. When directive dir
-// is encountered in a Caddyfile, setupFunc will be called to
-// unmarshal its tokens.
-func RegisterDirective(dir string, setupFunc UnmarshalFunc) {
- if _, ok := registeredDirectives[dir]; ok {
- panic("directive " + dir + " already registered")
- }
- registeredDirectives[dir] = setupFunc
-}
-
-// RegisterHandlerDirective is like RegisterDirective, but for
-// directives which specifically output only an HTTP handler.
-// Directives registered with this function will always have
-// an optional matcher token as the first argument.
-func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
- RegisterDirective(dir, func(h Helper) ([]ConfigValue, error) {
- if !h.Next() {
- return nil, h.ArgErr()
- }
-
- matcherSet, err := h.ExtractMatcherSet()
- if err != nil {
- return nil, err
- }
-
- val, err := setupFunc(h)
- if err != nil {
- return nil, err
- }
-
- return h.NewRoute(matcherSet, val), nil
- })
-}
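A hedged sketch of the registration pattern this enables: a hypothetical "greet" directive whose body mirrors parseRespond above by reusing caddyhttp.StaticResponse.

```go
package mymodule // hypothetical plugin package

import (
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	httpcaddyfile.RegisterHandlerDirective("greet", func(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
		// the wrapper installed by RegisterHandlerDirective has already
		// consumed the optional matcher token by the time we get here
		sr := new(caddyhttp.StaticResponse)
		if err := sr.UnmarshalCaddyfile(h.Dispenser); err != nil {
			return nil, err
		}
		return sr, nil
	})
}
```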
-
-// RegisterGlobalOption registers a unique global option opt with
-// an associated unmarshaling (setup) function. When the global
-// option opt is encountered in a Caddyfile, setupFunc will be
-// called to unmarshal its tokens.
-func RegisterGlobalOption(opt string, setupFunc UnmarshalGlobalFunc) {
- if _, ok := registeredGlobalOptions[opt]; ok {
- panic("global option " + opt + " already registered")
- }
- registeredGlobalOptions[opt] = setupFunc
-}
-
-// Helper is a type which helps setup a value from
-// Caddyfile tokens.
-type Helper struct {
- *caddyfile.Dispenser
- // State stores intermediate variables during caddyfile adaptation.
- State map[string]interface{}
- options map[string]interface{}
- warnings *[]caddyconfig.Warning
- matcherDefs map[string]caddy.ModuleMap
- parentBlock caddyfile.ServerBlock
- groupCounter counter
-}
-
-// Option gets the option keyed by name.
-func (h Helper) Option(name string) interface{} {
- return h.options[name]
-}
-
-// Caddyfiles returns the list of config files from
-// which tokens in the current server block were loaded.
-func (h Helper) Caddyfiles() []string {
- // first obtain set of names of files involved
- // in this server block, without duplicates
- files := make(map[string]struct{})
- for _, segment := range h.parentBlock.Segments {
- for _, token := range segment {
- files[token.File] = struct{}{}
- }
- }
- // then convert the set into a slice
- filesSlice := make([]string, 0, len(files))
- for file := range files {
- filesSlice = append(filesSlice, file)
- }
- return filesSlice
-}
-
-// JSON converts val into JSON. Any errors are added to warnings.
-func (h Helper) JSON(val interface{}) json.RawMessage {
- return caddyconfig.JSON(val, h.warnings)
-}
-
-// MatcherToken assumes the next argument token is (possibly) a matcher,
-// and if so, returns the matcher set along with a true value. If the next
-// token is not a matcher, nil and false is returned. Note that a true
-// value may be returned with a nil matcher set if it is a catch-all.
-func (h Helper) MatcherToken() (caddy.ModuleMap, bool, error) {
- if !h.NextArg() {
- return nil, false, nil
- }
- return matcherSetFromMatcherToken(h.Dispenser.Token(), h.matcherDefs, h.warnings)
-}
-
-// ExtractMatcherSet is like MatcherToken, except this is a higher-level
-// method that returns the matcher set described by the matcher token,
-// or nil if there is none, and deletes the matcher token from the
-// dispenser and resets it as if this look-ahead never happened. Useful
-// when wrapping a route (one or more handlers) in a user-defined matcher.
-func (h Helper) ExtractMatcherSet() (caddy.ModuleMap, error) {
- matcherSet, hasMatcher, err := h.MatcherToken()
- if err != nil {
- return nil, err
- }
- if hasMatcher {
- // strip matcher token; we don't need to
- // use the return value here because a
- // new dispenser should have been made
- // solely for this directive's tokens,
- // with no other uses of same slice
- h.Dispenser.Delete()
- }
- h.Dispenser.Reset() // pretend this lookahead never happened
- return matcherSet, nil
-}
-
-// NewRoute returns config values relevant to creating a new HTTP route.
-func (h Helper) NewRoute(matcherSet caddy.ModuleMap,
- handler caddyhttp.MiddlewareHandler) []ConfigValue {
- mod, err := caddy.GetModule(caddy.GetModuleID(handler))
- if err != nil {
- *h.warnings = append(*h.warnings, caddyconfig.Warning{
- File: h.File(),
- Line: h.Line(),
- Message: err.Error(),
- })
- return nil
- }
- var matcherSetsRaw []caddy.ModuleMap
- if matcherSet != nil {
- matcherSetsRaw = append(matcherSetsRaw, matcherSet)
- }
- return []ConfigValue{
- {
- Class: "route",
- Value: caddyhttp.Route{
- MatcherSetsRaw: matcherSetsRaw,
- HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", mod.ID.Name(), h.warnings)},
- },
- },
- }
-}
-
-// GroupRoutes adds the routes (caddyhttp.Route type) in vals to the
-// same group, if there is more than one route in vals.
-func (h Helper) GroupRoutes(vals []ConfigValue) {
- // ensure there's at least two routes; group of one is pointless
- var count int
- for _, v := range vals {
- if _, ok := v.Value.(caddyhttp.Route); ok {
- count++
- if count > 1 {
- break
- }
- }
- }
- if count < 2 {
- return
- }
-
- // now that we know the group will have some effect, do it
- groupName := h.groupCounter.nextGroup()
- for i := range vals {
- if route, ok := vals[i].Value.(caddyhttp.Route); ok {
- route.Group = groupName
- vals[i].Value = route
- }
- }
-}
-
-// NewBindAddresses returns config values relevant to adding
-// listener bind addresses to the config.
-func (h Helper) NewBindAddresses(addrs []string) []ConfigValue {
- return []ConfigValue{{Class: "bind", Value: addrs}}
-}
-
-// WithDispenser returns a new instance based on d. All other Helper
-// fields are copied, so typically maps are shared with this new instance.
-func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper {
- h.Dispenser = d
- return h
-}
-
-// ParseSegmentAsSubroute parses the segment such that its subdirectives
-// are themselves treated as directives, from which a subroute is built
-// and returned.
-func ParseSegmentAsSubroute(h Helper) (caddyhttp.MiddlewareHandler, error) {
- allResults, err := parseSegmentAsConfig(h)
- if err != nil {
- return nil, err
- }
-
- return buildSubroute(allResults, h.groupCounter)
-}
-
-// parseSegmentAsConfig parses the segment such that its subdirectives
-// are themselves treated as directives, including named matcher definitions,
-// and the raw Config structs are returned.
-func parseSegmentAsConfig(h Helper) ([]ConfigValue, error) {
- var allResults []ConfigValue
-
- for h.Next() {
- // don't allow non-matcher args on the first line
- if h.NextArg() {
- return nil, h.ArgErr()
- }
-
- // slice the linear list of tokens into top-level segments
- var segments []caddyfile.Segment
- for nesting := h.Nesting(); h.NextBlock(nesting); {
- segments = append(segments, h.NextSegment())
- }
-
- // copy existing matcher definitions so we can augment
- // new ones that are defined only in this scope
- matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs))
- for key, val := range h.matcherDefs {
- matcherDefs[key] = val
- }
-
- // find and extract any embedded matcher definitions in this scope
- for i := 0; i < len(segments); i++ {
- seg := segments[i]
- if strings.HasPrefix(seg.Directive(), matcherPrefix) {
- // parse, then add the matcher to matcherDefs
- err := parseMatcherDefinitions(caddyfile.NewDispenser(seg), matcherDefs)
- if err != nil {
- return nil, err
- }
- // remove the matcher segment (consumed), then step back the loop
- segments = append(segments[:i], segments[i+1:]...)
- i--
- }
- }
-
- // with matchers ready to go, evaluate each directive's segment
- for _, seg := range segments {
- dir := seg.Directive()
- dirFunc, ok := registeredDirectives[dir]
- if !ok {
- return nil, h.Errf("unrecognized directive: %s - are you sure your Caddyfile structure (nesting and braces) is correct?", dir)
- }
-
- subHelper := h
- subHelper.Dispenser = caddyfile.NewDispenser(seg)
- subHelper.matcherDefs = matcherDefs
-
- results, err := dirFunc(subHelper)
- if err != nil {
- return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err)
- }
- for _, result := range results {
- result.directive = dir
- allResults = append(allResults, result)
- }
- }
- }
-
- return allResults, nil
-}
-
-// ConfigValue represents a value to be added to the final
-// configuration, or a value to be consulted when building
-// the final configuration.
-type ConfigValue struct {
- // The kind of value this is. As the config is
- // being built, the adapter will look in the
- // "pile" for values belonging to a certain
- // class when it is setting up a certain part
- // of the config. The associated value will be
- // type-asserted and placed accordingly.
- Class string
-
- // The value to be used when building the config.
- // Generally its type is associated with the
- // name of the Class.
- Value interface{}
-
- directive string
-}
-
-func sortRoutes(routes []ConfigValue) {
- dirPositions := make(map[string]int)
- for i, dir := range directiveOrder {
- dirPositions[dir] = i
- }
-
- sort.SliceStable(routes, func(i, j int) bool {
- // if the directives are different, just use the established directive order
- iDir, jDir := routes[i].directive, routes[j].directive
- if iDir != jDir {
- return dirPositions[iDir] < dirPositions[jDir]
- }
-
- // directives are the same; sub-sort by path matcher length if there's
- // only one matcher set and one path (this is a very common case and
- // usually -- but not always -- the expected behavior; the user can
- // always take manual control of ordering using handle or route blocks)
- iRoute, ok := routes[i].Value.(caddyhttp.Route)
- if !ok {
- return false
- }
- jRoute, ok := routes[j].Value.(caddyhttp.Route)
- if !ok {
- return false
- }
-
- // decode the path matchers, if there is just one of them
- var iPM, jPM caddyhttp.MatchPath
- if len(iRoute.MatcherSetsRaw) == 1 {
- _ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &iPM)
- }
- if len(jRoute.MatcherSetsRaw) == 1 {
- _ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &jPM)
- }
-
- // sort by longer path (more specific) first; missing path
- // matchers or multi-matchers are treated as zero-length paths
- var iPathLen, jPathLen int
- if len(iPM) > 0 {
- iPathLen = len(iPM[0])
- }
- if len(jPM) > 0 {
- jPathLen = len(jPM[0])
- }
-
- // if both directives have no path matcher, use whichever one
- // has any kind of matcher defined first.
- if iPathLen == 0 && jPathLen == 0 {
- return len(iRoute.MatcherSetsRaw) > 0 && len(jRoute.MatcherSetsRaw) == 0
- }
-
- // sort with the most-specific (longest) path first
- return iPathLen > jPathLen
- })
-}
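
To make the two-level comparison in `sortRoutes` concrete, here is a self-contained sketch that sorts toy routes first by a hypothetical directive order and then by descending path length; the directive names and paths are invented for illustration:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	order := map[string]int{"redir": 0, "rewrite": 1, "handle": 2}
	routes := []struct{ directive, path string }{
		{"handle", "/docs"},
		{"rewrite", "/docs/json/index.html"},
		{"rewrite", "/docs"},
		{"redir", "/old"},
	}
	sort.SliceStable(routes, func(i, j int) bool {
		// different directives: use the established directive order
		if routes[i].directive != routes[j].directive {
			return order[routes[i].directive] < order[routes[j].directive]
		}
		// same directive: longer (more specific) path first
		return len(routes[i].path) > len(routes[j].path)
	})
	fmt.Println(routes)
	// [{redir /old} {rewrite /docs/json/index.html} {rewrite /docs} {handle /docs}]
}
```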
-
-// serverBlock pairs a Caddyfile server block with
-// a "pile" of config values, keyed by class name,
-// as well as its parsed keys for convenience.
-type serverBlock struct {
- block caddyfile.ServerBlock
- pile map[string][]ConfigValue // config values obtained from directives
- keys []Address
-}
-
-// hostsFromKeys returns a list of all the non-empty hostnames found in
-// the keys of the server block sb. If loggerMode is false, a key with
-// an empty hostname portion will return an empty slice, since that
-// server block is interpreted to effectively match all hosts. An empty
-// string is never added to the slice.
-//
-// If loggerMode is true, then the non-standard ports of keys will be
-// joined to the hostnames. This is to effectively match the Host
-// header of requests that come in for that key.
-//
-// The resulting slice is not sorted but will never have duplicates.
-func (sb serverBlock) hostsFromKeys(loggerMode bool) []string {
- // ensure each entry in our list is unique
- hostMap := make(map[string]struct{})
- for _, addr := range sb.keys {
- if addr.Host == "" {
- if !loggerMode {
- // server block contains a key like ":443", i.e. the host portion
- // is empty / catch-all, which means to match all hosts
- return []string{}
- }
- // never append an empty string
- continue
- }
- if loggerMode &&
- addr.Port != "" &&
- addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort) &&
- addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort) {
- hostMap[net.JoinHostPort(addr.Host, addr.Port)] = struct{}{}
- } else {
- hostMap[addr.Host] = struct{}{}
- }
- }
-
- // convert map to slice
- sblockHosts := make([]string, 0, len(hostMap))
- for host := range hostMap {
- sblockHosts = append(sblockHosts, host)
- }
-
- return sblockHosts
-}
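
The port-joining branch above only fires for non-standard ports, so logger names line up with the Host header a client actually sends. A quick sketch of that rule, with the standard ports hard-coded for illustration:

```go
package main

import (
	"fmt"
	"net"
)

func loggerHost(host, port string) string {
	// standard ports are omitted so the name matches a bare Host header;
	// anything else is joined, matching headers like "example.com:8443"
	if port != "" && port != "80" && port != "443" {
		return net.JoinHostPort(host, port)
	}
	return host
}

func main() {
	fmt.Println(loggerHost("example.com", "8443")) // example.com:8443
	fmt.Println(loggerHost("example.com", "443"))  // example.com
}
```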
-
-func (sb serverBlock) hostsFromKeysNotHTTP(httpPort string) []string {
- // ensure each entry in our list is unique
- hostMap := make(map[string]struct{})
- for _, addr := range sb.keys {
- if addr.Host == "" {
- continue
- }
- if addr.Scheme != "http" && addr.Port != httpPort {
- hostMap[addr.Host] = struct{}{}
- }
- }
-
- // convert map to slice
- sblockHosts := make([]string, 0, len(hostMap))
- for host := range hostMap {
- sblockHosts = append(sblockHosts, host)
- }
-
- return sblockHosts
-}
-
-// hasHostCatchAllKey returns true if sb has a key that
-// omits a host portion, i.e. it "catches all" hosts.
-func (sb serverBlock) hasHostCatchAllKey() bool {
- for _, addr := range sb.keys {
- if addr.Host == "" {
- return true
- }
- }
- return false
-}
-
-type (
- // UnmarshalFunc is a function which can unmarshal Caddyfile
- // tokens into zero or more config values using a Helper type.
- // These are passed in a call to RegisterDirective.
- UnmarshalFunc func(h Helper) ([]ConfigValue, error)
-
- // UnmarshalHandlerFunc is like UnmarshalFunc, except the
- // output of the unmarshaling is an HTTP handler. This
- // function does not need to deal with HTTP request matching,
- // which is abstracted away. Since writing HTTP handlers
- // with Caddyfile support is very common, this is a more
- // convenient way to add a handler to the chain: many of
- // the details common to HTTP handlers are taken care of
- // for you. These are passed in a call to
- // RegisterHandlerDirective.
- UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error)
-
- // UnmarshalGlobalFunc is a function which can unmarshal Caddyfile
- // tokens from a global option. It is passed the tokens to parse and
- // the existing value from the previous instance of this global option
- // (if any). It returns the value to associate with this global option.
- UnmarshalGlobalFunc func(d *caddyfile.Dispenser, existingVal interface{}) (interface{}, error)
-)
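
For context on how these function types are used in practice: a plugin typically registers an UnmarshalHandlerFunc via RegisterHandlerDirective in its init. The sketch below is a hypothetical `visitor_ip` middleware (not part of this repository), following the usual Caddy plugin shape:

```go
package visitorip

import (
	"net/http"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(Middleware{})
	httpcaddyfile.RegisterHandlerDirective("visitor_ip", parseCaddyfile)
}

// Middleware writes the client's remote address into a response header.
type Middleware struct {
	Header string `json:"header,omitempty"`
}

func (Middleware) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.visitor_ip",
		New: func() caddy.Module { return new(Middleware) },
	}
}

func (m Middleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	w.Header().Set(m.Header, r.RemoteAddr)
	return next.ServeHTTP(w, r)
}

// UnmarshalCaddyfile parses: visitor_ip <header>
func (m *Middleware) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	for d.Next() {
		if !d.Args(&m.Header) {
			return d.ArgErr()
		}
	}
	return nil
}

// parseCaddyfile satisfies UnmarshalHandlerFunc.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	var m Middleware
	err := m.UnmarshalCaddyfile(h.Dispenser)
	return m, err
}
```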
-
-var registeredDirectives = make(map[string]UnmarshalFunc)
-
-var registeredGlobalOptions = make(map[string]UnmarshalGlobalFunc)
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/httptype.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/httptype.go
deleted file mode 100644
index e5dafe6a..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/httptype.go
+++ /dev/null
@@ -1,1352 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpcaddyfile
-
-import (
- "encoding/json"
- "fmt"
- "log"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
- "github.com/caddyserver/caddy/v2/modules/caddypki"
- "github.com/caddyserver/caddy/v2/modules/caddytls"
-)
-
-func init() {
- caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}})
-}
-
-// App represents the configuration for a non-standard
-// Caddy app module (e.g. third-party plugin) which was
-// parsed from a global options block.
-type App struct {
- // The JSON key for the app being configured
- Name string
-
- // The raw app config as JSON
- Value json.RawMessage
-}
-
-// ServerType can set up a config from an HTTP Caddyfile.
-type ServerType struct {
-}
-
-// Setup makes a config from the tokens.
-func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
- options map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error) {
- var warnings []caddyconfig.Warning
- gc := counter{new(int)}
- state := make(map[string]interface{})
-
- // load all the server blocks and associate them with a "pile"
- // of config values; also prohibit duplicate keys because they
- // can make a config confusing if more than one server block is
- // chosen to handle a request - we actually will make each
- // server block's route terminal so that only one will run
- sbKeys := make(map[string]struct{})
- originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks))
- for i, sblock := range inputServerBlocks {
- for j, k := range sblock.Keys {
- if j == 0 && strings.HasPrefix(k, "@") {
- return nil, warnings, fmt.Errorf("cannot define a matcher outside of a site block: '%s'", k)
- }
- if _, ok := sbKeys[k]; ok {
- return nil, warnings, fmt.Errorf("duplicate site address not allowed: '%s' in %v (site block %d, key %d)", k, sblock.Keys, i, j)
- }
- sbKeys[k] = struct{}{}
- }
- originalServerBlocks = append(originalServerBlocks, serverBlock{
- block: sblock,
- pile: make(map[string][]ConfigValue),
- })
- }
-
- // apply any global options
- var err error
- originalServerBlocks, err = st.evaluateGlobalOptionsBlock(originalServerBlocks, options)
- if err != nil {
- return nil, warnings, err
- }
-
- // replace shorthand placeholders (which are
- // convenient when writing a Caddyfile) with
- // their actual placeholder identifiers or
- // variable names
- replacer := strings.NewReplacer(
- "{dir}", "{http.request.uri.path.dir}",
- "{file}", "{http.request.uri.path.file}",
- "{host}", "{http.request.host}",
- "{hostport}", "{http.request.hostport}",
- "{port}", "{http.request.port}",
- "{method}", "{http.request.method}",
- "{path}", "{http.request.uri.path}",
- "{query}", "{http.request.uri.query}",
- "{remote}", "{http.request.remote}",
- "{remote_host}", "{http.request.remote.host}",
- "{remote_port}", "{http.request.remote.port}",
- "{scheme}", "{http.request.scheme}",
- "{uri}", "{http.request.uri}",
- "{tls_cipher}", "{http.request.tls.cipher_suite}",
- "{tls_version}", "{http.request.tls.version}",
- "{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
- "{tls_client_issuer}", "{http.request.tls.client.issuer}",
- "{tls_client_serial}", "{http.request.tls.client.serial}",
- "{tls_client_subject}", "{http.request.tls.client.subject}",
- "{tls_client_certificate_pem}", "{http.request.tls.client.certificate_pem}",
- "{upstream_hostport}", "{http.reverse_proxy.upstream.hostport}",
- )
-
- // these are placeholders that allow user-defined final
- // parameters, but we still want to provide a shorthand
- // for them, so we use regexp replacements
- regexpReplacements := []struct {
- search *regexp.Regexp
- replace string
- }{
- {regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"},
- {regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"},
- {regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"},
- {regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"},
- {regexp.MustCompile(`{re\.([\w-]*)\.([\w-]*)}`), "{http.regexp.$1.$2}"},
- }
-
- for _, sb := range originalServerBlocks {
- for _, segment := range sb.block.Segments {
- for i := 0; i < len(segment); i++ {
- // simple string replacements
- segment[i].Text = replacer.Replace(segment[i].Text)
- // complex regexp replacements
- for _, r := range regexpReplacements {
- segment[i].Text = r.search.ReplaceAllString(segment[i].Text, r.replace)
- }
- }
- }
-
- if len(sb.block.Keys) == 0 {
- return nil, warnings, fmt.Errorf("server block without any key is global configuration, and if used, it must be first")
- }
-
- // extract matcher definitions
- matcherDefs := make(map[string]caddy.ModuleMap)
- for _, segment := range sb.block.Segments {
- if dir := segment.Directive(); strings.HasPrefix(dir, matcherPrefix) {
- d := sb.block.DispenseDirective(dir)
- err := parseMatcherDefinitions(d, matcherDefs)
- if err != nil {
- return nil, warnings, err
- }
- }
- }
-
- // evaluate each directive ("segment") in this block
- for _, segment := range sb.block.Segments {
- dir := segment.Directive()
-
- if strings.HasPrefix(dir, matcherPrefix) {
- // matcher definitions were pre-processed
- continue
- }
-
- dirFunc, ok := registeredDirectives[dir]
- if !ok {
- tkn := segment[0]
- message := "%s:%d: unrecognized directive: %s"
- if !sb.block.HasBraces {
- message += "\nDid you mean to define a second site? If so, you must use curly braces around each site to separate their configurations."
- }
- return nil, warnings, fmt.Errorf(message, tkn.File, tkn.Line, dir)
- }
-
- h := Helper{
- Dispenser: caddyfile.NewDispenser(segment),
- options: options,
- warnings: &warnings,
- matcherDefs: matcherDefs,
- parentBlock: sb.block,
- groupCounter: gc,
- State: state,
- }
-
- results, err := dirFunc(h)
- if err != nil {
- return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err)
- }
-
- // As a special case, we want "handle_path" to be sorted
- // at the same level as "handle", so we force them to use
- // the same directive name after their parsing is complete.
- // See https://github.com/caddyserver/caddy/issues/3675#issuecomment-678042377
- if dir == "handle_path" {
- dir = "handle"
- }
-
- for _, result := range results {
- result.directive = dir
- sb.pile[result.Class] = append(sb.pile[result.Class], result)
- }
- }
- }
-
- // map
- sbmap, err := st.mapAddressToServerBlocks(originalServerBlocks, options)
- if err != nil {
- return nil, warnings, err
- }
-
- // reduce
- pairings := st.consolidateAddrMappings(sbmap)
-
- // each pairing of listener addresses to list of server
- // blocks is basically a server definition
- servers, err := st.serversFromPairings(pairings, options, &warnings, gc)
- if err != nil {
- return nil, warnings, err
- }
-
- // now that each server is configured, make the HTTP app
- httpApp := caddyhttp.App{
- HTTPPort: tryInt(options["http_port"], &warnings),
- HTTPSPort: tryInt(options["https_port"], &warnings),
- GracePeriod: tryDuration(options["grace_period"], &warnings),
- Servers: servers,
- }
-
- // then make the TLS app
- tlsApp, warnings, err := st.buildTLSApp(pairings, options, warnings)
- if err != nil {
- return nil, warnings, err
- }
-
- // then make the PKI app
- pkiApp, warnings, err := st.buildPKIApp(pairings, options, warnings)
- if err != nil {
- return nil, warnings, err
- }
-
- // extract any custom logs, and enforce configured levels
- var customLogs []namedCustomLog
- var hasDefaultLog bool
- addCustomLog := func(ncl namedCustomLog) {
- if ncl.name == "" {
- return
- }
- if ncl.name == "default" {
- hasDefaultLog = true
- }
- if _, ok := options["debug"]; ok && ncl.log.Level == "" {
- ncl.log.Level = "DEBUG"
- }
- customLogs = append(customLogs, ncl)
- }
- // Apply global log options, when set
- if options["log"] != nil {
- for _, logValue := range options["log"].([]ConfigValue) {
- addCustomLog(logValue.Value.(namedCustomLog))
- }
- }
- // Apply server-specific log options
- for _, p := range pairings {
- for _, sb := range p.serverBlocks {
- for _, clVal := range sb.pile["custom_log"] {
- addCustomLog(clVal.Value.(namedCustomLog))
- }
- }
- }
-
- if !hasDefaultLog {
- // if the default log was not customized, ensure we
- // configure it with any applicable options
- if _, ok := options["debug"]; ok {
- customLogs = append(customLogs, namedCustomLog{
- name: "default",
- log: &caddy.CustomLog{Level: "DEBUG"},
- })
- }
- }
-
- // annnd the top-level config, then we're done!
- cfg := &caddy.Config{AppsRaw: make(caddy.ModuleMap)}
-
- // loop through the configured options, and if any of
- // them are an httpcaddyfile App, then we insert them
- // into the config as raw Caddy apps
- for _, opt := range options {
- if app, ok := opt.(App); ok {
- cfg.AppsRaw[app.Name] = app.Value
- }
- }
-
- // insert the standard Caddy apps into the config
- if len(httpApp.Servers) > 0 {
- cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings)
- }
- if !reflect.DeepEqual(tlsApp, &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}) {
- cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings)
- }
- if !reflect.DeepEqual(pkiApp, &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}) {
- cfg.AppsRaw["pki"] = caddyconfig.JSON(pkiApp, &warnings)
- }
- if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok {
- cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr,
- "module",
- storageCvtr.(caddy.Module).CaddyModule().ID.Name(),
- &warnings)
- }
- if adminConfig, ok := options["admin"].(*caddy.AdminConfig); ok && adminConfig != nil {
- cfg.Admin = adminConfig
- }
- if len(customLogs) > 0 {
- if cfg.Logging == nil {
- cfg.Logging = &caddy.Logging{
- Logs: make(map[string]*caddy.CustomLog),
- }
- }
- for _, ncl := range customLogs {
- if ncl.name != "" {
- cfg.Logging.Logs[ncl.name] = ncl.log
- }
- // most users seem to prefer not writing access logs
- // to the default log when they are directed to a
- // file or have any other special customization
- if ncl.name != "default" && len(ncl.log.Include) > 0 {
- defaultLog, ok := cfg.Logging.Logs["default"]
- if !ok {
- defaultLog = new(caddy.CustomLog)
- cfg.Logging.Logs["default"] = defaultLog
- }
- defaultLog.Exclude = append(defaultLog.Exclude, ncl.log.Include...)
- }
- }
- }
-
- return cfg, warnings, nil
-}
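
Setup is what ultimately runs when the "caddyfile" adapter registered in the init above is invoked. A small sketch of driving it through the public adapter API; the exact JSON output depends on the Caddy version, so treat this as illustrative:

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig"
	// blank import registers the "caddyfile" adapter via the init above
	_ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
)

func main() {
	input := []byte("example.com {\n\trespond \"hello\"\n}\n")
	adapter := caddyconfig.GetAdapter("caddyfile")
	cfgJSON, warnings, err := adapter.Adapt(input, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(warnings), string(cfgJSON))
}
```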
-
-// evaluateGlobalOptionsBlock evaluates the global options block,
-// which is expected to be the first server block if it has zero
-// keys. It returns the updated list of server blocks with the
-// global options block removed, and updates options accordingly.
-func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]interface{}) ([]serverBlock, error) {
- if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 {
- return serverBlocks, nil
- }
-
- for _, segment := range serverBlocks[0].block.Segments {
- opt := segment.Directive()
- var val interface{}
- var err error
- disp := caddyfile.NewDispenser(segment)
-
- optFunc, ok := registeredGlobalOptions[opt]
- if !ok {
- tkn := segment[0]
- return nil, fmt.Errorf("%s:%d: unrecognized global option: %s", tkn.File, tkn.Line, opt)
- }
-
- val, err = optFunc(disp, options[opt])
- if err != nil {
- return nil, fmt.Errorf("parsing caddyfile tokens for '%s': %v", opt, err)
- }
-
- // As a special case, fold multiple "servers" options together
- // in an array instead of overwriting a possible existing value
- if opt == "servers" {
- existingOpts, ok := options[opt].([]serverOptions)
- if !ok {
- existingOpts = []serverOptions{}
- }
- serverOpts, ok := val.(serverOptions)
- if !ok {
- return nil, fmt.Errorf("unexpected type from 'servers' global options: %T", val)
- }
- options[opt] = append(existingOpts, serverOpts)
- continue
- }
- // Additionally, fold multiple "log" options together into an
- // array so that multiple loggers can be configured.
- if opt == "log" {
- existingOpts, ok := options[opt].([]ConfigValue)
- if !ok {
- existingOpts = []ConfigValue{}
- }
- logOpts, ok := val.([]ConfigValue)
- if !ok {
- return nil, fmt.Errorf("unexpected type from 'log' global options: %T", val)
- }
- options[opt] = append(existingOpts, logOpts...)
- continue
- }
-
- options[opt] = val
- }
-
- // If we got "servers" options, we'll sort them by their listener address
- if serverOpts, ok := options["servers"].([]serverOptions); ok {
- sort.Slice(serverOpts, func(i, j int) bool {
- return len(serverOpts[i].ListenerAddress) > len(serverOpts[j].ListenerAddress)
- })
-
- // Reject the config if there are duplicate listener addresses
- seen := make(map[string]bool)
- for _, entry := range serverOpts {
- if _, alreadySeen := seen[entry.ListenerAddress]; alreadySeen {
- return nil, fmt.Errorf("cannot have 'servers' global options with duplicate listener addresses: %s", entry.ListenerAddress)
- }
- seen[entry.ListenerAddress] = true
- }
- }
-
- return serverBlocks[1:], nil
-}
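
Most global options overwrite any previous value, while "servers" and "log" accumulate into a slice so they can be specified more than once. The fold-instead-of-overwrite pattern in isolation, with toy types for brevity:

```go
package main

import "fmt"

// fold appends val to the slice stored under key instead of replacing it,
// mirroring how repeated "servers" and "log" options accumulate.
func fold(options map[string]interface{}, key, val string) {
	existing, _ := options[key].([]string)
	options[key] = append(existing, val)
}

func main() {
	options := map[string]interface{}{}
	fold(options, "log", "stderr")
	fold(options, "log", "access.log")
	fmt.Println(options["log"]) // [stderr access.log]
}
```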
-
-// serversFromPairings creates the servers for each pairing of addresses
-// to server blocks. Each pairing is essentially a server definition.
-func (st *ServerType) serversFromPairings(
- pairings []sbAddrAssociation,
- options map[string]interface{},
- warnings *[]caddyconfig.Warning,
- groupCounter counter,
-) (map[string]*caddyhttp.Server, error) {
- servers := make(map[string]*caddyhttp.Server)
- defaultSNI := tryString(options["default_sni"], warnings)
-
- httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
- if hp, ok := options["http_port"].(int); ok {
- httpPort = strconv.Itoa(hp)
- }
- httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
- if hsp, ok := options["https_port"].(int); ok {
- httpsPort = strconv.Itoa(hsp)
- }
- autoHTTPS := "on"
- if ah, ok := options["auto_https"].(string); ok {
- autoHTTPS = ah
- }
-
- for i, p := range pairings {
- srv := &caddyhttp.Server{
- Listen: p.addresses,
- }
-
- // handle the auto_https global option
- if autoHTTPS != "on" {
- srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
- if autoHTTPS == "off" {
- srv.AutoHTTPS.Disabled = true
- }
- if autoHTTPS == "disable_redirects" {
- srv.AutoHTTPS.DisableRedir = true
- }
- if autoHTTPS == "ignore_loaded_certs" {
- srv.AutoHTTPS.IgnoreLoadedCerts = true
- }
- }
-
- // sort server blocks by their keys; this is important because
- // only the first matching site should be evaluated, and we should
- // attempt to match the most specific site first (host and path), in
- // case their matchers overlap; we do this somewhat naively with a
- // descending sort by host length, then path length
- sort.SliceStable(p.serverBlocks, func(i, j int) bool {
- // TODO: we could pre-process the specificities for efficiency,
- // but I don't expect many blocks will have THAT many keys...
- var iLongestPath, jLongestPath string
- var iLongestHost, jLongestHost string
- var iWildcardHost, jWildcardHost bool
- for _, addr := range p.serverBlocks[i].keys {
- if strings.Contains(addr.Host, "*") || addr.Host == "" {
- iWildcardHost = true
- }
- if specificity(addr.Host) > specificity(iLongestHost) {
- iLongestHost = addr.Host
- }
- if specificity(addr.Path) > specificity(iLongestPath) {
- iLongestPath = addr.Path
- }
- }
- for _, addr := range p.serverBlocks[j].keys {
- if strings.Contains(addr.Host, "*") || addr.Host == "" {
- jWildcardHost = true
- }
- if specificity(addr.Host) > specificity(jLongestHost) {
- jLongestHost = addr.Host
- }
- if specificity(addr.Path) > specificity(jLongestPath) {
- jLongestPath = addr.Path
- }
- }
- // catch-all blocks (blocks with no hostname) should always go
- // last, even after blocks with wildcard hosts
- if specificity(iLongestHost) == 0 {
- return false
- }
- if specificity(jLongestHost) == 0 {
- return true
- }
- if iWildcardHost != jWildcardHost {
- // site blocks that have a key with a wildcard in the hostname
- // must always be less specific than blocks without one; see
- // https://github.com/caddyserver/caddy/issues/3410
- return jWildcardHost && !iWildcardHost
- }
- if specificity(iLongestHost) == specificity(jLongestHost) {
- return len(iLongestPath) > len(jLongestPath)
- }
- return specificity(iLongestHost) > specificity(jLongestHost)
- })
-
- var hasCatchAllTLSConnPolicy, addressQualifiesForTLS bool
- autoHTTPSWillAddConnPolicy := autoHTTPS != "off"
-
- // if a catch-all server block (one which accepts all hostnames) exists in this pairing,
- // we need to know that so that we can configure logs properly (see #3878)
- var catchAllSblockExists bool
- for _, sblock := range p.serverBlocks {
- if len(sblock.hostsFromKeys(false)) == 0 {
- catchAllSblockExists = true
- }
- }
-
- // if needed, the ServerLogConfig is initialized beforehand so
- // that all server blocks can populate it with data, even those
- // that do not come with a log directive
- for _, sblock := range p.serverBlocks {
- if len(sblock.pile["custom_log"]) != 0 {
- srv.Logs = new(caddyhttp.ServerLogConfig)
- break
- }
- }
-
- // create a subroute for each site in the server block
- for _, sblock := range p.serverBlocks {
- matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock)
- if err != nil {
- return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.block.Keys, err)
- }
-
- hosts := sblock.hostsFromKeys(false)
-
- // emit warnings if user put unspecified IP addresses; they probably want the bind directive
- for _, h := range hosts {
- if h == "0.0.0.0" || h == "::" {
- log.Printf("[WARNING] Site block has unspecified IP address %s which only matches requests having that Host header; you probably want the 'bind' directive to configure the socket", h)
- }
- }
-
- // tls: connection policies
- if cpVals, ok := sblock.pile["tls.connection_policy"]; ok {
- // tls connection policies
- for _, cpVal := range cpVals {
- cp := cpVal.Value.(*caddytls.ConnectionPolicy)
-
- // make sure the policy covers all hostnames from the block
- for _, h := range hosts {
- if h == defaultSNI {
- hosts = append(hosts, "")
- cp.DefaultSNI = defaultSNI
- break
- }
- }
-
- if len(hosts) > 0 {
- cp.MatchersRaw = caddy.ModuleMap{
- "sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
- }
- } else {
- cp.DefaultSNI = defaultSNI
- }
-
- // only append this policy if it actually changes something
- if !cp.SettingsEmpty() {
- srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
- hasCatchAllTLSConnPolicy = len(hosts) == 0
- }
- }
- }
-
- for _, addr := range sblock.keys {
- // if server only uses HTTPS port, auto-HTTPS will not apply
- if listenersUseAnyPortOtherThan(srv.Listen, httpPort) {
- // exclude any hosts that were defined explicitly with "http://"
- // in the key from automated cert management (issue #2998)
- if addr.Scheme == "http" && addr.Host != "" {
- if srv.AutoHTTPS == nil {
- srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
- }
- if !sliceContains(srv.AutoHTTPS.Skip, addr.Host) {
- srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, addr.Host)
- }
- }
- }
-
- // we'll need to remember if the address qualifies for auto-HTTPS, so we
- // can add a TLS conn policy if necessary
- if addr.Scheme == "https" ||
- (addr.Scheme != "http" && addr.Host != "" && addr.Port != httpPort) {
- addressQualifiesForTLS = true
- }
- // predict whether auto-HTTPS will add the conn policy for us; if so, we
- // may not need to add one for this server
- autoHTTPSWillAddConnPolicy = autoHTTPSWillAddConnPolicy &&
- (addr.Port == httpsPort || (addr.Port != httpPort && addr.Host != ""))
- }
-
- // Look for any config values that provide listener wrappers on the server block
- for _, listenerConfig := range sblock.pile["listener_wrapper"] {
- listenerWrapper, ok := listenerConfig.Value.(caddy.ListenerWrapper)
- if !ok {
- return nil, fmt.Errorf("config for a listener wrapper did not provide a value that implements caddy.ListenerWrapper")
- }
- jsonListenerWrapper := caddyconfig.JSONModuleObject(
- listenerWrapper,
- "wrapper",
- listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
- warnings)
- srv.ListenerWrappersRaw = append(srv.ListenerWrappersRaw, jsonListenerWrapper)
- }
-
- // set up each handler directive, making sure to honor directive order
- dirRoutes := sblock.pile["route"]
- siteSubroute, err := buildSubroute(dirRoutes, groupCounter)
- if err != nil {
- return nil, err
- }
-
- // add the site block's route(s) to the server
- srv.Routes = appendSubrouteToRouteList(srv.Routes, siteSubroute, matcherSetsEnc, p, warnings)
-
- // if error routes are defined, add those too
- if errorSubrouteVals, ok := sblock.pile["error_route"]; ok {
- if srv.Errors == nil {
- srv.Errors = new(caddyhttp.HTTPErrorConfig)
- }
- for _, val := range errorSubrouteVals {
- sr := val.Value.(*caddyhttp.Subroute)
- srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, sr, matcherSetsEnc, p, warnings)
- }
- }
-
- // add log associations
- // see https://github.com/caddyserver/caddy/issues/3310
- sblockLogHosts := sblock.hostsFromKeys(true)
- for _, cval := range sblock.pile["custom_log"] {
- ncl := cval.Value.(namedCustomLog)
- if sblock.hasHostCatchAllKey() {
- // all requests for hosts not able to be listed should use
- // this log because it's a catch-all-hosts server block
- srv.Logs.DefaultLoggerName = ncl.name
- } else {
- // map each host to the user's desired logger name
- for _, h := range sblockLogHosts {
- // if the custom logger name is non-empty, add it to the map;
- // otherwise, only map to an empty logger name if this or
- // another site block on this server has a catch-all host (in
- // which case only requests with mapped hostnames will be
- // access-logged, so it'll be necessary to add them to the
- // map even if they use default logger)
- if ncl.name != "" || catchAllSblockExists {
- if srv.Logs.LoggerNames == nil {
- srv.Logs.LoggerNames = make(map[string]string)
- }
- srv.Logs.LoggerNames[h] = ncl.name
- }
- }
- }
- }
- if srv.Logs != nil && len(sblock.pile["custom_log"]) == 0 {
- // server has access logs enabled, but this server block does not
- // enable access logs; therefore, all hosts of this server block
- // should not be access-logged
- if len(hosts) == 0 {
- // if the server block has a catch-all-hosts key, then we should
- // not log reqs to any host unless it appears in the map
- srv.Logs.SkipUnmappedHosts = true
- }
- srv.Logs.SkipHosts = append(srv.Logs.SkipHosts, sblockLogHosts...)
- }
- }
-
- // a server cannot (natively) serve both HTTP and HTTPS at the
- // same time, so make sure the configuration isn't in conflict
- err := detectConflictingSchemes(srv, p.serverBlocks, options)
- if err != nil {
- return nil, err
- }
-
- // a catch-all TLS conn policy is necessary to ensure TLS can
- // be offered to all hostnames of the server; even though only
- // one policy is needed to enable TLS for the server, that
- // policy might apply to only certain TLS handshakes; but when
- // using the Caddyfile, user would expect all handshakes to at
- // least have a matching connection policy, so here we append a
- // catch-all/default policy if there isn't one already (it's
- // important that it goes at the end) - see issue #3004:
- // https://github.com/caddyserver/caddy/issues/3004
- // TODO: maybe a smarter way to handle this might be to just make the
- // auto-HTTPS logic at provision-time detect if there is any connection
- // policy missing for any HTTPS-enabled hosts, if so, add it... maybe?
- if addressQualifiesForTLS &&
- !hasCatchAllTLSConnPolicy &&
- (len(srv.TLSConnPolicies) > 0 || !autoHTTPSWillAddConnPolicy || defaultSNI != "") {
- srv.TLSConnPolicies = append(srv.TLSConnPolicies, &caddytls.ConnectionPolicy{DefaultSNI: defaultSNI})
- }
-
- // tidy things up a bit
- srv.TLSConnPolicies, err = consolidateConnPolicies(srv.TLSConnPolicies)
- if err != nil {
- return nil, fmt.Errorf("consolidating TLS connection policies for server %d: %v", i, err)
- }
- srv.Routes = consolidateRoutes(srv.Routes)
-
- servers[fmt.Sprintf("srv%d", i)] = srv
- }
-
- err := applyServerOptions(servers, options, warnings)
- if err != nil {
- return nil, err
- }
-
- return servers, nil
-}
-
-func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock, options map[string]interface{}) error {
- httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
- if hp, ok := options["http_port"].(int); ok {
- httpPort = strconv.Itoa(hp)
- }
- httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
- if hsp, ok := options["https_port"].(int); ok {
- httpsPort = strconv.Itoa(hsp)
- }
-
- var httpOrHTTPS string
- checkAndSetHTTP := func(addr Address) error {
- if httpOrHTTPS == "HTTPS" {
- errMsg := fmt.Errorf("server listening on %v is configured for HTTPS and cannot natively multiplex HTTP and HTTPS: %s",
- srv.Listen, addr.Original)
- if addr.Scheme == "" && addr.Host == "" {
- errMsg = fmt.Errorf("%s (try specifying https:// in the address)", errMsg)
- }
- return errMsg
- }
- if len(srv.TLSConnPolicies) > 0 {
- // any connection policy created for an HTTP server
- // is a logical conflict, as it would enable HTTPS
- return fmt.Errorf("server listening on %v is HTTP, but attempts to configure TLS connection policies", srv.Listen)
- }
- httpOrHTTPS = "HTTP"
- return nil
- }
- checkAndSetHTTPS := func(addr Address) error {
- if httpOrHTTPS == "HTTP" {
- return fmt.Errorf("server listening on %v is configured for HTTP and cannot natively multiplex HTTP and HTTPS: %s",
- srv.Listen, addr.Original)
- }
- httpOrHTTPS = "HTTPS"
- return nil
- }
-
- for _, sblock := range serverBlocks {
- for _, addr := range sblock.keys {
- if addr.Scheme == "http" || addr.Port == httpPort {
- if err := checkAndSetHTTP(addr); err != nil {
- return err
- }
- } else if addr.Scheme == "https" || addr.Port == httpsPort || len(srv.TLSConnPolicies) > 0 {
- if err := checkAndSetHTTPS(addr); err != nil {
- return err
- }
- } else if addr.Host == "" {
- if err := checkAndSetHTTP(addr); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
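
The helper above is essentially a two-state machine: the first scheme seen pins the server to HTTP or HTTPS, and any later key implying the other scheme is rejected. Stripped to its core as a standalone demo:

```go
package main

import "fmt"

func main() {
	var mode string
	set := func(scheme string) error {
		if mode != "" && mode != scheme {
			return fmt.Errorf("cannot multiplex %s and %s on one server", mode, scheme)
		}
		mode = scheme
		return nil
	}
	fmt.Println(set("HTTP"))  // <nil>
	fmt.Println(set("HTTPS")) // error: cannot multiplex HTTP and HTTPS on one server
}
```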
-
-// consolidateConnPolicies sorts any catch-all policy to the end, removes empty TLS connection
-// policies, and combines equivalent ones for a cleaner overall output.
-func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.ConnectionPolicies, error) {
- // catch-all policies (those without any matcher) should be at the
- // end, otherwise they nullify any more specific policies
- sort.SliceStable(cps, func(i, j int) bool {
- return cps[j].MatchersRaw == nil && cps[i].MatchersRaw != nil
- })
-
- for i := 0; i < len(cps); i++ {
- // compare it to the others
- for j := 0; j < len(cps); j++ {
- if j == i {
- continue
- }
-
- // if they're exactly equal in every way, just keep one of them
- if reflect.DeepEqual(cps[i], cps[j]) {
- cps = append(cps[:j], cps[j+1:]...)
- i--
- break
- }
-
- // if they have the same matcher, try to reconcile each field: either they must
- // be identical, or we have to be able to combine them safely
- if reflect.DeepEqual(cps[i].MatchersRaw, cps[j].MatchersRaw) {
- if len(cps[i].ALPN) > 0 &&
- len(cps[j].ALPN) > 0 &&
- !reflect.DeepEqual(cps[i].ALPN, cps[j].ALPN) {
- return nil, fmt.Errorf("two policies with same match criteria have conflicting ALPN: %v vs. %v",
- cps[i].ALPN, cps[j].ALPN)
- }
- if len(cps[i].CipherSuites) > 0 &&
- len(cps[j].CipherSuites) > 0 &&
- !reflect.DeepEqual(cps[i].CipherSuites, cps[j].CipherSuites) {
- return nil, fmt.Errorf("two policies with same match criteria have conflicting cipher suites: %v vs. %v",
- cps[i].CipherSuites, cps[j].CipherSuites)
- }
- if cps[i].ClientAuthentication == nil &&
- cps[j].ClientAuthentication != nil &&
- !reflect.DeepEqual(cps[i].ClientAuthentication, cps[j].ClientAuthentication) {
- return nil, fmt.Errorf("two policies with same match criteria have conflicting client auth configuration: %+v vs. %+v",
- cps[i].ClientAuthentication, cps[j].ClientAuthentication)
- }
- if len(cps[i].Curves) > 0 &&
- len(cps[j].Curves) > 0 &&
- !reflect.DeepEqual(cps[i].Curves, cps[j].Curves) {
- return nil, fmt.Errorf("two policies with same match criteria have conflicting curves: %v vs. %v",
- cps[i].Curves, cps[j].Curves)
- }
- if cps[i].DefaultSNI != "" &&
- cps[j].DefaultSNI != "" &&
- cps[i].DefaultSNI != cps[j].DefaultSNI {
- return nil, fmt.Errorf("two policies with same match criteria have conflicting default SNI: %s vs. %s",
- cps[i].DefaultSNI, cps[j].DefaultSNI)
- }
- if cps[i].ProtocolMin != "" &&
- cps[j].ProtocolMin != "" &&
- cps[i].ProtocolMin != cps[j].ProtocolMin {
- return nil, fmt.Errorf("two policies with same match criteria have conflicting min protocol: %s vs. %s",
- cps[i].ProtocolMin, cps[j].ProtocolMin)
- }
- if cps[i].ProtocolMax != "" &&
- cps[j].ProtocolMax != "" &&
- cps[i].ProtocolMax != cps[j].ProtocolMax {
- return nil, fmt.Errorf("two policies with same match criteria have conflicting max protocol: %s vs. %s",
- cps[i].ProtocolMax, cps[j].ProtocolMax)
- }
- if cps[i].CertSelection != nil && cps[j].CertSelection != nil {
- // merging fields other than AnyTag is not implemented
- if !reflect.DeepEqual(cps[i].CertSelection.SerialNumber, cps[j].CertSelection.SerialNumber) ||
- !reflect.DeepEqual(cps[i].CertSelection.SubjectOrganization, cps[j].CertSelection.SubjectOrganization) ||
- cps[i].CertSelection.PublicKeyAlgorithm != cps[j].CertSelection.PublicKeyAlgorithm ||
- !reflect.DeepEqual(cps[i].CertSelection.AllTags, cps[j].CertSelection.AllTags) {
- return nil, fmt.Errorf("two policies with same match criteria have conflicting cert selections: %+v vs. %+v",
- cps[i].CertSelection, cps[j].CertSelection)
- }
- }
-
- // by now we've decided that we can merge the two -- we'll keep i and drop j
-
- if len(cps[i].ALPN) == 0 && len(cps[j].ALPN) > 0 {
- cps[i].ALPN = cps[j].ALPN
- }
- if len(cps[i].CipherSuites) == 0 && len(cps[j].CipherSuites) > 0 {
- cps[i].CipherSuites = cps[j].CipherSuites
- }
- if cps[i].ClientAuthentication == nil && cps[j].ClientAuthentication != nil {
- cps[i].ClientAuthentication = cps[j].ClientAuthentication
- }
- if len(cps[i].Curves) == 0 && len(cps[j].Curves) > 0 {
- cps[i].Curves = cps[j].Curves
- }
- if cps[i].DefaultSNI == "" && cps[j].DefaultSNI != "" {
- cps[i].DefaultSNI = cps[j].DefaultSNI
- }
- if cps[i].ProtocolMin == "" && cps[j].ProtocolMin != "" {
- cps[i].ProtocolMin = cps[j].ProtocolMin
- }
- if cps[i].ProtocolMax == "" && cps[j].ProtocolMax != "" {
- cps[i].ProtocolMax = cps[j].ProtocolMax
- }
-
- if cps[i].CertSelection == nil && cps[j].CertSelection != nil {
- // if j is the only one with a policy, move it over to i
- cps[i].CertSelection = cps[j].CertSelection
- } else if cps[i].CertSelection != nil && cps[j].CertSelection != nil {
- // if both have one, then combine AnyTag
- for _, tag := range cps[j].CertSelection.AnyTag {
- if !sliceContains(cps[i].CertSelection.AnyTag, tag) {
- cps[i].CertSelection.AnyTag = append(cps[i].CertSelection.AnyTag, tag)
- }
- }
- }
-
- cps = append(cps[:j], cps[j+1:]...)
- i--
- break
- }
- }
- }
- return cps, nil
-}
-
-// appendSubrouteToRouteList appends the routes in subroute
-// to the routeList, optionally qualified by matchers.
-func appendSubrouteToRouteList(routeList caddyhttp.RouteList,
- subroute *caddyhttp.Subroute,
- matcherSetsEnc []caddy.ModuleMap,
- p sbAddrAssociation,
- warnings *[]caddyconfig.Warning) caddyhttp.RouteList {
-
- // nothing to do if... there's nothing to do
- if len(matcherSetsEnc) == 0 && len(subroute.Routes) == 0 && subroute.Errors == nil {
- return routeList
- }
-
- if len(matcherSetsEnc) == 0 && len(p.serverBlocks) == 1 {
- // no need to wrap the handlers in a subroute if this is
- // the only server block and there is no matcher for it
- routeList = append(routeList, subroute.Routes...)
- } else {
- route := caddyhttp.Route{
- // the semantics of a site block in the Caddyfile dictate
- // that only the first matching one is evaluated, since
- // site blocks do not cascade nor inherit
- Terminal: true,
- }
- if len(matcherSetsEnc) > 0 {
- route.MatcherSetsRaw = matcherSetsEnc
- }
- if len(subroute.Routes) > 0 || subroute.Errors != nil {
- route.HandlersRaw = []json.RawMessage{
- caddyconfig.JSONModuleObject(subroute, "handler", "subroute", warnings),
- }
- }
- if len(route.MatcherSetsRaw) > 0 || len(route.HandlersRaw) > 0 {
- routeList = append(routeList, route)
- }
- }
- return routeList
-}
-
-// buildSubroute turns the config values, which are expected to be routes,
-// into a clean and orderly subroute that has all the routes within it.
-func buildSubroute(routes []ConfigValue, groupCounter counter) (*caddyhttp.Subroute, error) {
- for _, val := range routes {
- if !directiveIsOrdered(val.directive) {
- return nil, fmt.Errorf("directive '%s' is not ordered, so it cannot be used here", val.directive)
- }
- }
-
- sortRoutes(routes)
-
- subroute := new(caddyhttp.Subroute)
-
- // some directives are mutually exclusive (only first matching
- // instance should be evaluated); this is done by putting their
- // routes in the same group
- mutuallyExclusiveDirs := map[string]*struct {
- count int
- groupName string
- }{
- // as a special case, group rewrite directives so that they are mutually exclusive;
- // this means that only the first matching rewrite will be evaluated, and that's
- // probably a good thing, since there should never be a need to do more than one
- // rewrite (I think?), and cascading rewrites smell bad... imagine these rewrites:
- // rewrite /docs/json/* /docs/json/index.html
- // rewrite /docs/* /docs/index.html
- // (We use this on the Caddy website, or at least we did once.) The first rewrite's
- // result is also matched by the second rewrite, making the first rewrite pointless.
- // See issue #2959.
- "rewrite": {},
-
- // handle blocks are also mutually exclusive by definition
- "handle": {},
-
- // root just sets a variable, so if it was not mutually exclusive, intersecting
- // root directives would overwrite previously-matched ones; they should not cascade
- "root": {},
- }
-
- // we need to deterministically loop over each of these directives
- // in order to keep the group numbers consistent
- keys := make([]string, 0, len(mutuallyExclusiveDirs))
- for k := range mutuallyExclusiveDirs {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- for _, meDir := range keys {
- info := mutuallyExclusiveDirs[meDir]
-
- // see how many instances of the directive there are
- for _, r := range routes {
- if r.directive == meDir {
- info.count++
- if info.count > 1 {
- break
- }
- }
- }
- // if there is more than one, put them in a group
- // (special case: "rewrite" directive must always be in
- // its own group--even if there is only one--because we
- // do not want a rewrite to be consolidated into other
- // adjacent routes that happen to have the same matcher,
- // see caddyserver/caddy#3108 - because the implied
- // intent of rewrite is to do an internal redirect,
- // we can't assume that the request will continue to
- // match the same matcher; anyway, giving a route a
- // unique group name should keep it from consolidating)
- if info.count > 1 || meDir == "rewrite" {
- info.groupName = groupCounter.nextGroup()
- }
- }
-
- // add all the routes piled in from directives
- for _, r := range routes {
- // put this route into a group if it is mutually exclusive
- if info, ok := mutuallyExclusiveDirs[r.directive]; ok {
- route := r.Value.(caddyhttp.Route)
- route.Group = info.groupName
- r.Value = route
- }
-
- switch route := r.Value.(type) {
- case caddyhttp.Subroute:
- // if a route-class config value is actually a Subroute handler
- // with nothing but a list of routes, then it is the intention
- // of the directive to keep these handlers together and in this
- // same order, but not necessarily in a subroute (if it wanted
- // to keep them in a subroute, the directive would have returned
- // a route with a Subroute as its handler); this is useful to
- // keep multiple handlers/routes together and in the same order
- // so that the sorting procedure we did above doesn't reorder them
- if route.Errors != nil {
- // if error handlers are also set, this is confusing; it's
- // probably supposed to be wrapped in a Route and encoded
- // as a regular handler route... programmer error.
- panic("found subroute with more than just routes; perhaps it should have been wrapped in a route?")
- }
- subroute.Routes = append(subroute.Routes, route.Routes...)
- case caddyhttp.Route:
- subroute.Routes = append(subroute.Routes, route)
- }
- }
-
- subroute.Routes = consolidateRoutes(subroute.Routes)
-
- return subroute, nil
-}
-
-// consolidateRoutes combines routes with the same properties
-// (same matchers, same Terminal and Group settings) for a
-// cleaner overall output.
-func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
- for i := 0; i < len(routes)-1; i++ {
- if reflect.DeepEqual(routes[i].MatcherSetsRaw, routes[i+1].MatcherSetsRaw) &&
- routes[i].Terminal == routes[i+1].Terminal &&
- routes[i].Group == routes[i+1].Group {
- // keep the handlers in the same order, then splice out repetitive route
- routes[i].HandlersRaw = append(routes[i].HandlersRaw, routes[i+1].HandlersRaw...)
- routes = append(routes[:i+1], routes[i+2:]...)
- i--
- }
- }
- return routes
-}
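
A standalone illustration of this adjacent-merge, using plain strings in place of raw matcher sets (the types here are invented for the demo): equal neighbors collapse into one route whose handlers are concatenated in order.

```go
package main

import "fmt"

type route struct {
	matcher  string
	handlers []string
}

func consolidate(routes []route) []route {
	for i := 0; i < len(routes)-1; i++ {
		if routes[i].matcher == routes[i+1].matcher {
			// keep the handlers in order, then splice out the duplicate
			routes[i].handlers = append(routes[i].handlers, routes[i+1].handlers...)
			routes = append(routes[:i+1], routes[i+2:]...)
			i--
		}
	}
	return routes
}

func main() {
	fmt.Println(consolidate([]route{
		{"/api/*", []string{"headers"}},
		{"/api/*", []string{"reverse_proxy"}},
		{"/", []string{"file_server"}},
	}))
	// [{/api/* [headers reverse_proxy]} {/ [file_server]}]
}
```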
-
-func matcherSetFromMatcherToken(
- tkn caddyfile.Token,
- matcherDefs map[string]caddy.ModuleMap,
- warnings *[]caddyconfig.Warning,
-) (caddy.ModuleMap, bool, error) {
- // matcher tokens can be wildcards, simple path matchers,
- // or refer to a pre-defined matcher by some name
- if tkn.Text == "*" {
- // match all requests == no matchers, so nothing to do
- return nil, true, nil
- } else if strings.HasPrefix(tkn.Text, "/") {
- // convenient way to specify a single path match
- return caddy.ModuleMap{
- "path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings),
- }, true, nil
- } else if strings.HasPrefix(tkn.Text, matcherPrefix) {
- // pre-defined matcher
- m, ok := matcherDefs[tkn.Text]
- if !ok {
- return nil, false, fmt.Errorf("unrecognized matcher name: %+v", tkn.Text)
- }
- return m, true, nil
- }
- return nil, false, nil
-}
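
So a matcher token takes one of three forms; anything else is treated as an ordinary argument. A tiny classifier mirroring the branches above, on sample tokens:

```go
package main

import (
	"fmt"
	"strings"
)

func classify(tok string) string {
	switch {
	case tok == "*":
		return "wildcard (no matcher)"
	case strings.HasPrefix(tok, "/"):
		return "inline path matcher"
	case strings.HasPrefix(tok, "@"):
		return "named matcher reference"
	}
	return "not a matcher token"
}

func main() {
	for _, tok := range []string{"*", "/api/*", "@websockets", "file_server"} {
		fmt.Printf("%-12s => %s\n", tok, classify(tok))
	}
}
```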
-
-func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.ModuleMap, error) {
- type hostPathPair struct {
- hostm caddyhttp.MatchHost
- pathm caddyhttp.MatchPath
- }
-
- // keep routes with common host and path matchers together
- var matcherPairs []*hostPathPair
-
- var catchAllHosts bool
- for _, addr := range sblock.keys {
- // choose a matcher pair that should be shared by this
- // server block; if none exists yet, create one
- var chosenMatcherPair *hostPathPair
- for _, mp := range matcherPairs {
- if (len(mp.pathm) == 0 && addr.Path == "") ||
- (len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) {
- chosenMatcherPair = mp
- break
- }
- }
- if chosenMatcherPair == nil {
- chosenMatcherPair = new(hostPathPair)
- if addr.Path != "" {
- chosenMatcherPair.pathm = []string{addr.Path}
- }
- matcherPairs = append(matcherPairs, chosenMatcherPair)
- }
-
- // if one of the keys has no host (i.e. is a catch-all for
- // any hostname), then we need to null out the host matcher
- // entirely so that it matches all hosts
- if addr.Host == "" && !catchAllHosts {
- chosenMatcherPair.hostm = nil
- catchAllHosts = true
- }
- if catchAllHosts {
- continue
- }
-
- // add this server block's keys to the matcher
- // pair if it doesn't already exist
- if addr.Host != "" {
- var found bool
- for _, h := range chosenMatcherPair.hostm {
- if h == addr.Host {
- found = true
- break
- }
- }
- if !found {
- chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
- }
- }
- }
-
- // iterate each pairing of host and path matchers and
- // put them into a map for JSON encoding
- var matcherSets []map[string]caddyhttp.RequestMatcher
- for _, mp := range matcherPairs {
- matcherSet := make(map[string]caddyhttp.RequestMatcher)
- if len(mp.hostm) > 0 {
- matcherSet["host"] = mp.hostm
- }
- if len(mp.pathm) > 0 {
- matcherSet["path"] = mp.pathm
- }
- if len(matcherSet) > 0 {
- matcherSets = append(matcherSets, matcherSet)
- }
- }
-
- // finally, encode each of the matcher sets
- matcherSetsEnc := make([]caddy.ModuleMap, 0, len(matcherSets))
- for _, ms := range matcherSets {
- msEncoded, err := encodeMatcherSet(ms)
- if err != nil {
- return nil, fmt.Errorf("server block %v: %v", sblock.block.Keys, err)
- }
- matcherSetsEnc = append(matcherSetsEnc, msEncoded)
- }
-
- return matcherSetsEnc, nil
-}
-
-func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
- for d.Next() {
- definitionName := d.Val()
-
- if _, ok := matchers[definitionName]; ok {
- return fmt.Errorf("matcher is defined more than once: %s", definitionName)
- }
- matchers[definitionName] = make(caddy.ModuleMap)
-
- // in case there are multiple instances of the same matcher, concatenate
- // their tokens (we expect that UnmarshalCaddyfile should be able to
- // handle more than one segment); otherwise, we'd overwrite other
- // instances of the matcher in this set
- tokensByMatcherName := make(map[string][]caddyfile.Token)
- for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
- matcherName := d.Val()
- tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
- }
- for matcherName, tokens := range tokensByMatcherName {
- mod, err := caddy.GetModule("http.matchers." + matcherName)
- if err != nil {
- return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
- }
- unm, ok := mod.New().(caddyfile.Unmarshaler)
- if !ok {
- return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
- }
- err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
- if err != nil {
- return err
- }
- rm, ok := unm.(caddyhttp.RequestMatcher)
- if !ok {
- return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
- }
- matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
- }
- }
- return nil
-}
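
The token-concatenation step means a definition may repeat the same matcher module, and the instances are merged rather than overwritten; in Caddyfile terms, two `host` lines inside one `@intranet { ... }` block feed a single `host` matcher. The map-append pattern in isolation, with invented matcher names:

```go
package main

import "fmt"

func main() {
	lines := [][2]string{
		{"host", "app.internal"},
		{"host", "*.corp.internal"},
		{"path", "/admin/*"},
	}
	// repeated instances of the same matcher append, not overwrite
	tokensByMatcher := make(map[string][]string)
	for _, l := range lines {
		tokensByMatcher[l[0]] = append(tokensByMatcher[l[0]], l[1])
	}
	fmt.Println(tokensByMatcher["host"]) // [app.internal *.corp.internal]
}
```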
-
-func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (caddy.ModuleMap, error) {
- msEncoded := make(caddy.ModuleMap)
- for matcherName, val := range matchers {
- jsonBytes, err := json.Marshal(val)
- if err != nil {
- return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err)
- }
- msEncoded[matcherName] = jsonBytes
- }
- return msEncoded, nil
-}
-
-// tryInt tries to convert val to an integer. If it fails,
-// it downgrades the error to a warning and returns 0.
-func tryInt(val interface{}, warnings *[]caddyconfig.Warning) int {
- intVal, ok := val.(int)
- if val != nil && !ok && warnings != nil {
- *warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"})
- }
- return intVal
-}
-
-func tryString(val interface{}, warnings *[]caddyconfig.Warning) string {
- stringVal, ok := val.(string)
- if val != nil && !ok && warnings != nil {
- *warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"})
- }
- return stringVal
-}
-
-func tryDuration(val interface{}, warnings *[]caddyconfig.Warning) caddy.Duration {
- durationVal, ok := val.(caddy.Duration)
- if val != nil && !ok && warnings != nil {
- *warnings = append(*warnings, caddyconfig.Warning{Message: "not a duration type"})
- }
- return durationVal
-}
-
-// sliceContains returns true if needle is in haystack.
-func sliceContains(haystack []string, needle string) bool {
- for _, s := range haystack {
- if s == needle {
- return true
- }
- }
- return false
-}
-
-// listenersUseAnyPortOtherThan returns true if there are any
-// listeners in addresses that use a port which is not otherPort.
-// Mostly borrowed from an unexported method in the caddyhttp package.
-func listenersUseAnyPortOtherThan(addresses []string, otherPort string) bool {
- otherPortInt, err := strconv.Atoi(otherPort)
- if err != nil {
- return false
- }
- for _, lnAddr := range addresses {
- laddrs, err := caddy.ParseNetworkAddress(lnAddr)
- if err != nil {
- continue
- }
- if uint(otherPortInt) > laddrs.EndPort || uint(otherPortInt) < laddrs.StartPort {
- return true
- }
- }
- return false
-}
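
A quick check of this helper's building blocks using the public `caddy.ParseNetworkAddress`, with a made-up listener range: port 80 falls outside 8080-8085, so such a server would count as using "some other port" and auto-HTTPS logic can apply.

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2"
)

func main() {
	addr, err := caddy.ParseNetworkAddress("tcp/localhost:8080-8085")
	if err != nil {
		panic(err)
	}
	other := uint(80)
	// true: 80 is outside the listener's StartPort..EndPort range
	fmt.Println(other > addr.EndPort || other < addr.StartPort)
}
```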
-
-// specificity returns len(s) minus any wildcards (*) and
-// placeholders ({...}). Basically, it's a length count
-// that penalizes the use of wildcards and placeholders.
-// This is useful for comparing hostnames and paths.
-// However, wildcards in paths are not a sure answer to
-// the question of specificity. For example,
-// '*.example.com' is clearly less specific than
-// 'a.example.com', but is '/a' more or less specific
-// than '/a*'?
-func specificity(s string) int {
- l := len(s) - strings.Count(s, "*")
- for len(s) > 0 {
- start := strings.Index(s, "{")
- if start < 0 {
- return l
- }
- end := strings.Index(s[start:], "}") + start + 1
- if end <= start {
- return l
- }
- l -= end - start
- s = s[end:]
- }
- return l
-}
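
Sample values make the penalty concrete; copying the function verbatim into a scratch program gives:

```go
package main

import (
	"fmt"
	"strings"
)

// specificity is copied verbatim from the deleted file above.
func specificity(s string) int {
	l := len(s) - strings.Count(s, "*")
	for len(s) > 0 {
		start := strings.Index(s, "{")
		if start < 0 {
			return l
		}
		end := strings.Index(s[start:], "}") + start + 1
		if end <= start {
			return l
		}
		l -= end - start
		s = s[end:]
	}
	return l
}

func main() {
	fmt.Println(specificity("a.example.com"))  // 13: exact host, full length
	fmt.Println(specificity("*.example.com"))  // 12: wildcard costs one
	fmt.Println(specificity("/static/{file}")) // 8: placeholder span removed
}
```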
-
-type counter struct {
- n *int
-}
-
-func (c counter) nextGroup() string {
- name := fmt.Sprintf("group%d", *c.n)
- *c.n++
- return name
-}
-
-type namedCustomLog struct {
- name string
- log *caddy.CustomLog
-}
-
-// sbAddrAssociation is a mapping from a list of
-// addresses to a list of server blocks that are
-// served on those addresses.
-type sbAddrAssociation struct {
- addresses []string
- serverBlocks []serverBlock
-}
-
-const matcherPrefix = "@"
-
-// Interface guard
-var _ caddyfile.ServerType = (*ServerType)(nil)
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/options.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/options.go
deleted file mode 100644
index f693110e..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/options.go
+++ /dev/null
@@ -1,460 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpcaddyfile
-
-import (
- "strconv"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/caddyserver/caddy/v2/modules/caddytls"
- "github.com/caddyserver/certmagic"
- "github.com/mholt/acmez/acme"
-)
-
-func init() {
- RegisterGlobalOption("debug", parseOptTrue)
- RegisterGlobalOption("http_port", parseOptHTTPPort)
- RegisterGlobalOption("https_port", parseOptHTTPSPort)
- RegisterGlobalOption("grace_period", parseOptDuration)
- RegisterGlobalOption("default_sni", parseOptSingleString)
- RegisterGlobalOption("order", parseOptOrder)
- RegisterGlobalOption("storage", parseOptStorage)
- RegisterGlobalOption("storage_clean_interval", parseOptDuration)
- RegisterGlobalOption("acme_ca", parseOptSingleString)
- RegisterGlobalOption("acme_ca_root", parseOptSingleString)
- RegisterGlobalOption("acme_dns", parseOptACMEDNS)
- RegisterGlobalOption("acme_eab", parseOptACMEEAB)
- RegisterGlobalOption("cert_issuer", parseOptCertIssuer)
- RegisterGlobalOption("skip_install_trust", parseOptTrue)
- RegisterGlobalOption("email", parseOptSingleString)
- RegisterGlobalOption("admin", parseOptAdmin)
- RegisterGlobalOption("on_demand_tls", parseOptOnDemand)
- RegisterGlobalOption("local_certs", parseOptTrue)
- RegisterGlobalOption("key_type", parseOptSingleString)
- RegisterGlobalOption("auto_https", parseOptAutoHTTPS)
- RegisterGlobalOption("servers", parseServerOptions)
- RegisterGlobalOption("ocsp_stapling", parseOCSPStaplingOptions)
- RegisterGlobalOption("log", parseLogOptions)
- RegisterGlobalOption("preferred_chains", parseOptPreferredChains)
-}
-
-func parseOptTrue(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { return true, nil }
-
-func parseOptHTTPPort(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- var httpPort int
- for d.Next() {
- var httpPortStr string
- if !d.AllArgs(&httpPortStr) {
- return 0, d.ArgErr()
- }
- var err error
- httpPort, err = strconv.Atoi(httpPortStr)
- if err != nil {
- return 0, d.Errf("converting port '%s' to integer value: %v", httpPortStr, err)
- }
- }
- return httpPort, nil
-}
-
-func parseOptHTTPSPort(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- var httpsPort int
- for d.Next() {
- var httpsPortStr string
- if !d.AllArgs(&httpsPortStr) {
- return 0, d.ArgErr()
- }
- var err error
- httpsPort, err = strconv.Atoi(httpsPortStr)
- if err != nil {
- return 0, d.Errf("converting port '%s' to integer value: %v", httpsPortStr, err)
- }
- }
- return httpsPort, nil
-}
-
-func parseOptOrder(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- newOrder := directiveOrder
-
- for d.Next() {
- // get directive name
- if !d.Next() {
- return nil, d.ArgErr()
- }
- dirName := d.Val()
- if _, ok := registeredDirectives[dirName]; !ok {
- return nil, d.Errf("%s is not a registered directive", dirName)
- }
-
- // get positional token
- if !d.Next() {
- return nil, d.ArgErr()
- }
- pos := d.Val()
-
- // if directive exists, first remove it
- for i, d := range newOrder {
- if d == dirName {
- newOrder = append(newOrder[:i], newOrder[i+1:]...)
- break
- }
- }
-
- // act on the positional
- switch pos {
- case "first":
- newOrder = append([]string{dirName}, newOrder...)
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- directiveOrder = newOrder
- return newOrder, nil
- case "last":
- newOrder = append(newOrder, dirName)
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- directiveOrder = newOrder
- return newOrder, nil
- case "before":
- case "after":
- default:
- return nil, d.Errf("unknown positional '%s'", pos)
- }
-
- // get name of other directive
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- otherDir := d.Val()
- if d.NextArg() {
- return nil, d.ArgErr()
- }
-
- // insert directive into proper position
- for i, d := range newOrder {
- if d == otherDir {
- if pos == "before" {
- newOrder = append(newOrder[:i], append([]string{dirName}, newOrder[i:]...)...)
- } else if pos == "after" {
- newOrder = append(newOrder[:i+1], append([]string{dirName}, newOrder[i+1:]...)...)
- }
- break
- }
- }
- }
-
- directiveOrder = newOrder
-
- return newOrder, nil
-}
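Reviewer note: for context, this parser backs the `order` global option. A typical Caddyfile usage looks like the following sketch (directive names are illustrative; php_server would come from a plugin):

    {
        order php_server before file_server
    }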
-
-func parseOptStorage(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- if !d.Next() { // consume option name
- return nil, d.ArgErr()
- }
- if !d.Next() { // get storage module name
- return nil, d.ArgErr()
- }
- modID := "caddy.storage." + d.Val()
- unm, err := caddyfile.UnmarshalModule(d, modID)
- if err != nil {
- return nil, err
- }
- storage, ok := unm.(caddy.StorageConverter)
- if !ok {
- return nil, d.Errf("module %s is not a caddy.StorageConverter", modID)
- }
- return storage, nil
-}
-
-func parseOptDuration(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- if !d.Next() { // consume option name
- return nil, d.ArgErr()
- }
- if !d.Next() { // get duration value
- return nil, d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return nil, err
- }
- return caddy.Duration(dur), nil
-}
-
-func parseOptACMEDNS(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- if !d.Next() { // consume option name
- return nil, d.ArgErr()
- }
- if !d.Next() { // get DNS module name
- return nil, d.ArgErr()
- }
- modID := "dns.providers." + d.Val()
- unm, err := caddyfile.UnmarshalModule(d, modID)
- if err != nil {
- return nil, err
- }
- prov, ok := unm.(certmagic.ACMEDNSProvider)
- if !ok {
- return nil, d.Errf("module %s (%T) is not a certmagic.ACMEDNSProvider", modID, unm)
- }
- return prov, nil
-}
-
-func parseOptACMEEAB(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- eab := new(acme.EAB)
- for d.Next() {
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- switch d.Val() {
- case "key_id":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- eab.KeyID = d.Val()
-
- case "mac_key":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- eab.MACKey = d.Val()
-
- default:
- return nil, d.Errf("unrecognized parameter '%s'", d.Val())
- }
- }
- }
- return eab, nil
-}
-
-func parseOptCertIssuer(d *caddyfile.Dispenser, existing interface{}) (interface{}, error) {
- var issuers []certmagic.Issuer
- if existing != nil {
- issuers = existing.([]certmagic.Issuer)
- }
- for d.Next() { // consume option name
- if !d.Next() { // get issuer module name
- return nil, d.ArgErr()
- }
- modID := "tls.issuance." + d.Val()
- unm, err := caddyfile.UnmarshalModule(d, modID)
- if err != nil {
- return nil, err
- }
- iss, ok := unm.(certmagic.Issuer)
- if !ok {
- return nil, d.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
- }
- issuers = append(issuers, iss)
- }
- return issuers, nil
-}
-
-func parseOptSingleString(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- d.Next() // consume parameter name
- if !d.Next() {
- return "", d.ArgErr()
- }
- val := d.Val()
- if d.Next() {
- return "", d.ArgErr()
- }
- return val, nil
-}
-
-func parseOptAdmin(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- adminCfg := new(caddy.AdminConfig)
- for d.Next() {
- if d.NextArg() {
- listenAddress := d.Val()
- if listenAddress == "off" {
- adminCfg.Disabled = true
- if d.Next() { // do not accept any remaining options, including a block
- return nil, d.Err("no more options are allowed after turning off admin config")
- }
- } else {
- adminCfg.Listen = listenAddress
- if d.NextArg() { // At most 1 arg is allowed
- return nil, d.ArgErr()
- }
- }
- }
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- switch d.Val() {
- case "enforce_origin":
- adminCfg.EnforceOrigin = true
-
- case "origins":
- adminCfg.Origins = d.RemainingArgs()
-
- default:
- return nil, d.Errf("unrecognized parameter '%s'", d.Val())
- }
- }
- }
- if adminCfg.Listen == "" && !adminCfg.Disabled {
- adminCfg.Listen = caddy.DefaultAdminListen
- }
- return adminCfg, nil
-}
-
-func parseOptOnDemand(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- var ond *caddytls.OnDemandConfig
- for d.Next() {
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- switch d.Val() {
- case "ask":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- if ond == nil {
- ond = new(caddytls.OnDemandConfig)
- }
- ond.Ask = d.Val()
-
- case "interval":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return nil, err
- }
- if ond == nil {
- ond = new(caddytls.OnDemandConfig)
- }
- if ond.RateLimit == nil {
- ond.RateLimit = new(caddytls.RateLimit)
- }
- ond.RateLimit.Interval = caddy.Duration(dur)
-
- case "burst":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- burst, err := strconv.Atoi(d.Val())
- if err != nil {
- return nil, err
- }
- if ond == nil {
- ond = new(caddytls.OnDemandConfig)
- }
- if ond.RateLimit == nil {
- ond.RateLimit = new(caddytls.RateLimit)
- }
- ond.RateLimit.Burst = burst
-
- default:
- return nil, d.Errf("unrecognized parameter '%s'", d.Val())
- }
- }
- }
- if ond == nil {
- return nil, d.Err("expected at least one config parameter for on_demand_tls")
- }
- return ond, nil
-}
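Reviewer note: for reference, the block this parser accepts looks like the following sketch, per the ask/interval/burst cases above (the ask endpoint is a placeholder):

    {
        on_demand_tls {
            ask      http://localhost:9123/check
            interval 2m
            burst    5
        }
    }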
-
-func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- d.Next() // consume parameter name
- if !d.Next() {
- return "", d.ArgErr()
- }
- val := d.Val()
- if d.Next() {
- return "", d.ArgErr()
- }
- if val != "off" && val != "disable_redirects" && val != "ignore_loaded_certs" {
- return "", d.Errf("auto_https must be one of 'off', 'disable_redirects' or 'ignore_loaded_certs'")
- }
- return val, nil
-}
-
-func parseServerOptions(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- return unmarshalCaddyfileServerOptions(d)
-}
-
-func parseOCSPStaplingOptions(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- d.Next() // consume option name
- var val string
- if !d.AllArgs(&val) {
- return nil, d.ArgErr()
- }
- if val != "off" {
- return nil, d.Errf("invalid argument '%s'", val)
- }
- return certmagic.OCSPConfig{
- DisableStapling: val == "off",
- }, nil
-}
-
-// parseLogOptions parses the global log option. Syntax:
-//
-// log [name] {
-// output ...
-// format ...
-// level <level>
-// include <namespaces...>
-// exclude <namespaces...>
-// }
-//
-// When the name argument is unspecified, this directive modifies the default
-// logger.
-//
-func parseLogOptions(d *caddyfile.Dispenser, existingVal interface{}) (interface{}, error) {
- currentNames := make(map[string]struct{})
- if existingVal != nil {
- innerVals, ok := existingVal.([]ConfigValue)
- if !ok {
- return nil, d.Errf("existing log values of unexpected type: %T", existingVal)
- }
- for _, rawVal := range innerVals {
- val, ok := rawVal.Value.(namedCustomLog)
- if !ok {
- return nil, d.Errf("existing log value of unexpected type: %T", existingVal)
- }
- currentNames[val.name] = struct{}{}
- }
- }
-
- var warnings []caddyconfig.Warning
- // Reuse the same parser that handles server-specific log configuration.
- configValues, err := parseLogHelper(
- Helper{
- Dispenser: d,
- warnings: &warnings,
- },
- currentNames,
- )
- if err != nil {
- return nil, err
- }
- if len(warnings) > 0 {
- return nil, d.Errf("warnings found in parsing global log options: %+v", warnings)
- }
-
- return configValues, nil
-}
-
-func parseOptPreferredChains(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
- d.Next()
- return caddytls.ParseCaddyfilePreferredChainsOptions(d)
-}
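Reviewer note: since this vendored copy of the adapter is going away, third-party code keeps the same extension point in upstream Caddy: httpcaddyfile.RegisterGlobalOption. A minimal sketch of wiring a custom global option (the option name "my_option" and the plugin package are hypothetical), mirroring the parseOptSingleString pattern deleted above:

    package myplugin

    import (
        "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
        "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
    )

    func init() {
        // "my_option" is a hypothetical option name.
        httpcaddyfile.RegisterGlobalOption("my_option", parseMyOption)
    }

    // parseMyOption accepts exactly one argument and returns it as a string.
    func parseMyOption(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
        d.Next() // consume option name
        if !d.Next() {
            return "", d.ArgErr()
        }
        val := d.Val()
        if d.Next() {
            return "", d.ArgErr()
        }
        return val, nil
    }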
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/pkiapp.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/pkiapp.go
deleted file mode 100644
index a21951db..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/pkiapp.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpcaddyfile
-
-import (
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/modules/caddypki"
-)
-
-func (st ServerType) buildPKIApp(
- pairings []sbAddrAssociation,
- options map[string]interface{},
- warnings []caddyconfig.Warning,
-) (*caddypki.PKI, []caddyconfig.Warning, error) {
-
- pkiApp := &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}
-
- skipInstallTrust := false
- if _, ok := options["skip_install_trust"]; ok {
- skipInstallTrust = true
- }
- falseBool := false
-
- for _, p := range pairings {
- for _, sblock := range p.serverBlocks {
- // find all the CAs that were defined and add them to the app config
- // i.e. from any "acme_server" directives
- for _, caCfgValue := range sblock.pile["pki.ca"] {
- ca := caCfgValue.Value.(*caddypki.CA)
- if skipInstallTrust {
- ca.InstallTrust = &falseBool
- }
- pkiApp.CAs[ca.ID] = ca
- }
- }
- }
-
- // if no CAs were defined in any of the servers,
- // and we were requested not to install trust, then
- // add one for the default/local CA to do so
- if len(pkiApp.CAs) == 0 && skipInstallTrust {
- ca := new(caddypki.CA)
- ca.ID = caddypki.DefaultCAID
- ca.InstallTrust = &falseBool
- pkiApp.CAs[ca.ID] = ca
- }
-
- return pkiApp, warnings, nil
-}
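Reviewer note: the fallback branch above is worth making concrete: with skip_install_trust set and no CAs defined anywhere, the adapter still emits a default/local CA whose only purpose is to carry install_trust: false. A standalone sketch of the resulting structure (marshaled here purely for illustration):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/caddyserver/caddy/v2/modules/caddypki"
    )

    func main() {
        // Mirrors the fallback above: the default "local" CA exists
        // solely to disable trust installation.
        installTrust := false
        pki := caddypki.PKI{CAs: map[string]*caddypki.CA{
            caddypki.DefaultCAID: {ID: caddypki.DefaultCAID, InstallTrust: &installTrust},
        }}
        b, err := json.MarshalIndent(pki, "", "  ")
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // note the "certificate_authorities" map keyed by CA ID
    }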
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/serveroptions.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/serveroptions.go
deleted file mode 100644
index 9e94b863..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/serveroptions.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpcaddyfile
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
- "github.com/dustin/go-humanize"
-)
-
-// serverOptions collects server config overrides parsed from Caddyfile global options
-type serverOptions struct {
- // If set, will only apply these options to servers that contain a
- // listener address that matches exactly. If empty, will apply to all
- // servers that were not already matched by another serverOptions.
- ListenerAddress string
-
- // These will all map 1:1 to the caddyhttp.Server struct
- ListenerWrappersRaw []json.RawMessage
- ReadTimeout caddy.Duration
- ReadHeaderTimeout caddy.Duration
- WriteTimeout caddy.Duration
- IdleTimeout caddy.Duration
- MaxHeaderBytes int
- AllowH2C bool
- ExperimentalHTTP3 bool
- StrictSNIHost *bool
-}
-
-func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (interface{}, error) {
- serverOpts := serverOptions{}
- for d.Next() {
- if d.NextArg() {
- serverOpts.ListenerAddress = d.Val()
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- }
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- switch d.Val() {
- case "listener_wrappers":
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- modID := "caddy.listeners." + d.Val()
- unm, err := caddyfile.UnmarshalModule(d, modID)
- if err != nil {
- return nil, err
- }
- listenerWrapper, ok := unm.(caddy.ListenerWrapper)
- if !ok {
- return nil, fmt.Errorf("module %s (%T) is not a listener wrapper", modID, unm)
- }
- jsonListenerWrapper := caddyconfig.JSONModuleObject(
- listenerWrapper,
- "wrapper",
- listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
- nil,
- )
- serverOpts.ListenerWrappersRaw = append(serverOpts.ListenerWrappersRaw, jsonListenerWrapper)
- }
-
- case "timeouts":
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- switch d.Val() {
- case "read_body":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return nil, d.Errf("parsing read_body timeout duration: %v", err)
- }
- serverOpts.ReadTimeout = caddy.Duration(dur)
-
- case "read_header":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return nil, d.Errf("parsing read_header timeout duration: %v", err)
- }
- serverOpts.ReadHeaderTimeout = caddy.Duration(dur)
-
- case "write":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return nil, d.Errf("parsing write timeout duration: %v", err)
- }
- serverOpts.WriteTimeout = caddy.Duration(dur)
-
- case "idle":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return nil, d.Errf("parsing idle timeout duration: %v", err)
- }
- serverOpts.IdleTimeout = caddy.Duration(dur)
-
- default:
- return nil, d.Errf("unrecognized timeouts option '%s'", d.Val())
- }
- }
-
- case "max_header_size":
- var sizeStr string
- if !d.AllArgs(&sizeStr) {
- return nil, d.ArgErr()
- }
- size, err := humanize.ParseBytes(sizeStr)
- if err != nil {
- return nil, d.Errf("parsing max_header_size: %v", err)
- }
- serverOpts.MaxHeaderBytes = int(size)
-
- case "protocol":
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- switch d.Val() {
- case "allow_h2c":
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- serverOpts.AllowH2C = true
-
- case "experimental_http3":
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- serverOpts.ExperimentalHTTP3 = true
-
- case "strict_sni_host":
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- trueBool := true
- serverOpts.StrictSNIHost = &trueBool
-
- default:
- return nil, d.Errf("unrecognized protocol option '%s'", d.Val())
- }
- }
-
- default:
- return nil, d.Errf("unrecognized servers option '%s'", d.Val())
- }
- }
- }
- return serverOpts, nil
-}
-
-// applyServerOptions sets the server options on the appropriate servers
-func applyServerOptions(
- servers map[string]*caddyhttp.Server,
- options map[string]interface{},
- warnings *[]caddyconfig.Warning,
-) error {
- // If experimental HTTP/3 is enabled, enable it on each server.
- // We already know there won't be a conflict with serverOptions because
- // we validated earlier that "experimental_http3" cannot be set at the same
- // time as "servers"
- if enableH3, ok := options["experimental_http3"].(bool); ok && enableH3 {
- *warnings = append(*warnings, caddyconfig.Warning{Message: "the 'experimental_http3' global option is deprecated, please use the 'servers > protocol > experimental_http3' option instead"})
- for _, srv := range servers {
- srv.ExperimentalHTTP3 = true
- }
- }
-
- serverOpts, ok := options["servers"].([]serverOptions)
- if !ok {
- return nil
- }
-
- for _, server := range servers {
- // find the options that apply to this server
- opts := func() *serverOptions {
- for _, entry := range serverOpts {
- if entry.ListenerAddress == "" {
- return &entry
- }
- for _, listener := range server.Listen {
- if entry.ListenerAddress == listener {
- return &entry
- }
- }
- }
- return nil
- }()
-
- // if none apply, then move to the next server
- if opts == nil {
- continue
- }
-
- // set all the options
- server.ListenerWrappersRaw = opts.ListenerWrappersRaw
- server.ReadTimeout = opts.ReadTimeout
- server.ReadHeaderTimeout = opts.ReadHeaderTimeout
- server.WriteTimeout = opts.WriteTimeout
- server.IdleTimeout = opts.IdleTimeout
- server.MaxHeaderBytes = opts.MaxHeaderBytes
- server.AllowH2C = opts.AllowH2C
- server.ExperimentalHTTP3 = opts.ExperimentalHTTP3
- server.StrictSNIHost = opts.StrictSNIHost
- }
-
- return nil
-}
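Reviewer note: because unmarshalCaddyfileServerOptions is unexported, the natural way to exercise the parser above is an in-package test. A minimal sketch using caddyfile.NewTestDispenser (the input and assertions are illustrative):

    package httpcaddyfile

    import (
        "testing"

        "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
    )

    func TestUnmarshalServerOptionsSketch(t *testing.T) {
        d := caddyfile.NewTestDispenser(`servers :443 {
            timeouts {
                read_body 10s
                idle 2m
            }
            max_header_size 16kb
            protocol {
                allow_h2c
            }
        }`)
        val, err := unmarshalCaddyfileServerOptions(d)
        if err != nil {
            t.Fatal(err)
        }
        opts := val.(serverOptions)
        if opts.ListenerAddress != ":443" || !opts.AllowH2C {
            t.Fatalf("unexpected options: %+v", opts)
        }
    }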
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/tlsapp.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/tlsapp.go
deleted file mode 100644
index 0fe1fc5f..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/tlsapp.go
+++ /dev/null
@@ -1,632 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpcaddyfile
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "reflect"
- "sort"
- "strconv"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
- "github.com/caddyserver/caddy/v2/modules/caddytls"
- "github.com/caddyserver/certmagic"
- "github.com/mholt/acmez/acme"
-)
-
-func (st ServerType) buildTLSApp(
- pairings []sbAddrAssociation,
- options map[string]interface{},
- warnings []caddyconfig.Warning,
-) (*caddytls.TLS, []caddyconfig.Warning, error) {
-
- tlsApp := &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}
- var certLoaders []caddytls.CertificateLoader
-
- httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
- if hp, ok := options["http_port"].(int); ok {
- httpPort = strconv.Itoa(hp)
- }
- httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
- if hsp, ok := options["https_port"].(int); ok {
- httpsPort = strconv.Itoa(hsp)
- }
-
- // count how many server blocks have a TLS-enabled key with
- // no host, and find all hosts that share a server block with
- // a hostless key, so that they don't get forgotten/omitted
- // by auto-HTTPS (since they won't appear in route matchers)
- var serverBlocksWithTLSHostlessKey int
- httpsHostsSharedWithHostlessKey := make(map[string]struct{})
- for _, pair := range pairings {
- for _, sb := range pair.serverBlocks {
- for _, addr := range sb.keys {
- if addr.Host == "" {
- // this address has no hostname, but if it's explicitly set
- // to HTTPS, then we need to count it as being TLS-enabled
- if addr.Scheme == "https" || addr.Port == httpsPort {
- serverBlocksWithTLSHostlessKey++
- }
- // this server block has a hostless key, now
- // go through and add all the hosts to the set
- for _, otherAddr := range sb.keys {
- if otherAddr.Original == addr.Original {
- continue
- }
- if otherAddr.Host != "" && otherAddr.Scheme != "http" && otherAddr.Port != httpPort {
- httpsHostsSharedWithHostlessKey[otherAddr.Host] = struct{}{}
- }
- }
- break
- }
- }
- }
- }
-
- // a catch-all automation policy is used as a "default" for all subjects that
- // don't have custom configuration explicitly associated with them; it is
- // only added if the global settings or defaults are non-empty
- catchAllAP, err := newBaseAutomationPolicy(options, warnings, false)
- if err != nil {
- return nil, warnings, err
- }
- if catchAllAP != nil {
- if tlsApp.Automation == nil {
- tlsApp.Automation = new(caddytls.AutomationConfig)
- }
- tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP)
- }
-
- for _, p := range pairings {
- // avoid setting up TLS automation policies for a server that is HTTP-only
- if !listenersUseAnyPortOtherThan(p.addresses, httpPort) {
- continue
- }
-
- for _, sblock := range p.serverBlocks {
- // get values that populate an automation policy for this block
- ap, err := newBaseAutomationPolicy(options, warnings, true)
- if err != nil {
- return nil, warnings, err
- }
-
- sblockHosts := sblock.hostsFromKeys(false)
- if len(sblockHosts) == 0 && catchAllAP != nil {
- ap = catchAllAP
- }
-
- // on-demand tls
- if _, ok := sblock.pile["tls.on_demand"]; ok {
- ap.OnDemand = true
- }
-
- if keyTypeVals, ok := sblock.pile["tls.key_type"]; ok {
- ap.KeyType = keyTypeVals[0].Value.(string)
- }
-
- // certificate issuers
- if issuerVals, ok := sblock.pile["tls.cert_issuer"]; ok {
- var issuers []certmagic.Issuer
- for _, issuerVal := range issuerVals {
- issuers = append(issuers, issuerVal.Value.(certmagic.Issuer))
- }
- if ap == catchAllAP && !reflect.DeepEqual(ap.Issuers, issuers) {
- return nil, warnings, fmt.Errorf("automation policy from site block is also default/catch-all policy because of key without hostname, and the two are in conflict: %#v != %#v", ap.Issuers, issuers)
- }
- ap.Issuers = issuers
- }
-
- // custom bind host
- for _, cfgVal := range sblock.pile["bind"] {
- for _, iss := range ap.Issuers {
- // if an issuer was already configured and it is NOT an ACME issuer,
- // skip, since we intend to adjust only ACME issuers; ensure we
- // include any issuer that embeds/wraps an underlying ACME issuer
- var acmeIssuer *caddytls.ACMEIssuer
- if acmeWrapper, ok := iss.(acmeCapable); ok {
- acmeIssuer = acmeWrapper.GetACMEIssuer()
- }
- if acmeIssuer == nil {
- continue
- }
-
- // proceed to configure the ACME issuer's bind host, without
- // overwriting any existing settings
- if acmeIssuer.Challenges == nil {
- acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
- }
- if acmeIssuer.Challenges.BindHost == "" {
- // only binding to one host is supported
- var bindHost string
- if bindHosts, ok := cfgVal.Value.([]string); ok && len(bindHosts) > 0 {
- bindHost = bindHosts[0]
- }
- acmeIssuer.Challenges.BindHost = bindHost
- }
- }
- }
-
- // first make sure this block is allowed to create an automation policy;
- // doing so is forbidden if it has a key with no host (i.e. ":443")
- // and if there is a different server block that also has a key with no
- // host -- since a key with no host matches any host, we need its
- // associated automation policy to have an empty Subjects list, i.e. no
- // host filter, which is indistinguishable between the two server blocks
- // because automation is not done in the context of a particular server...
- // this is an example of a poor mapping from Caddyfile to JSON but that's
- // the least-leaky abstraction I could figure out
- if len(sblockHosts) == 0 {
- if serverBlocksWithTLSHostlessKey > 1 {
- // this server block and at least one other has a key with no host,
- // making the two indistinguishable; it is misleading to define such
- // a policy within one server block since it actually will apply to
- // others as well
- return nil, warnings, fmt.Errorf("cannot make a TLS automation policy from a server block that has a host-less address when there are other TLS-enabled server block addresses lacking a host")
- }
- if catchAllAP == nil {
- // this server block has a key with no hosts, but there is not yet
- // a catch-all automation policy (probably because no global options
- // were set), so this one becomes it
- catchAllAP = ap
- }
- }
-
- // associate our new automation policy with this server block's hosts
- ap.Subjects = sblock.hostsFromKeysNotHTTP(httpPort)
- sort.Strings(ap.Subjects) // solely for deterministic test results
-
- // if a combination of public and internal names were given
- // for this same server block and no issuer was specified, we
- // need to separate them out in the automation policies so
- // that the internal names can use the internal issuer and
- // the other names can use the default/public/ACME issuer
- var ap2 *caddytls.AutomationPolicy
- if len(ap.Issuers) == 0 {
- var internal, external []string
- for _, s := range ap.Subjects {
- if !certmagic.SubjectQualifiesForCert(s) {
- return nil, warnings, fmt.Errorf("subject does not qualify for certificate: '%s'", s)
- }
- // we don't use certmagic.SubjectQualifiesForPublicCert() because of one nuance:
- // names like *.*.tld that may not qualify for a public certificate are actually
- // fine when used with OnDemand, since OnDemand (currently) does not obtain
- // wildcards (if it ever does, there will be a separate config option to enable
- // it that we would need to check here) since the hostname is known at handshake;
- // and it is unexpected to switch to internal issuer when the user wants to get
- // regular certificates on-demand for a class of certs like *.*.tld.
- if subjectQualifiesForPublicCert(ap, s) {
- external = append(external, s)
- } else {
- internal = append(internal, s)
- }
- }
- if len(external) > 0 && len(internal) > 0 {
- ap.Subjects = external
- apCopy := *ap
- ap2 = &apCopy
- ap2.Subjects = internal
- ap2.IssuersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", &warnings)}
- }
- }
- if tlsApp.Automation == nil {
- tlsApp.Automation = new(caddytls.AutomationConfig)
- }
- tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap)
- if ap2 != nil {
- tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap2)
- }
-
- // certificate loaders
- if clVals, ok := sblock.pile["tls.cert_loader"]; ok {
- for _, clVal := range clVals {
- certLoaders = append(certLoaders, clVal.Value.(caddytls.CertificateLoader))
- }
- }
- }
- }
-
- // group certificate loaders by module name, then add to config
- if len(certLoaders) > 0 {
- loadersByName := make(map[string]caddytls.CertificateLoader)
- for _, cl := range certLoaders {
- name := caddy.GetModuleName(cl)
- // ugh... technically, we may have multiple FileLoader and FolderLoader
- // modules (because the tls directive returns one per occurrence), but
- // the config structure expects only one instance of each kind of loader
- // module, so we have to combine them... instead of enumerating each
- // possible cert loader module in a type switch, we can use reflection,
- // which works on any cert loaders that are slice types
- if reflect.TypeOf(cl).Kind() == reflect.Slice {
- combined := reflect.ValueOf(loadersByName[name])
- if !combined.IsValid() {
- combined = reflect.New(reflect.TypeOf(cl)).Elem()
- }
- clVal := reflect.ValueOf(cl)
- for i := 0; i < clVal.Len(); i++ {
- combined = reflect.Append(combined, clVal.Index(i))
- }
- loadersByName[name] = combined.Interface().(caddytls.CertificateLoader)
- }
- }
- for certLoaderName, loaders := range loadersByName {
- tlsApp.CertificatesRaw[certLoaderName] = caddyconfig.JSON(loaders, &warnings)
- }
- }
-
- // set any of the on-demand options, for if/when on-demand TLS is enabled
- if onDemand, ok := options["on_demand_tls"].(*caddytls.OnDemandConfig); ok {
- if tlsApp.Automation == nil {
- tlsApp.Automation = new(caddytls.AutomationConfig)
- }
- tlsApp.Automation.OnDemand = onDemand
- }
-
- // set the storage clean interval if configured
- if storageCleanInterval, ok := options["storage_clean_interval"].(caddy.Duration); ok {
- if tlsApp.Automation == nil {
- tlsApp.Automation = new(caddytls.AutomationConfig)
- }
- tlsApp.Automation.StorageCleanInterval = storageCleanInterval
- }
-
- // if any hostnames appear on the same server block as a key with
- // no host, they will not be used with route matchers because the
- // hostless key matches all hosts, therefore, it wouldn't be
- // considered for auto-HTTPS, so we need to make sure those hosts
- // are manually considered for managed certificates; we also need
- // to make sure that any of these names which are internal-only
- // get internal certificates by default rather than ACME
- var al caddytls.AutomateLoader
- internalAP := &caddytls.AutomationPolicy{
- IssuersRaw: []json.RawMessage{json.RawMessage(`{"module":"internal"}`)},
- }
- for h := range httpsHostsSharedWithHostlessKey {
- al = append(al, h)
- if !certmagic.SubjectQualifiesForPublicCert(h) {
- internalAP.Subjects = append(internalAP.Subjects, h)
- }
- }
- if len(al) > 0 {
- tlsApp.CertificatesRaw["automate"] = caddyconfig.JSON(al, &warnings)
- }
- if len(internalAP.Subjects) > 0 {
- if tlsApp.Automation == nil {
- tlsApp.Automation = new(caddytls.AutomationConfig)
- }
- tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, internalAP)
- }
-
- // if there are any global options set for issuers (ACME ones in particular), make sure they
- // take effect in every automation policy that does not have any issuers
- if tlsApp.Automation != nil {
- globalEmail := options["email"]
- globalACMECA := options["acme_ca"]
- globalACMECARoot := options["acme_ca_root"]
- globalACMEDNS := options["acme_dns"]
- globalACMEEAB := options["acme_eab"]
- globalPreferredChains := options["preferred_chains"]
- hasGlobalACMEDefaults := globalEmail != nil || globalACMECA != nil || globalACMECARoot != nil || globalACMEDNS != nil || globalACMEEAB != nil || globalPreferredChains != nil
- if hasGlobalACMEDefaults {
- for i := 0; i < len(tlsApp.Automation.Policies); i++ {
- ap := tlsApp.Automation.Policies[i]
- if len(ap.Issuers) == 0 && automationPolicyHasAllPublicNames(ap) {
- // for public names, create default issuers which will later be filled in with configured global defaults
- // (internal names will implicitly use the internal issuer at auto-https time)
- ap.Issuers = caddytls.DefaultIssuers()
-
- // if a specific endpoint is configured, can't use multiple default issuers
- if globalACMECA != nil {
- if strings.Contains(globalACMECA.(string), "zerossl") {
- ap.Issuers = []certmagic.Issuer{&caddytls.ZeroSSLIssuer{ACMEIssuer: new(caddytls.ACMEIssuer)}}
- } else {
- ap.Issuers = []certmagic.Issuer{new(caddytls.ACMEIssuer)}
- }
- }
- }
- }
- }
- }
-
- // finalize and verify policies; do cleanup
- if tlsApp.Automation != nil {
- for i, ap := range tlsApp.Automation.Policies {
- // ensure all issuers have global defaults filled in
- for j, issuer := range ap.Issuers {
- err := fillInGlobalACMEDefaults(issuer, options)
- if err != nil {
- return nil, warnings, fmt.Errorf("filling in global issuer defaults for AP %d, issuer %d: %v", i, j, err)
- }
- }
-
- // encode all issuer values we created, so they will be rendered in the output
- if len(ap.Issuers) > 0 && ap.IssuersRaw == nil {
- for _, iss := range ap.Issuers {
- issuerName := iss.(caddy.Module).CaddyModule().ID.Name()
- ap.IssuersRaw = append(ap.IssuersRaw, caddyconfig.JSONModuleObject(iss, "module", issuerName, &warnings))
- }
- }
- }
-
- // consolidate automation policies that are the exact same
- tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies)
-
- // ensure automation policies don't overlap subjects (this should be
- // an error at provision-time as well, but catch it in the adapt phase
- // for convenience)
- automationHostSet := make(map[string]struct{})
- for _, ap := range tlsApp.Automation.Policies {
- for _, s := range ap.Subjects {
- if _, ok := automationHostSet[s]; ok {
- return nil, warnings, fmt.Errorf("hostname appears in more than one automation policy, making certificate management ambiguous: %s", s)
- }
- automationHostSet[s] = struct{}{}
- }
- }
-
- // if nothing remains, remove any excess values to clean up the resulting config
- if len(tlsApp.Automation.Policies) == 0 {
- tlsApp.Automation.Policies = nil
- }
- if reflect.DeepEqual(tlsApp.Automation, new(caddytls.AutomationConfig)) {
- tlsApp.Automation = nil
- }
- }
-
- return tlsApp, warnings, nil
-}
-
-type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer }
-
-func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]interface{}) error {
- acmeWrapper, ok := issuer.(acmeCapable)
- if !ok {
- return nil
- }
- acmeIssuer := acmeWrapper.GetACMEIssuer()
- if acmeIssuer == nil {
- return nil
- }
-
- globalEmail := options["email"]
- globalACMECA := options["acme_ca"]
- globalACMECARoot := options["acme_ca_root"]
- globalACMEDNS := options["acme_dns"]
- globalACMEEAB := options["acme_eab"]
- globalPreferredChains := options["preferred_chains"]
-
- if globalEmail != nil && acmeIssuer.Email == "" {
- acmeIssuer.Email = globalEmail.(string)
- }
- if globalACMECA != nil && acmeIssuer.CA == "" {
- acmeIssuer.CA = globalACMECA.(string)
- }
- if globalACMECARoot != nil && !sliceContains(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string)) {
- acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string))
- }
- if globalACMEDNS != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.DNS == nil) {
- acmeIssuer.Challenges = &caddytls.ChallengesConfig{
- DNS: &caddytls.DNSChallengeConfig{
- ProviderRaw: caddyconfig.JSONModuleObject(globalACMEDNS, "name", globalACMEDNS.(caddy.Module).CaddyModule().ID.Name(), nil),
- },
- }
- }
- if globalACMEEAB != nil && acmeIssuer.ExternalAccount == nil {
- acmeIssuer.ExternalAccount = globalACMEEAB.(*acme.EAB)
- }
- if globalPreferredChains != nil && acmeIssuer.PreferredChains == nil {
- acmeIssuer.PreferredChains = globalPreferredChains.(*caddytls.ChainPreference)
- }
- return nil
-}
-
-// newBaseAutomationPolicy returns a new TLS automation policy that gets
-// its values from the global options map. It should be used as the base
-// for any other automation policies. A nil policy (and no error) will be
-// returned if there are no default/global options. However, if always is
-// true, a non-nil value will always be returned (unless there is an error).
-func newBaseAutomationPolicy(options map[string]interface{}, warnings []caddyconfig.Warning, always bool) (*caddytls.AutomationPolicy, error) {
- issuers, hasIssuers := options["cert_issuer"]
- _, hasLocalCerts := options["local_certs"]
- keyType, hasKeyType := options["key_type"]
- ocspStapling, hasOCSPStapling := options["ocsp_stapling"]
-
- hasGlobalAutomationOpts := hasIssuers || hasLocalCerts || hasKeyType || hasOCSPStapling
-
- // if there are no global options related to automation policies
- // set, then we can just return right away
- if !hasGlobalAutomationOpts {
- if always {
- return new(caddytls.AutomationPolicy), nil
- }
- return nil, nil
- }
-
- ap := new(caddytls.AutomationPolicy)
- if hasKeyType {
- ap.KeyType = keyType.(string)
- }
-
- if hasIssuers && hasLocalCerts {
- return nil, fmt.Errorf("global options are ambiguous: local_certs is confusing when combined with cert_issuer, because local_certs is also a specific kind of issuer")
- }
-
- if hasIssuers {
- ap.Issuers = issuers.([]certmagic.Issuer)
- } else if hasLocalCerts {
- ap.Issuers = []certmagic.Issuer{new(caddytls.InternalIssuer)}
- }
-
- if hasOCSPStapling {
- ocspConfig := ocspStapling.(certmagic.OCSPConfig)
- ap.DisableOCSPStapling = ocspConfig.DisableStapling
- ap.OCSPOverrides = ocspConfig.ResponderOverrides
- }
-
- return ap, nil
-}
-
-// consolidateAutomationPolicies combines automation policies that are the same,
-// for a cleaner overall output.
-func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls.AutomationPolicy {
- // sort from most specific to least specific; we depend on this ordering
- sort.SliceStable(aps, func(i, j int) bool {
- if automationPolicyIsSubset(aps[i], aps[j]) {
- return true
- }
- if automationPolicyIsSubset(aps[j], aps[i]) {
- return false
- }
- return len(aps[i].Subjects) > len(aps[j].Subjects)
- })
-
- emptyAPCount := 0
- origLenAPs := len(aps)
- // compute the number of empty policies (disregarding subjects) - see #4128
- emptyAP := new(caddytls.AutomationPolicy)
- for i := 0; i < len(aps); i++ {
- emptyAP.Subjects = aps[i].Subjects
- if reflect.DeepEqual(aps[i], emptyAP) {
- emptyAPCount++
- if !automationPolicyHasAllPublicNames(aps[i]) {
- // if this automation policy has internal names, we might as well remove it
- // so auto-https can implicitly use the internal issuer
- aps = append(aps[:i], aps[i+1:]...)
- i--
- }
- }
- }
- // If all policies are empty, we can return nil, as there is no need to set any policy
- if emptyAPCount == origLenAPs {
- return nil
- }
-
- // remove or combine duplicate policies
-outer:
- for i := 0; i < len(aps); i++ {
- // compare only with next policies; we sorted by specificity so we must not delete earlier policies
- for j := i + 1; j < len(aps); j++ {
- // if they're exactly equal in every way, just keep one of them
- if reflect.DeepEqual(aps[i], aps[j]) {
- aps = append(aps[:j], aps[j+1:]...)
- // must re-evaluate current i against next j; can't skip it!
- // even if i decrements to -1, will be incremented to 0 immediately
- i--
- continue outer
- }
-
- // if the policy is the same, we can keep just one, but we have
- // to be careful which one we keep; if only one has any hostnames
- // defined, then we need to keep the one without any hostnames,
- // otherwise the one without any subjects (a catch-all) would be
- // eaten up by the one with subjects; and if both have subjects, we
- // need to combine their lists
- if reflect.DeepEqual(aps[i].IssuersRaw, aps[j].IssuersRaw) &&
- bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) &&
- aps[i].MustStaple == aps[j].MustStaple &&
- aps[i].KeyType == aps[j].KeyType &&
- aps[i].OnDemand == aps[j].OnDemand &&
- aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio {
- if len(aps[i].Subjects) > 0 && len(aps[j].Subjects) == 0 {
- // later policy (at j) has no subjects ("catch-all"), so we can
- // remove the identical-but-more-specific policy that comes first
- // AS LONG AS it is not shadowed by another policy before it; e.g.
- // if policy i is for example.com, policy i+1 is '*.com', and policy
- // j is catch-all, we cannot remove policy i because that would
- // cause example.com to be served by the less specific policy for
- // '*.com', which might be different (yes we've seen this happen)
- if automationPolicyShadows(i, aps) >= j {
- aps = append(aps[:i], aps[i+1:]...)
- i--
- continue outer
- }
- } else {
- // avoid repeated subjects
- for _, subj := range aps[j].Subjects {
- if !sliceContains(aps[i].Subjects, subj) {
- aps[i].Subjects = append(aps[i].Subjects, subj)
- }
- }
- aps = append(aps[:j], aps[j+1:]...)
- j--
- }
- }
- }
- }
-
- return aps
-}
-
-// automationPolicyIsSubset returns true if a's subjects are a subset
-// of b's subjects.
-func automationPolicyIsSubset(a, b *caddytls.AutomationPolicy) bool {
- if len(b.Subjects) == 0 {
- return true
- }
- if len(a.Subjects) == 0 {
- return false
- }
- for _, aSubj := range a.Subjects {
- var inSuperset bool
- for _, bSubj := range b.Subjects {
- if certmagic.MatchWildcard(aSubj, bSubj) {
- inSuperset = true
- break
- }
- }
- if !inSuperset {
- return false
- }
- }
- return true
-}
-
-// automationPolicyShadows returns the index of a policy that aps[i] shadows;
-// in other words, for all policies after position i, if that policy covers
-// the same subjects but is less specific, that policy's position is returned,
-// or -1 if no shadowing is found. For example, if policy i is for
-// "foo.example.com" and policy i+2 is for "*.example.com", then i+2 will be
-// returned, since that policy is shadowed by i, which is in front.
-func automationPolicyShadows(i int, aps []*caddytls.AutomationPolicy) int {
- for j := i + 1; j < len(aps); j++ {
- if automationPolicyIsSubset(aps[i], aps[j]) {
- return j
- }
- }
- return -1
-}
-
-// subjectQualifiesForPublicCert is like certmagic.SubjectQualifiesForPublicCert() except
-// that this allows domains with multiple wildcard levels like '*.*.example.com' to qualify
-// if the automation policy has OnDemand enabled (i.e. this function is more lenient).
-func subjectQualifiesForPublicCert(ap *caddytls.AutomationPolicy, subj string) bool {
- return !certmagic.SubjectIsIP(subj) &&
- !certmagic.SubjectIsInternal(subj) &&
- (strings.Count(subj, "*.") < 2 || ap.OnDemand)
-}
-
-func automationPolicyHasAllPublicNames(ap *caddytls.AutomationPolicy) bool {
- for _, subj := range ap.Subjects {
- if !subjectQualifiesForPublicCert(ap, subj) {
- return false
- }
- }
- return true
-}
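Reviewer note: both the specificity sort and automationPolicyShadows above ultimately reduce to certmagic.MatchWildcard, whose one-label wildcard semantics are easy to forget. A standalone sketch:

    package main

    import (
        "fmt"

        "github.com/caddyserver/certmagic"
    )

    func main() {
        // automationPolicyIsSubset(a, b) requires every subject of a to
        // match some subject of b under these rules.
        fmt.Println(certmagic.MatchWildcard("foo.example.com", "*.example.com")) // true
        fmt.Println(certmagic.MatchWildcard("foo.example.com", "example.com"))   // false: no wildcard
        fmt.Println(certmagic.MatchWildcard("a.b.example.com", "*.example.com")) // false: * spans one label
    }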
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httploader.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httploader.go
deleted file mode 100644
index aabd1035..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httploader.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package caddyconfig
-
-import (
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "net/http"
- "time"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-func init() {
- caddy.RegisterModule(HTTPLoader{})
-}
-
-// HTTPLoader can load Caddy configs over HTTP(S). It can adapt the config
-// based on the Content-Type header of the HTTP response.
-type HTTPLoader struct {
- // The method for the request. Default: GET
- Method string `json:"method,omitempty"`
-
- // The URL of the request.
- URL string `json:"url,omitempty"`
-
- // HTTP headers to add to the request.
- Headers http.Header `json:"header,omitempty"`
-
- // Maximum time allowed for a complete connection and request.
- Timeout caddy.Duration `json:"timeout,omitempty"`
-
- TLS *struct {
- // Present this instance's managed remote identity credentials to the server.
- UseServerIdentity bool `json:"use_server_identity,omitempty"`
-
- // PEM-encoded client certificate filename to present to the server.
- ClientCertificateFile string `json:"client_certificate_file,omitempty"`
-
- // PEM-encoded key to use with the client certificate.
- ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"`
-
- // List of PEM-encoded CA certificate files to add to the same trust
- // store as RootCAPool (or root_ca_pool in the JSON).
- RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"`
- } `json:"tls,omitempty"`
-}
-
-// CaddyModule returns the Caddy module information.
-func (HTTPLoader) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "caddy.config_loaders.http",
- New: func() caddy.Module { return new(HTTPLoader) },
- }
-}
-
-// LoadConfig loads a Caddy config.
-func (hl HTTPLoader) LoadConfig(ctx caddy.Context) ([]byte, error) {
- client, err := hl.makeClient(ctx)
- if err != nil {
- return nil, err
- }
-
- method := hl.Method
- if method == "" {
- method = http.MethodGet
- }
-
- req, err := http.NewRequest(method, hl.URL, nil)
- if err != nil {
- return nil, err
- }
- req.Header = hl.Headers
-
- resp, err := client.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- if resp.StatusCode >= 400 {
- return nil, fmt.Errorf("server responded with HTTP %d", resp.StatusCode)
- }
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
-
- result, warnings, err := adaptByContentType(resp.Header.Get("Content-Type"), body)
- if err != nil {
- return nil, err
- }
- for _, warn := range warnings {
- ctx.Logger(hl).Warn(warn.String())
- }
-
- return result, nil
-}
-
-func (hl HTTPLoader) makeClient(ctx caddy.Context) (*http.Client, error) {
- client := &http.Client{
- Timeout: time.Duration(hl.Timeout),
- }
-
- if hl.TLS != nil {
- var tlsConfig *tls.Config
-
- // client authentication
- if hl.TLS.UseServerIdentity {
- certs, err := ctx.IdentityCredentials(ctx.Logger(hl))
- if err != nil {
- return nil, fmt.Errorf("getting server identity credentials: %v", err)
- }
- if tlsConfig == nil {
- tlsConfig = new(tls.Config)
- }
- tlsConfig.Certificates = certs
- } else if hl.TLS.ClientCertificateFile != "" && hl.TLS.ClientCertificateKeyFile != "" {
- cert, err := tls.LoadX509KeyPair(hl.TLS.ClientCertificateFile, hl.TLS.ClientCertificateKeyFile)
- if err != nil {
- return nil, err
- }
- if tlsConfig == nil {
- tlsConfig = new(tls.Config)
- }
- tlsConfig.Certificates = []tls.Certificate{cert}
- }
-
- // trusted server certs
- if len(hl.TLS.RootCAPEMFiles) > 0 {
- rootPool := x509.NewCertPool()
- for _, pemFile := range hl.TLS.RootCAPEMFiles {
- pemData, err := ioutil.ReadFile(pemFile)
- if err != nil {
- return nil, fmt.Errorf("failed reading ca cert: %v", err)
- }
- rootPool.AppendCertsFromPEM(pemData)
- }
- if tlsConfig == nil {
- tlsConfig = new(tls.Config)
- }
- tlsConfig.RootCAs = rootPool
- }
-
- client.Transport = &http.Transport{TLSClientConfig: tlsConfig}
- }
-
- return client, nil
-}
-
-var _ caddy.ConfigLoader = (*HTTPLoader)(nil)
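Reviewer note: the JSON shape implied by the struct tags above, deserialized here for illustration (the URL is a placeholder):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/caddyserver/caddy/v2/caddyconfig"
    )

    func main() {
        // caddy.Duration unmarshals from a duration string like "30s".
        raw := []byte(`{
            "method": "GET",
            "url": "https://config.example.com/caddy.json",
            "timeout": "30s",
            "tls": {"use_server_identity": true}
        }`)
        var hl caddyconfig.HTTPLoader
        if err := json.Unmarshal(raw, &hl); err != nil {
            panic(err)
        }
        fmt.Println(hl.Method, hl.URL)
    }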
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/load.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/load.go
deleted file mode 100644
index 7a390d0b..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/load.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyconfig
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "mime"
- "net/http"
- "strings"
- "sync"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-func init() {
- caddy.RegisterModule(adminLoad{})
-}
-
-// adminLoad is a module that provides the /load endpoint
-// for the Caddy admin API. The only reason it's not baked
-// into the caddy package directly is because of the import
-// of the caddyconfig package for its GetAdapter function.
-// If the caddy package depended on the caddyconfig package,
-// then the caddyconfig package could no longer import the
-// caddy package without creating backward edges in the
-// dependency tree (i.e. an import cycle).
-// Fortunately, the admin API has first-class support for
-// adding endpoints from modules.
-type adminLoad struct{}
-
-// CaddyModule returns the Caddy module information.
-func (adminLoad) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "admin.api.load",
- New: func() caddy.Module { return new(adminLoad) },
- }
-}
-
-// Routes returns a route for the /load endpoint.
-func (al adminLoad) Routes() []caddy.AdminRoute {
- return []caddy.AdminRoute{
- {
- Pattern: "/load",
- Handler: caddy.AdminHandlerFunc(al.handleLoad),
- },
- }
-}
-
-// handleLoad replaces the entire current configuration with
-// a new one provided in the request body. It supports config
-// adapters through the use of the Content-Type header. A
-// config that is identical to the currently-running config
-// will be a no-op unless Cache-Control: must-revalidate is set.
-func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error {
- if r.Method != http.MethodPost {
- return caddy.APIError{
- HTTPStatus: http.StatusMethodNotAllowed,
- Err: fmt.Errorf("method not allowed"),
- }
- }
-
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
- defer bufPool.Put(buf)
-
- _, err := io.Copy(buf, r.Body)
- if err != nil {
- return caddy.APIError{
- HTTPStatus: http.StatusBadRequest,
- Err: fmt.Errorf("reading request body: %v", err),
- }
- }
- body := buf.Bytes()
-
- // if the config is in a format other than Caddy's
- // native JSON, we need to adapt it before loading it
- if ctHeader := r.Header.Get("Content-Type"); ctHeader != "" {
- result, warnings, err := adaptByContentType(ctHeader, body)
- if err != nil {
- return caddy.APIError{
- HTTPStatus: http.StatusBadRequest,
- Err: err,
- }
- }
- if len(warnings) > 0 {
- respBody, err := json.Marshal(warnings)
- if err != nil {
- caddy.Log().Named("admin.api.load").Error(err.Error())
- }
- _, _ = w.Write(respBody)
- }
- body = result
- }
-
- forceReload := r.Header.Get("Cache-Control") == "must-revalidate"
-
- err = caddy.Load(body, forceReload)
- if err != nil {
- return caddy.APIError{
- HTTPStatus: http.StatusBadRequest,
- Err: fmt.Errorf("loading config: %v", err),
- }
- }
-
- caddy.Log().Named("admin.api").Info("load complete")
-
- return nil
-}
-
-// adaptByContentType adapts body to Caddy JSON using the adapter specified by contentType.
-// If contentType is empty or ends with "/json", the input is returned as-is (a no-op).
-func adaptByContentType(contentType string, body []byte) ([]byte, []Warning, error) {
- // assume JSON as the default
- if contentType == "" {
- return body, nil, nil
- }
-
- ct, _, err := mime.ParseMediaType(contentType)
- if err != nil {
- return nil, nil, caddy.APIError{
- HTTPStatus: http.StatusBadRequest,
- Err: fmt.Errorf("invalid Content-Type: %v", err),
- }
- }
-
- // if already JSON, no need to adapt
- if strings.HasSuffix(ct, "/json") {
- return body, nil, nil
- }
-
- // adapter name should be suffix of MIME type
- slashIdx := strings.Index(ct, "/")
- if slashIdx < 0 {
- return nil, nil, fmt.Errorf("malformed Content-Type")
- }
-
- adapterName := ct[slashIdx+1:]
- cfgAdapter := GetAdapter(adapterName)
- if cfgAdapter == nil {
- return nil, nil, fmt.Errorf("unrecognized config adapter '%s'", adapterName)
- }
-
- result, warnings, err := cfgAdapter.Adapt(body, nil)
- if err != nil {
- return nil, nil, fmt.Errorf("adapting config using %s adapter: %v", adapterName, err)
- }
-
- return result, warnings, nil
-}
-
-var bufPool = sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
-}
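Reviewer note: a client-side sketch of the /load endpoint above: POST the config, set Content-Type so the matching config adapter runs, and send Cache-Control: must-revalidate to force a reload even when the config is unchanged. The admin address shown is Caddy's default.

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
        "os"
    )

    func main() {
        body, err := os.ReadFile("Caddyfile")
        if err != nil {
            panic(err)
        }
        req, err := http.NewRequest(http.MethodPost, "http://localhost:2019/load", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Content-Type", "text/caddyfile") // adapter name is the MIME subtype
        req.Header.Set("Cache-Control", "must-revalidate")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }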
diff --git a/vendor/github.com/caddyserver/caddy/v2/cmd/commandfuncs.go b/vendor/github.com/caddyserver/caddy/v2/cmd/commandfuncs.go
deleted file mode 100644
index 8f1c68c8..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/cmd/commandfuncs.go
+++ /dev/null
@@ -1,716 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddycmd
-
-import (
- "bytes"
- "context"
- "crypto/rand"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "os"
- "os/exec"
- "runtime"
- "runtime/debug"
- "sort"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "go.uber.org/zap"
-)
-
-func cmdStart(fl Flags) (int, error) {
- startCmdConfigFlag := fl.String("config")
- startCmdConfigAdapterFlag := fl.String("adapter")
- startCmdPidfileFlag := fl.String("pidfile")
- startCmdWatchFlag := fl.Bool("watch")
- startCmdEnvfileFlag := fl.String("envfile")
-
- // open a listener to which the child process will connect when
- // it is ready to confirm that it has successfully started
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("opening listener for success confirmation: %v", err)
- }
- defer ln.Close()
-
- // craft the command with a pingback address and with a
- // pipe for its stdin, so we can tell it our confirmation
- // code that we expect so that some random port scan at
- // the most unfortunate time won't fool us into thinking
- // the child succeeded (i.e. the alternative is to just
- // wait for any connection on our listener, but better to
- // ensure it's the process we're expecting - we can be
- // sure by giving it some random bytes and having it echo
- // them back to us)
- cmd := exec.Command(os.Args[0], "run", "--pingback", ln.Addr().String())
- if startCmdConfigFlag != "" {
- cmd.Args = append(cmd.Args, "--config", startCmdConfigFlag)
- }
- if startCmdEnvfileFlag != "" {
- cmd.Args = append(cmd.Args, "--envfile", startCmdEnvfileFlag)
- }
- if startCmdConfigAdapterFlag != "" {
- cmd.Args = append(cmd.Args, "--adapter", startCmdConfigAdapterFlag)
- }
- if startCmdWatchFlag {
- cmd.Args = append(cmd.Args, "--watch")
- }
- if startCmdPidfileFlag != "" {
- cmd.Args = append(cmd.Args, "--pidfile", startCmdPidfileFlag)
- }
- stdinpipe, err := cmd.StdinPipe()
- if err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("creating stdin pipe: %v", err)
- }
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
-
- // generate the random bytes we'll send to the child process
- expect := make([]byte, 32)
- _, err = rand.Read(expect)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("generating random confirmation bytes: %v", err)
- }
-
- // begin writing the confirmation bytes to the child's
- // stdin; use a goroutine since the child hasn't been
- // started yet, and writing synchronously would result
- // in a deadlock
- go func() {
- _, _ = stdinpipe.Write(expect)
- stdinpipe.Close()
- }()
-
- // start the process
- err = cmd.Start()
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("starting caddy process: %v", err)
- }
-
- // there are two ways we know we're done: either
- // the process will connect to our listener, or
- // it will exit with an error
- success, exit := make(chan struct{}), make(chan error)
-
- // in one goroutine, we await the success of the child process
- go func() {
- for {
- conn, err := ln.Accept()
- if err != nil {
- if !errors.Is(err, net.ErrClosed) {
- log.Println(err)
- }
- break
- }
- err = handlePingbackConn(conn, expect)
- if err == nil {
- close(success)
- break
- }
- log.Println(err)
- }
- }()
-
- // in another goroutine, we await the failure of the child process
- go func() {
- err := cmd.Wait() // don't send on this line! Wait blocks, but send starts before it unblocks
- exit <- err // sending on separate line ensures select won't trigger until after Wait unblocks
- }()
-
- // when one of the goroutines unblocks, we're done and can exit
- select {
- case <-success:
- fmt.Printf("Successfully started Caddy (pid=%d) - Caddy is running in the background\n", cmd.Process.Pid)
- case err := <-exit:
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("caddy process exited with error: %v", err)
- }
-
- return caddy.ExitCodeSuccess, nil
-}
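Reviewer note: the pingback handshake described in the comments above is simple enough to sketch on its own: the parent writes random bytes to the child's stdin and accepts a TCP connection whose payload must echo them back. A standalone illustration (not the vendored implementation):

    package pingback

    import (
        "bytes"
        "fmt"
        "io"
        "net"
    )

    // verifyPingback is a toy version of the success check: the connecting
    // process must echo back exactly the expected random bytes.
    func verifyPingback(conn net.Conn, expect []byte) error {
        defer conn.Close()
        got := make([]byte, len(expect))
        if _, err := io.ReadFull(conn, got); err != nil {
            return fmt.Errorf("reading confirmation bytes: %v", err)
        }
        if !bytes.Equal(got, expect) {
            return fmt.Errorf("wrong confirmation bytes")
        }
        return nil
    }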
-
-func cmdRun(fl Flags) (int, error) {
- caddy.TrapSignals()
-
- runCmdConfigFlag := fl.String("config")
- runCmdConfigAdapterFlag := fl.String("adapter")
- runCmdResumeFlag := fl.Bool("resume")
- runCmdLoadEnvfileFlag := fl.String("envfile")
- runCmdPrintEnvFlag := fl.Bool("environ")
- runCmdWatchFlag := fl.Bool("watch")
- runCmdPidfileFlag := fl.String("pidfile")
- runCmdPingbackFlag := fl.String("pingback")
-
- // load all additional envs as soon as possible
- if runCmdLoadEnvfileFlag != "" {
- if err := loadEnvFromFile(runCmdLoadEnvfileFlag); err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("loading additional environment variables: %v", err)
- }
- }
-
- // if we are supposed to print the environment, do that first
- if runCmdPrintEnvFlag {
- printEnvironment()
- }
-
- // load the config, depending on flags
- var config []byte
- var err error
- if runCmdResumeFlag {
- config, err = ioutil.ReadFile(caddy.ConfigAutosavePath)
- if os.IsNotExist(err) {
- // not a bad error; just can't resume if autosave file doesn't exist
- caddy.Log().Info("no autosave file exists", zap.String("autosave_file", caddy.ConfigAutosavePath))
- runCmdResumeFlag = false
- } else if err != nil {
- return caddy.ExitCodeFailedStartup, err
- } else {
- if runCmdConfigFlag == "" {
- caddy.Log().Info("resuming from last configuration",
- zap.String("autosave_file", caddy.ConfigAutosavePath))
- } else {
- // if they also specified a config file, user should be aware that we're not
- // using it (doing so could lead to data/config loss by overwriting!)
- caddy.Log().Warn("--config and --resume flags were used together; ignoring --config and resuming from last configuration",
- zap.String("autosave_file", caddy.ConfigAutosavePath))
- }
- }
- }
- // we don't use 'else' here since runCmdResumeFlag may have been changed in the 'if' block above; i.e. the branches are not mutually exclusive
- var configFile string
- if !runCmdResumeFlag {
- config, configFile, err = loadConfig(runCmdConfigFlag, runCmdConfigAdapterFlag)
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
- }
-
- // run the initial config
- err = caddy.Load(config, true)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("loading initial config: %v", err)
- }
- caddy.Log().Info("serving initial configuration")
-
- // if we are to report to another process the successful start
- // of the server, do so now by echoing back contents of stdin
- if runCmdPingbackFlag != "" {
- confirmationBytes, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("reading confirmation bytes from stdin: %v", err)
- }
- conn, err := net.Dial("tcp", runCmdPingbackFlag)
- if err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("dialing confirmation address: %v", err)
- }
- defer conn.Close()
- _, err = conn.Write(confirmationBytes)
- if err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("writing confirmation bytes to %s: %v", runCmdPingbackFlag, err)
- }
- }
-
- // if enabled, reload config file automatically on changes
- // (this better only be used in dev!)
- if runCmdWatchFlag {
- go watchConfigFile(configFile, runCmdConfigAdapterFlag)
- }
-
- // create pidfile
- if runCmdPidfileFlag != "" {
- err := caddy.PIDFile(runCmdPidfileFlag)
- if err != nil {
- caddy.Log().Error("unable to write PID file",
- zap.String("pidfile", runCmdPidfileFlag),
- zap.Error(err))
- }
- }
-
- // warn if the environment does not provide enough information about the disk
- hasXDG := os.Getenv("XDG_DATA_HOME") != "" &&
- os.Getenv("XDG_CONFIG_HOME") != "" &&
- os.Getenv("XDG_CACHE_HOME") != ""
- switch runtime.GOOS {
- case "windows":
- if os.Getenv("HOME") == "" && os.Getenv("USERPROFILE") == "" && !hasXDG {
- caddy.Log().Warn("neither HOME nor USERPROFILE environment variables are set - please fix; some assets might be stored in ./caddy")
- }
- case "plan9":
- if os.Getenv("home") == "" && !hasXDG {
- caddy.Log().Warn("$home environment variable is empty - please fix; some assets might be stored in ./caddy")
- }
- default:
- if os.Getenv("HOME") == "" && !hasXDG {
- caddy.Log().Warn("$HOME environment variable is empty - please fix; some assets might be stored in ./caddy")
- }
- }
-
- select {}
-}
-
-func cmdStop(fl Flags) (int, error) {
- stopCmdAddrFlag := fl.String("address")
-
- err := apiRequest(stopCmdAddrFlag, http.MethodPost, "/stop", nil, nil)
- if err != nil {
- caddy.Log().Warn("failed using API to stop instance", zap.Error(err))
- return caddy.ExitCodeFailedStartup, err
- }
-
- return caddy.ExitCodeSuccess, nil
-}
-
-func cmdReload(fl Flags) (int, error) {
- reloadCmdConfigFlag := fl.String("config")
- reloadCmdConfigAdapterFlag := fl.String("adapter")
- reloadCmdAddrFlag := fl.String("address")
- reloadCmdForceFlag := fl.Bool("force")
-
- // get the config in caddy's native format
- config, configFile, err := loadConfig(reloadCmdConfigFlag, reloadCmdConfigAdapterFlag)
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
- if configFile == "" {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("no config file to load")
- }
-
- // get the address of the admin listener; use flag if specified
- adminAddr := reloadCmdAddrFlag
- if adminAddr == "" && len(config) > 0 {
- var tmpStruct struct {
- Admin caddy.AdminConfig `json:"admin"`
- }
- err = json.Unmarshal(config, &tmpStruct)
- if err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("unmarshaling admin listener address from config: %v", err)
- }
- adminAddr = tmpStruct.Admin.Listen
- }
-
- // optionally force a config reload
- headers := make(http.Header)
- if reloadCmdForceFlag {
- headers.Set("Cache-Control", "must-revalidate")
- }
-
- err = apiRequest(adminAddr, http.MethodPost, "/load", headers, bytes.NewReader(config))
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("sending configuration to instance: %v", err)
- }
-
- return caddy.ExitCodeSuccess, nil
-}
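cmdReload is ultimately a thin wrapper around the admin API's /load endpoint. A hedged sketch of the equivalent direct request, assuming the default admin listener on localhost:2019 and a native JSON config at a hypothetical caddy.json path:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"os"
)

func main() {
	config, err := os.ReadFile("caddy.json") // hypothetical config path
	if err != nil {
		panic(err)
	}

	// POST the config to the running instance's /load endpoint
	resp, err := http.Post("http://localhost:2019/load", "application/json", bytes.NewReader(config))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("reload response:", resp.Status)
}
```

As the code above shows, passing --force simply adds a Cache-Control: must-revalidate header to this same request.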
-
-func cmdVersion(_ Flags) (int, error) {
- fmt.Println(CaddyVersion())
- return caddy.ExitCodeSuccess, nil
-}
-
-func cmdBuildInfo(fl Flags) (int, error) {
- bi, ok := debug.ReadBuildInfo()
- if !ok {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("no build information")
- }
-
- fmt.Printf("go_version: %s\n", runtime.Version())
- fmt.Printf("go_os: %s\n", runtime.GOOS)
- fmt.Printf("go_arch: %s\n", runtime.GOARCH)
- fmt.Printf("path: %s\n", bi.Path)
- fmt.Printf("main: %s %s %s\n", bi.Main.Path, bi.Main.Version, bi.Main.Sum)
- fmt.Println("dependencies:")
-
- for _, goMod := range bi.Deps {
- fmt.Printf("%s %s %s", goMod.Path, goMod.Version, goMod.Sum)
- if goMod.Replace != nil {
- fmt.Printf(" => %s %s %s", goMod.Replace.Path, goMod.Replace.Version, goMod.Replace.Sum)
- }
- fmt.Println()
- }
- return caddy.ExitCodeSuccess, nil
-}
-
-func cmdListModules(fl Flags) (int, error) {
- packages := fl.Bool("packages")
- versions := fl.Bool("versions")
-
- printModuleInfo := func(mi moduleInfo) {
- fmt.Print(mi.caddyModuleID)
- if versions && mi.goModule != nil {
- fmt.Print(" " + mi.goModule.Version)
- }
- if packages && mi.goModule != nil {
- fmt.Print(" " + mi.goModule.Path)
- if mi.goModule.Replace != nil {
- fmt.Print(" => " + mi.goModule.Replace.Path)
- }
- }
- if mi.err != nil {
- fmt.Printf(" [%v]", mi.err)
- }
- fmt.Println()
- }
-
- // organize modules by whether they come with the standard distribution
- standard, nonstandard, unknown, err := getModules()
- if err != nil {
- // oh well, just print the module IDs and exit
- for _, m := range caddy.Modules() {
- fmt.Println(m)
- }
- return caddy.ExitCodeSuccess, nil
- }
-
- if len(standard) > 0 {
- for _, mod := range standard {
- printModuleInfo(mod)
- }
- }
- fmt.Printf("\n Standard modules: %d\n", len(standard))
- if len(nonstandard) > 0 {
- if len(standard) > 0 {
- fmt.Println()
- }
- for _, mod := range nonstandard {
- printModuleInfo(mod)
- }
- }
- fmt.Printf("\n Non-standard modules: %d\n", len(nonstandard))
- if len(unknown) > 0 {
- if len(standard) > 0 || len(nonstandard) > 0 {
- fmt.Println()
- }
- for _, mod := range unknown {
- printModuleInfo(mod)
- }
- }
- fmt.Printf("\n Unknown modules: %d\n", len(unknown))
-
- return caddy.ExitCodeSuccess, nil
-}
-
-func cmdEnviron(_ Flags) (int, error) {
- printEnvironment()
- return caddy.ExitCodeSuccess, nil
-}
-
-func cmdAdaptConfig(fl Flags) (int, error) {
- adaptCmdInputFlag := fl.String("config")
- adaptCmdAdapterFlag := fl.String("adapter")
- adaptCmdPrettyFlag := fl.Bool("pretty")
- adaptCmdValidateFlag := fl.Bool("validate")
-
- // if no input file was specified, try a default
- // Caddyfile if the Caddyfile adapter is plugged in
- if adaptCmdInputFlag == "" && caddyconfig.GetAdapter("caddyfile") != nil {
- _, err := os.Stat("Caddyfile")
- if err == nil {
- // default Caddyfile exists
- adaptCmdInputFlag = "Caddyfile"
- caddy.Log().Info("using adjacent Caddyfile")
- } else if !os.IsNotExist(err) {
- // default Caddyfile exists, but error accessing it
- return caddy.ExitCodeFailedStartup, fmt.Errorf("accessing default Caddyfile: %v", err)
- }
- }
-
- if adaptCmdInputFlag == "" {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("input file required when there is no Caddyfile in current directory (use --config flag)")
- }
- if adaptCmdAdapterFlag == "" {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("adapter name is required (use --adapt flag or leave unspecified for default)")
- }
-
- cfgAdapter := caddyconfig.GetAdapter(adaptCmdAdapterFlag)
- if cfgAdapter == nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("unrecognized config adapter: %s", adaptCmdAdapterFlag)
- }
-
- input, err := ioutil.ReadFile(adaptCmdInputFlag)
- if err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("reading input file: %v", err)
- }
-
- opts := map[string]interface{}{"filename": adaptCmdInputFlag}
-
- adaptedConfig, warnings, err := cfgAdapter.Adapt(input, opts)
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
-
- if adaptCmdPrettyFlag {
- var prettyBuf bytes.Buffer
- err = json.Indent(&prettyBuf, adaptedConfig, "", "\t")
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
- adaptedConfig = prettyBuf.Bytes()
- }
-
- // print result to stdout
- fmt.Println(string(adaptedConfig))
-
- // print warnings to stderr
- for _, warn := range warnings {
- msg := warn.Message
- if warn.Directive != "" {
- msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
- }
- fmt.Fprintf(os.Stderr, "[WARNING][%s] %s:%d: %s\n", adaptCmdAdapterFlag, warn.File, warn.Line, msg)
- }
-
- // validate output if requested
- if adaptCmdValidateFlag {
- var cfg *caddy.Config
- err = json.Unmarshal(adaptedConfig, &cfg)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err)
- }
- err = caddy.Validate(cfg)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("validation: %v", err)
- }
- }
-
- return caddy.ExitCodeSuccess, nil
-}
-
-func cmdValidateConfig(fl Flags) (int, error) {
- validateCmdConfigFlag := fl.String("config")
- validateCmdAdapterFlag := fl.String("adapter")
-
- input, _, err := loadConfig(validateCmdConfigFlag, validateCmdAdapterFlag)
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
- input = caddy.RemoveMetaFields(input)
-
- var cfg *caddy.Config
- err = json.Unmarshal(input, &cfg)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err)
- }
-
- err = caddy.Validate(cfg)
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
-
- fmt.Println("Valid configuration")
-
- return caddy.ExitCodeSuccess, nil
-}
-
-func cmdFmt(fl Flags) (int, error) {
- formatCmdConfigFile := fl.Arg(0)
- if formatCmdConfigFile == "" {
- formatCmdConfigFile = "Caddyfile"
- }
-
- // as a special case, read from stdin if the file name is "-"
- if formatCmdConfigFile == "-" {
- input, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("reading stdin: %v", err)
- }
- fmt.Print(string(caddyfile.Format(input)))
- return caddy.ExitCodeSuccess, nil
- }
-
- input, err := ioutil.ReadFile(formatCmdConfigFile)
- if err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("reading input file: %v", err)
- }
-
- output := caddyfile.Format(input)
-
- if fl.Bool("overwrite") {
- if err := ioutil.WriteFile(formatCmdConfigFile, output, 0600); err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("overwriting file: %v", err)
- }
- } else {
- fmt.Print(string(output))
- }
-
- return caddy.ExitCodeSuccess, nil
-}
-
-func cmdHelp(fl Flags) (int, error) {
- const fullDocs = `Full documentation is available at:
-https://caddyserver.com/docs/command-line`
-
- args := fl.Args()
- if len(args) == 0 {
- s := `Caddy is an extensible server platform.
-
-usage:
- caddy <command> [<args...>]
-
-commands:
-`
- keys := make([]string, 0, len(commands))
- for k := range commands {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- cmd := commands[k]
- short := strings.TrimSuffix(cmd.Short, ".")
- s += fmt.Sprintf(" %-15s %s\n", cmd.Name, short)
- }
-
- s += "\nUse 'caddy help ' for more information about a command.\n"
- s += "\n" + fullDocs + "\n"
-
- fmt.Print(s)
-
- return caddy.ExitCodeSuccess, nil
- } else if len(args) > 1 {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("can only give help with one command")
- }
-
- subcommand, ok := commands[args[0]]
- if !ok {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("unknown command: %s", args[0])
- }
-
- helpText := strings.TrimSpace(subcommand.Long)
- if helpText == "" {
- helpText = subcommand.Short
- if !strings.HasSuffix(helpText, ".") {
- helpText += "."
- }
- }
-
- result := fmt.Sprintf("%s\n\nusage:\n caddy %s %s\n",
- helpText,
- subcommand.Name,
- strings.TrimSpace(subcommand.Usage),
- )
-
- if help := flagHelp(subcommand.Flags); help != "" {
- result += fmt.Sprintf("\nflags:\n%s", help)
- }
-
- result += "\n" + fullDocs + "\n"
-
- fmt.Print(result)
-
- return caddy.ExitCodeSuccess, nil
-}
-
-// apiRequest makes an API request to the endpoint adminAddr with the
-// given HTTP method and request URI. If body is non-nil, it will be
-// assumed to be Content-Type application/json.
-func apiRequest(adminAddr, method, uri string, headers http.Header, body io.Reader) error {
- // parse the admin address
- if adminAddr == "" {
- adminAddr = caddy.DefaultAdminListen
- }
- parsedAddr, err := caddy.ParseNetworkAddress(adminAddr)
- if err != nil || parsedAddr.PortRangeSize() > 1 {
- return fmt.Errorf("invalid admin address %s: %v", adminAddr, err)
- }
- origin := parsedAddr.JoinHostPort(0)
- if parsedAddr.IsUnixNetwork() {
- origin = "unixsocket" // hack so that http.NewRequest() is happy
- }
-
- // form the request
- req, err := http.NewRequest(method, "http://"+origin+uri, body)
- if err != nil {
- return fmt.Errorf("making request: %v", err)
- }
- if parsedAddr.IsUnixNetwork() {
- // When listening on a unix socket, the admin endpoint doesn't
- // accept any Host header because there is no host:port for
- // a unix socket's address. The server's host check is fairly
- // strict for security reasons, so we don't allow just any
- // Host header. For unix sockets, the Host header must be
- // empty. Unfortunately, Go makes it impossible to make HTTP
- // requests with an empty Host header... except with this one
- // weird trick. (Hopefully they don't fix it. It's already
- // hard enough to use HTTP over unix sockets.)
- //
- // An equivalent curl command would be something like:
- // $ curl --unix-socket caddy.sock http:/:$REQUEST_URI
- req.URL.Host = " "
- req.Host = ""
- } else {
- req.Header.Set("Origin", origin)
- }
- if body != nil {
- req.Header.Set("Content-Type", "application/json")
- }
- for k, v := range headers {
- req.Header[k] = v
- }
-
- // make an HTTP client that dials our network type, since admin
- // endpoints aren't always TCP, which is what the default transport
- // expects; reuse is not of particular concern here
- client := http.Client{
- Transport: &http.Transport{
- DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
- return net.Dial(parsedAddr.Network, parsedAddr.JoinHostPort(0))
- },
- },
- }
-
- resp, err := client.Do(req)
- if err != nil {
- return fmt.Errorf("performing request: %v", err)
- }
- defer resp.Body.Close()
-
- // if it didn't work, let the user know
- if resp.StatusCode >= 400 {
- respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024*10))
- if err != nil {
- return fmt.Errorf("HTTP %d: reading error message: %v", resp.StatusCode, err)
- }
- return fmt.Errorf("caddy responded with error: HTTP %d: %s", resp.StatusCode, respBody)
- }
-
- return nil
-}
-
-type moduleInfo struct {
- caddyModuleID string
- goModule *debug.Module
- err error
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/cmd/commands.go b/vendor/github.com/caddyserver/caddy/v2/cmd/commands.go
deleted file mode 100644
index ccb82b11..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/cmd/commands.go
+++ /dev/null
@@ -1,359 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddycmd
-
-import (
- "flag"
- "regexp"
-)
-
-// Command represents a subcommand. Name, Func,
-// and Short are required.
-type Command struct {
- // The name of the subcommand. Must conform to the
- // format described by the RegisterCommand() godoc.
- // Required.
- Name string
-
- // Func is a function that executes a subcommand using
- // the parsed flags. It returns an exit code and any
- // associated error.
- // Required.
- Func CommandFunc
-
- // Usage is a brief message describing the syntax of
- // the subcommand's flags and args. Use [] to indicate
- // optional parameters and <> to enclose literal values
- // intended to be replaced by the user. Do not prefix
- // the string with "caddy" or the name of the command
- // since these will be prepended for you; only include
- // the actual parameters for this command.
- Usage string
-
- // Short is a one-line message explaining what the
- // command does. Should not end with punctuation.
- // Required.
- Short string
-
- // Long is the full help text shown to the user.
- // Will be trimmed of whitespace on both ends before
- // being printed.
- Long string
-
- // Flags is the flagset for command.
- Flags *flag.FlagSet
-}
-
-// CommandFunc is a command's function. It runs the
-// command and returns the proper exit code along with
-// any error that occurred.
-type CommandFunc func(Flags) (int, error)
-
-// Commands returns the map of commands registered via
-// RegisterCommand.
-func Commands() map[string]Command {
- return commands
-}
-
-var commands = make(map[string]Command)
-
-func init() {
- RegisterCommand(Command{
- Name: "help",
- Func: cmdHelp,
- Usage: "",
- Short: "Shows help for a Caddy subcommand",
- })
-
- RegisterCommand(Command{
- Name: "start",
- Func: cmdStart,
- Usage: "[--config [--adapter ]] [--envfile ] [--watch] [--pidfile ]",
- Short: "Starts the Caddy process in the background and then returns",
- Long: `
-Starts the Caddy process, optionally bootstrapped with an initial config file.
-This command unblocks after the server starts running or fails to run.
-
-If --envfile is specified, an environment file with environment variables in
-the KEY=VALUE format will be loaded into the Caddy process.
-
-On Windows, the spawned child process will remain attached to the terminal, so
-closing the window will forcefully stop Caddy; to avoid forgetting this, try
-using 'caddy run' instead to keep it in the foreground.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("start", flag.ExitOnError)
- fs.String("config", "", "Configuration file")
- fs.String("envfile", "", "Environment file to load")
- fs.String("adapter", "", "Name of config adapter to apply")
- fs.String("pidfile", "", "Path of file to which to write process ID")
- fs.Bool("watch", false, "Reload changed config file automatically")
- return fs
- }(),
- })
-
- RegisterCommand(Command{
- Name: "run",
- Func: cmdRun,
- Usage: "[--config [--adapter ]] [--envfile ] [--environ] [--resume] [--watch] [--pidfile ]",
- Short: `Starts the Caddy process and blocks indefinitely`,
- Long: `
-Starts the Caddy process, optionally bootstrapped with an initial config file,
-and blocks indefinitely until the server is stopped; i.e. runs Caddy in
-"daemon" mode (foreground).
-
-If a config file is specified, it will be applied immediately after the process
-is running. If the config file is not in Caddy's native JSON format, you can
-specify an adapter with --adapter to adapt the given config file to
-Caddy's native format. The config adapter must be a registered module. Any
-warnings will be printed to the log, but beware that any adaptation without
-errors will immediately be used. If you want to review the results of the
-adaptation first, use the 'adapt' subcommand.
-
-As a special case, if the current working directory has a file called
-"Caddyfile" and the caddyfile config adapter is plugged in (default), then
-that file will be loaded and used to configure Caddy, even without any command
-line flags.
-
-If --envfile is specified, an environment file with environment variables in
-the KEY=VALUE format will be loaded into the Caddy process.
-
-If --environ is specified, the environment as seen by the Caddy process will
-be printed before starting. This is the same as the environ command but does
-not quit after printing, and can be useful for troubleshooting.
-
-The --resume flag will override the --config flag if there is a config auto-
-save file. It is not an error if --resume is used and no autosave file exists.
-
-If --watch is specified, the config file will be loaded automatically after
-changes. ⚠️ This is dangerous in production! Only use this option in a local
-development environment.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("run", flag.ExitOnError)
- fs.String("config", "", "Configuration file")
- fs.String("adapter", "", "Name of config adapter to apply")
- fs.String("envfile", "", "Environment file to load")
- fs.Bool("environ", false, "Print environment")
- fs.Bool("resume", false, "Use saved config, if any (and prefer over --config file)")
- fs.Bool("watch", false, "Watch config file for changes and reload it automatically")
- fs.String("pidfile", "", "Path of file to which to write process ID")
- fs.String("pingback", "", "Echo confirmation bytes to this address on success")
- return fs
- }(),
- })
-
- RegisterCommand(Command{
- Name: "stop",
- Func: cmdStop,
- Short: "Gracefully stops a started Caddy process",
- Long: `
-Stops the background Caddy process as gracefully as possible.
-
-It requires that the admin API is enabled and accessible, since it will
-use the API's /stop endpoint. The address of this request can be
-customized using the --address flag if it is not the default.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("stop", flag.ExitOnError)
- fs.String("address", "", "The address to use to reach the admin API endpoint, if not the default")
- return fs
- }(),
- })
-
- RegisterCommand(Command{
- Name: "reload",
- Func: cmdReload,
- Usage: "--config [--adapter ] [--address ]",
- Short: "Changes the config of the running Caddy instance",
- Long: `
-Gives the running Caddy instance a new configuration. This has the same effect
-as POSTing a document to the /load API endpoint, but is convenient for simple
-workflows revolving around config files.
-
-Since the admin endpoint is configurable, the endpoint configuration is loaded
-from the --address flag if specified; otherwise it is loaded from the given
-config file; otherwise the default is assumed.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("reload", flag.ExitOnError)
- fs.String("config", "", "Configuration file (required)")
- fs.String("adapter", "", "Name of config adapter to apply")
- fs.String("address", "", "Address of the administration listener, if different from config")
- fs.Bool("force", false, "Force config reload, even if it is the same")
- return fs
- }(),
- })
-
- RegisterCommand(Command{
- Name: "version",
- Func: cmdVersion,
- Short: "Prints the version",
- })
-
- RegisterCommand(Command{
- Name: "list-modules",
- Func: cmdListModules,
- Usage: "[--packages] [--versions]",
- Short: "Lists the installed Caddy modules",
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("list-modules", flag.ExitOnError)
- fs.Bool("packages", false, "Print package paths")
- fs.Bool("versions", false, "Print version information")
- return fs
- }(),
- })
-
- RegisterCommand(Command{
- Name: "build-info",
- Func: cmdBuildInfo,
- Short: "Prints information about this build",
- })
-
- RegisterCommand(Command{
- Name: "environ",
- Func: cmdEnviron,
- Short: "Prints the environment",
- })
-
- RegisterCommand(Command{
- Name: "adapt",
- Func: cmdAdaptConfig,
- Usage: "--config [--adapter ] [--pretty] [--validate]",
- Short: "Adapts a configuration to Caddy's native JSON",
- Long: `
-Adapts a configuration to Caddy's native JSON format and writes the
-output to stdout, along with any warnings to stderr.
-
-If --pretty is specified, the output will be formatted with indentation
-for human readability.
-
-If --validate is used, the adapted config will be checked for validity.
-If the config is invalid, an error will be printed to stderr and a non-
-zero exit status will be returned.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("adapt", flag.ExitOnError)
- fs.String("config", "", "Configuration file to adapt (required)")
- fs.String("adapter", "caddyfile", "Name of config adapter")
- fs.Bool("pretty", false, "Format the output for human readability")
- fs.Bool("validate", false, "Validate the output")
- return fs
- }(),
- })
-
- RegisterCommand(Command{
- Name: "validate",
- Func: cmdValidateConfig,
- Usage: "--config [--adapter ]",
- Short: "Tests whether a configuration file is valid",
- Long: `
-Loads and provisions the provided config, but does not start running it.
-This reveals any errors with the configuration through the loading and
-provisioning stages.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("load", flag.ExitOnError)
- fs.String("config", "", "Input configuration file")
- fs.String("adapter", "", "Name of config adapter")
- return fs
- }(),
- })
-
- RegisterCommand(Command{
- Name: "fmt",
- Func: cmdFmt,
- Usage: "[--overwrite] []",
- Short: "Formats a Caddyfile",
- Long: `
-Formats the Caddyfile by adding proper indentation and spaces to improve
-human readability. It prints the result to stdout.
-
-If --overwrite is specified, the output will be written to the config file
-directly instead of printing it.
-
-If you wish to use stdin instead of a regular file, use - as the path.
-When reading from stdin, the --overwrite flag has no effect: the result
-is always printed to stdout.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("format", flag.ExitOnError)
- fs.Bool("overwrite", false, "Overwrite the input file with the results")
- return fs
- }(),
- })
-
- RegisterCommand(Command{
- Name: "upgrade",
- Func: cmdUpgrade,
- Short: "Upgrade Caddy (EXPERIMENTAL)",
- Long: `
-Downloads an updated Caddy binary with the same modules/plugins at the
-latest versions. EXPERIMENTAL: May be changed or removed.`,
- })
-
- RegisterCommand(Command{
- Name: "add-package",
- Func: cmdAddPackage,
- Usage: "",
- Short: "Adds Caddy packages (EXPERIMENTAL)",
- Long: `
-Downloads an updated Caddy binary with the specified packages (module/plugin)
-added. Retains existing packages. Returns an error if any of the packages are
-already included. EXPERIMENTAL: May be changed or removed.
-`,
- })
-
- RegisterCommand(Command{
- Name: "remove-package",
- Func: cmdRemovePackage,
- Usage: "",
- Short: "Removes Caddy packages (EXPERIMENTAL)",
- Long: `
-Downloads an updated Caddy binary without the specified packages (module/plugin).
-Returns an error if any of the packages are not included.
-EXPERIMENTAL: May be changed or removed.
-`,
- })
-
-}
-
-// RegisterCommand registers the command cmd.
-// cmd.Name must be unique and conform to the
-// following format:
-//
-// - lowercase
-// - alphanumeric and hyphen characters only
-// - cannot start or end with a hyphen
-// - hyphen cannot be adjacent to another hyphen
-//
-// This function panics if the name is already registered,
-// if the name does not meet the described format, or if
-// any of the fields are missing from cmd.
-//
-// This function should be used in init().
-func RegisterCommand(cmd Command) {
- if cmd.Name == "" {
- panic("command name is required")
- }
- if cmd.Func == nil {
- panic("command function missing")
- }
- if cmd.Short == "" {
- panic("command short string is required")
- }
- if _, exists := commands[cmd.Name]; exists {
- panic("command already registered: " + cmd.Name)
- }
- if !commandNameRegex.MatchString(cmd.Name) {
- panic("invalid command name")
- }
- commands[cmd.Name] = cmd
-}
-
-var commandNameRegex = regexp.MustCompile(`^[a-z0-9]$|^([a-z0-9]+-?[a-z0-9]*)+[a-z0-9]$`)
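For reference, a plugin registers its own subcommand from init using exactly this API. The hello command below is hypothetical, but the field usage mirrors the built-in registrations above:

```go
package hello

import (
	"flag"
	"fmt"

	"github.com/caddyserver/caddy/v2"
	caddycmd "github.com/caddyserver/caddy/v2/cmd"
)

func init() {
	caddycmd.RegisterCommand(caddycmd.Command{
		Name:  "hello", // lowercase, alphanumeric/hyphen only
		Func:  cmdHello,
		Usage: "[--name <name>]",
		Short: "Prints a greeting",
		Flags: func() *flag.FlagSet {
			fs := flag.NewFlagSet("hello", flag.ExitOnError)
			fs.String("name", "world", "Who to greet")
			return fs
		}(),
	})
}

// cmdHello satisfies CommandFunc: it returns an exit code and an error.
func cmdHello(fl caddycmd.Flags) (int, error) {
	fmt.Printf("hello, %s!\n", fl.String("name"))
	return caddy.ExitCodeSuccess, nil
}
```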
diff --git a/vendor/github.com/caddyserver/caddy/v2/cmd/main.go b/vendor/github.com/caddyserver/caddy/v2/cmd/main.go
deleted file mode 100644
index 8a9a2896..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/cmd/main.go
+++ /dev/null
@@ -1,457 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddycmd
-
-import (
- "bufio"
- "bytes"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "os"
- "path/filepath"
- "runtime"
- "runtime/debug"
- "strconv"
- "strings"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/certmagic"
- "go.uber.org/zap"
-)
-
-func init() {
- // set a fitting User-Agent for ACME requests
- goModule := caddy.GoModule()
- cleanModVersion := strings.TrimPrefix(goModule.Version, "v")
- certmagic.UserAgent = "Caddy/" + cleanModVersion
-
- // by using Caddy, user indicates agreement to CA terms
- // (very important, or ACME account creation will fail!)
- certmagic.DefaultACME.Agreed = true
-}
-
-// Main implements the main function of the caddy command.
-// Call this if Caddy is to be the main() of your program.
-func Main() {
- switch len(os.Args) {
- case 0:
- fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n")
- os.Exit(caddy.ExitCodeFailedStartup)
- case 1:
- os.Args = append(os.Args, "help")
- }
-
- subcommandName := os.Args[1]
- subcommand, ok := commands[subcommandName]
- if !ok {
- if strings.HasPrefix(os.Args[1], "-") {
- // user probably forgot to type the subcommand
- fmt.Println("[ERROR] first argument must be a subcommand; see 'caddy help'")
- } else {
- fmt.Printf("[ERROR] '%s' is not a recognized subcommand; see 'caddy help'\n", os.Args[1])
- }
- os.Exit(caddy.ExitCodeFailedStartup)
- }
-
- fs := subcommand.Flags
- if fs == nil {
- fs = flag.NewFlagSet(subcommand.Name, flag.ExitOnError)
- }
-
- err := fs.Parse(os.Args[2:])
- if err != nil {
- fmt.Println(err)
- os.Exit(caddy.ExitCodeFailedStartup)
- }
-
- exitCode, err := subcommand.Func(Flags{fs})
- if err != nil {
- fmt.Fprintf(os.Stderr, "%s: %v\n", subcommand.Name, err)
- }
-
- os.Exit(exitCode)
-}
-
-// handlePingbackConn reads from conn and ensures it matches
-// the bytes in expect, or returns an error if it doesn't.
-func handlePingbackConn(conn net.Conn, expect []byte) error {
- defer conn.Close()
- confirmationBytes, err := ioutil.ReadAll(io.LimitReader(conn, 32))
- if err != nil {
- return err
- }
- if !bytes.Equal(confirmationBytes, expect) {
- return fmt.Errorf("wrong confirmation: %x", confirmationBytes)
- }
- return nil
-}
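Both halves of the pingback handshake fit in a few lines. This sketch pairs a verifying listener (the parent's role, mirroring handlePingbackConn) with a dialer that echoes the confirmation bytes (the child's role):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net"
)

func main() {
	expect := []byte("secret-handshake")

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// child's role: dial back and echo the confirmation bytes
	go func() {
		conn, err := net.Dial("tcp", ln.Addr().String())
		if err != nil {
			return
		}
		defer conn.Close()
		_, _ = conn.Write(expect)
	}()

	// parent's role: accept and verify, as handlePingbackConn does
	conn, err := ln.Accept()
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	got, err := io.ReadAll(io.LimitReader(conn, 32))
	if err != nil {
		panic(err)
	}
	if bytes.Equal(got, expect) {
		fmt.Println("pingback confirmed")
	} else {
		fmt.Printf("wrong confirmation: %x\n", got)
	}
}
```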
-
-// loadConfig loads the config from configFile and adapts it
-// using adapterName. If adapterName is specified, configFile
-// must be also. If no configFile is specified, it tries
-// loading a default config file. The lack of a config file is
-// not treated as an error, but nil config bytes are returned if
-// there is no config available. It prints any warnings to stderr,
-// and returns the resulting JSON config bytes along with the
-// name of the config file that was loaded, if any.
-func loadConfig(configFile, adapterName string) ([]byte, string, error) {
- // specifying an adapter without a config file is ambiguous
- if adapterName != "" && configFile == "" {
- return nil, "", fmt.Errorf("cannot adapt config without config file (use --config)")
- }
-
- // load initial config and adapter
- var config []byte
- var cfgAdapter caddyconfig.Adapter
- var err error
- if configFile != "" {
- if configFile == "-" {
- config, err = ioutil.ReadAll(os.Stdin)
- } else {
- config, err = ioutil.ReadFile(configFile)
- }
- if err != nil {
- return nil, "", fmt.Errorf("reading config file: %v", err)
- }
- caddy.Log().Info("using provided configuration",
- zap.String("config_file", configFile),
- zap.String("config_adapter", adapterName))
- } else if adapterName == "" {
- // as a special case when no config file or adapter
- // is specified, see if the Caddyfile adapter is
- // plugged in, and if so, try using a default Caddyfile
- cfgAdapter = caddyconfig.GetAdapter("caddyfile")
- if cfgAdapter != nil {
- config, err = ioutil.ReadFile("Caddyfile")
- if os.IsNotExist(err) {
- // okay, no default Caddyfile; pretend like this never happened
- cfgAdapter = nil
- } else if err != nil {
- // default Caddyfile exists, but error reading it
- return nil, "", fmt.Errorf("reading default Caddyfile: %v", err)
- } else {
- // success reading default Caddyfile
- configFile = "Caddyfile"
- caddy.Log().Info("using adjacent Caddyfile")
- }
- }
- }
-
- // as a special case, if a config file called "Caddyfile" was
- // specified, and no adapter is specified, assume caddyfile adapter
- // for convenience
- if strings.HasPrefix(filepath.Base(configFile), "Caddyfile") &&
- filepath.Ext(configFile) != ".json" &&
- adapterName == "" {
- adapterName = "caddyfile"
- }
-
- // load config adapter
- if adapterName != "" {
- cfgAdapter = caddyconfig.GetAdapter(adapterName)
- if cfgAdapter == nil {
- return nil, "", fmt.Errorf("unrecognized config adapter: %s", adapterName)
- }
- }
-
- // adapt config
- if cfgAdapter != nil {
- adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]interface{}{
- "filename": configFile,
- })
- if err != nil {
- return nil, "", fmt.Errorf("adapting config using %s: %v", adapterName, err)
- }
- for _, warn := range warnings {
- msg := warn.Message
- if warn.Directive != "" {
- msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
- }
- caddy.Log().Warn(msg, zap.String("adapter", adapterName), zap.String("file", warn.File), zap.Int("line", warn.Line))
- }
- config = adaptedConfig
- }
-
- return config, configFile, nil
-}
-
-// watchConfigFile watches the config file at filename for changes
-// and reloads the config if the file was updated. This function
-// blocks indefinitely; it only quits if the poller has errors for
-// long enough time. The filename passed in must be the actual
-// config file used, not one to be discovered.
-func watchConfigFile(filename, adapterName string) {
- defer func() {
- if err := recover(); err != nil {
- log.Printf("[PANIC] watching config file: %v\n%s", err, debug.Stack())
- }
- }()
-
- // make our logger; since config reloads can change the
- // default logger, we need to get it dynamically each time
- logger := func() *zap.Logger {
- return caddy.Log().
- Named("watcher").
- With(zap.String("config_file", filename))
- }
-
- // get the initial timestamp on the config file
- info, err := os.Stat(filename)
- if err != nil {
- logger().Error("cannot watch config file", zap.Error(err))
- return
- }
- lastModified := info.ModTime()
-
- logger().Info("watching config file for changes")
-
- // if the file disappears or something, we can
- // stop polling if the error lasts long enough
- var lastErr time.Time
- finalError := func(err error) bool {
- if lastErr.IsZero() {
- lastErr = time.Now()
- return false
- }
- if time.Since(lastErr) > 30*time.Second {
- logger().Error("giving up watching config file; too many errors",
- zap.Error(err))
- return true
- }
- return false
- }
-
- // begin poller
- //nolint:staticcheck
- for range time.Tick(1 * time.Second) {
- // get the file info
- info, err := os.Stat(filename)
- if err != nil {
- if finalError(err) {
- return
- }
- continue
- }
- lastErr = time.Time{} // no error, so clear any memory of one
-
- // if it hasn't changed, nothing to do
- if !info.ModTime().After(lastModified) {
- continue
- }
-
- logger().Info("config file changed; reloading")
-
- // remember this timestamp
- lastModified = info.ModTime()
-
- // load the contents of the file
- config, _, err := loadConfig(filename, adapterName)
- if err != nil {
- logger().Error("unable to load latest config", zap.Error(err))
- continue
- }
-
- // apply the updated config
- err = caddy.Load(config, false)
- if err != nil {
- logger().Error("applying latest config", zap.Error(err))
- continue
- }
- }
-}
-
-// Flags wraps a FlagSet so that typed values
-// from flags can be easily retrieved.
-type Flags struct {
- *flag.FlagSet
-}
-
-// String returns the string representation of the
-// flag given by name. It panics if the flag is not
-// in the flag set.
-func (f Flags) String(name string) string {
- return f.FlagSet.Lookup(name).Value.String()
-}
-
-// Bool returns the boolean representation of the
-// flag given by name. It returns false if the flag
-// is not a boolean type. It panics if the flag is
-// not in the flag set.
-func (f Flags) Bool(name string) bool {
- val, _ := strconv.ParseBool(f.String(name))
- return val
-}
-
-// Int returns the integer representation of the
-// flag given by name. It returns 0 if the flag
-// is not an integer type. It panics if the flag is
-// not in the flag set.
-func (f Flags) Int(name string) int {
- val, _ := strconv.ParseInt(f.String(name), 0, strconv.IntSize)
- return int(val)
-}
-
-// Float64 returns the float64 representation of the
-// flag given by name. It returns 0 if the flag
-// is not a float64 type. It panics if the flag is
-// not in the flag set.
-func (f Flags) Float64(name string) float64 {
- val, _ := strconv.ParseFloat(f.String(name), 64)
- return val
-}
-
-// Duration returns the duration representation of the
-// flag given by name. It returns 0 if the flag
-// is not a duration type. It panics if the flag is
-// not in the flag set.
-func (f Flags) Duration(name string) time.Duration {
- val, _ := caddy.ParseDuration(f.String(name))
- return val
-}
-
-// flagHelp returns the help text for fs.
-func flagHelp(fs *flag.FlagSet) string {
- if fs == nil {
- return ""
- }
-
- // temporarily redirect output
- out := fs.Output()
- defer fs.SetOutput(out)
-
- buf := new(bytes.Buffer)
- fs.SetOutput(buf)
- fs.PrintDefaults()
- return buf.String()
-}
-
-func loadEnvFromFile(envFile string) error {
- file, err := os.Open(envFile)
- if err != nil {
- return fmt.Errorf("reading environment file: %v", err)
- }
- defer file.Close()
-
- envMap, err := parseEnvFile(file)
- if err != nil {
- return fmt.Errorf("parsing environment file: %v", err)
- }
-
- for k, v := range envMap {
- if err := os.Setenv(k, v); err != nil {
- return fmt.Errorf("setting environment variables: %v", err)
- }
- }
-
- // Update the storage paths to ensure they have the proper
- // value after loading a specified env file.
- caddy.ConfigAutosavePath = filepath.Join(caddy.AppConfigDir(), "autosave.json")
- caddy.DefaultStorage = &certmagic.FileStorage{Path: caddy.AppDataDir()}
-
- return nil
-}
-
-func parseEnvFile(envInput io.Reader) (map[string]string, error) {
- envMap := make(map[string]string)
-
- scanner := bufio.NewScanner(envInput)
- var line string
- lineNumber := 0
-
- for scanner.Scan() {
- line = strings.TrimSpace(scanner.Text())
- lineNumber++
-
- // skip lines starting with comment
- if strings.HasPrefix(line, "#") {
- continue
- }
-
- // skip empty line
- if len(line) == 0 {
- continue
- }
-
- fields := strings.SplitN(line, "=", 2)
- if len(fields) != 2 {
- return nil, fmt.Errorf("can't parse line %d; line should be in KEY=VALUE format", lineNumber)
- }
-
- if strings.Contains(fields[0], " ") {
- return nil, fmt.Errorf("bad key on line %d: contains whitespace", lineNumber)
- }
-
- key := fields[0]
- val := fields[1]
-
- if key == "" {
- return nil, fmt.Errorf("missing or empty key on line %d", lineNumber)
- }
- envMap[key] = val
- }
-
- if err := scanner.Err(); err != nil {
- return nil, err
- }
-
- return envMap, nil
-}
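Since parseEnvFile accepts any io.Reader, its contract is easy to pin down without touching the filesystem. A hypothetical test alongside this package:

```go
package caddycmd

import (
	"strings"
	"testing"
)

func TestParseEnvFile(t *testing.T) {
	// comments and blank lines are skipped; values may contain spaces
	input := strings.NewReader(`# deployment settings

APP_ENV=production
GREETING=hello world
`)
	envMap, err := parseEnvFile(input)
	if err != nil {
		t.Fatal(err)
	}
	if envMap["APP_ENV"] != "production" {
		t.Errorf("APP_ENV = %q, want %q", envMap["APP_ENV"], "production")
	}
	if envMap["GREETING"] != "hello world" {
		t.Errorf("GREETING = %q, want %q", envMap["GREETING"], "hello world")
	}
}
```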
-
-func printEnvironment() {
- fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir())
- fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir())
- fmt.Printf("caddy.AppConfigDir=%s\n", caddy.AppConfigDir())
- fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath)
- fmt.Printf("caddy.Version=%s\n", CaddyVersion())
- fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS)
- fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH)
- fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler)
- fmt.Printf("runtime.NumCPU=%d\n", runtime.NumCPU())
- fmt.Printf("runtime.GOMAXPROCS=%d\n", runtime.GOMAXPROCS(0))
- fmt.Printf("runtime.Version=%s\n", runtime.Version())
- cwd, err := os.Getwd()
- if err != nil {
- cwd = fmt.Sprintf("<error: %v>", err)
- }
- fmt.Printf("os.Getwd=%s\n\n", cwd)
- for _, v := range os.Environ() {
- fmt.Println(v)
- }
-}
-
-// CaddyVersion returns a detailed version string, if available.
-func CaddyVersion() string {
- goModule := caddy.GoModule()
- ver := goModule.Version
- if goModule.Sum != "" {
- ver += " " + goModule.Sum
- }
- if goModule.Replace != nil {
- ver += " => " + goModule.Replace.Path
- if goModule.Replace.Version != "" {
- ver += "@" + goModule.Replace.Version
- }
- if goModule.Replace.Sum != "" {
- ver += " " + goModule.Replace.Sum
- }
- }
- return ver
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/cmd/packagesfuncs.go b/vendor/github.com/caddyserver/caddy/v2/cmd/packagesfuncs.go
deleted file mode 100644
index 6aaf52bf..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/cmd/packagesfuncs.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddycmd
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "os"
- "os/exec"
- "reflect"
- "runtime"
- "runtime/debug"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "go.uber.org/zap"
-)
-
-func cmdUpgrade(_ Flags) (int, error) {
- _, nonstandard, _, err := getModules()
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
- }
- pluginPkgs, err := getPluginPackages(nonstandard)
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
-
- return upgradeBuild(pluginPkgs)
-}
-
-func cmdAddPackage(fl Flags) (int, error) {
- if len(fl.Args()) == 0 {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("at least one package name must be specified")
- }
- _, nonstandard, _, err := getModules()
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
- }
- pluginPkgs, err := getPluginPackages(nonstandard)
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
-
- for _, arg := range fl.Args() {
- if _, ok := pluginPkgs[arg]; ok {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("package is already added")
- }
- pluginPkgs[arg] = struct{}{}
- }
-
- return upgradeBuild(pluginPkgs)
-}
-
-func cmdRemovePackage(fl Flags) (int, error) {
- if len(fl.Args()) == 0 {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("at least one package name must be specified")
- }
- _, nonstandard, _, err := getModules()
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
- }
- pluginPkgs, err := getPluginPackages(nonstandard)
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
-
- for _, arg := range fl.Args() {
- if _, ok := pluginPkgs[arg]; !ok {
- // package does not exist
- return caddy.ExitCodeFailedStartup, fmt.Errorf("package is not added")
- }
- delete(pluginPkgs, arg)
- }
-
- return upgradeBuild(pluginPkgs)
-}
-
-func upgradeBuild(pluginPkgs map[string]struct{}) (int, error) {
- l := caddy.Log()
-
- thisExecPath, err := os.Executable()
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("determining current executable path: %v", err)
- }
- thisExecStat, err := os.Stat(thisExecPath)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("retrieving current executable permission bits: %v", err)
- }
- l.Info("this executable will be replaced", zap.String("path", thisExecPath))
-
- // build the request URL to download this custom build
- qs := url.Values{
- "os": {runtime.GOOS},
- "arch": {runtime.GOARCH},
- }
- for pkg := range pluginPkgs {
- qs.Add("p", pkg)
- }
-
- // initiate the build
- resp, err := downloadBuild(qs)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("download failed: %v", err)
- }
- defer resp.Body.Close()
-
- // back up the current binary, in case something goes wrong we can replace it
- backupExecPath := thisExecPath + ".tmp"
- l.Info("build acquired; backing up current executable",
- zap.String("current_path", thisExecPath),
- zap.String("backup_path", backupExecPath))
- err = os.Rename(thisExecPath, backupExecPath)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("backing up current binary: %v", err)
- }
- defer func() {
- if err != nil {
- err2 := os.Rename(backupExecPath, thisExecPath)
- if err2 != nil {
- l.Error("restoring original executable failed; will need to be restored manually",
- zap.String("backup_path", backupExecPath),
- zap.String("original_path", thisExecPath),
- zap.Error(err2))
- }
- }
- }()
-
- // download the file; do this in a closure to close reliably before we execute it
- err = writeCaddyBinary(thisExecPath, &resp.Body, thisExecStat)
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
-
- l.Info("download successful; displaying new binary details", zap.String("location", thisExecPath))
-
- // use the new binary to print out version and module info
- fmt.Print("\nModule versions:\n\n")
- if err = listModules(thisExecPath); err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to execute: %v", err)
- }
- fmt.Println("\nVersion:")
- if err = showVersion(thisExecPath); err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to execute: %v", err)
- }
- fmt.Println()
-
- // clean up the backup file
- if err = os.Remove(backupExecPath); err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to clean up backup binary: %v", err)
- }
- l.Info("upgrade successful; please restart any running Caddy instances", zap.String("executable", thisExecPath))
-
- return caddy.ExitCodeSuccess, nil
-}
-
-func getModules() (standard, nonstandard, unknown []moduleInfo, err error) {
- bi, ok := debug.ReadBuildInfo()
- if !ok {
- err = fmt.Errorf("no build info")
- return
- }
-
- for _, modID := range caddy.Modules() {
- modInfo, err := caddy.GetModule(modID)
- if err != nil {
- // that's weird, shouldn't happen
- unknown = append(unknown, moduleInfo{caddyModuleID: modID, err: err})
- continue
- }
-
- // to get the Caddy plugin's version info, we need to know
- // the package that the Caddy module's value comes from; we
- // can use reflection but we need a non-pointer value (I'm
- // not sure why), and since New() should return a pointer
- // value, we need to dereference it first
- iface := interface{}(modInfo.New())
- if rv := reflect.ValueOf(iface); rv.Kind() == reflect.Ptr {
- iface = reflect.New(reflect.TypeOf(iface).Elem()).Elem().Interface()
- }
- modPkgPath := reflect.TypeOf(iface).PkgPath()
-
- // now we find the Go module that the Caddy module's package
- // belongs to; we assume the Caddy module package path will
- // be prefixed by its Go module path, and we will choose the
- // longest matching prefix in case there are nested modules
- var matched *debug.Module
- for _, dep := range bi.Deps {
- if strings.HasPrefix(modPkgPath, dep.Path) {
- if matched == nil || len(dep.Path) > len(matched.Path) {
- matched = dep
- }
- }
- }
-
- caddyModGoMod := moduleInfo{caddyModuleID: modID, goModule: matched}
-
- if strings.HasPrefix(modPkgPath, caddy.ImportPath) {
- standard = append(standard, caddyModGoMod)
- } else {
- nonstandard = append(nonstandard, caddyModGoMod)
- }
- }
- return
-}
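The reflection step above (get a non-pointer value, then read its PkgPath) is easier to see in isolation. A small sketch with a hypothetical widget type:

```go
package main

import (
	"fmt"
	"reflect"
)

type widget struct{}

func newWidget() interface{} { return new(widget) }

func main() {
	iface := newWidget()
	// a pointer type is unnamed, so its PkgPath is empty;
	// dereference to the element type first, as getModules does
	if rv := reflect.ValueOf(iface); rv.Kind() == reflect.Ptr {
		iface = reflect.New(reflect.TypeOf(iface).Elem()).Elem().Interface()
	}
	fmt.Println(reflect.TypeOf(iface).PkgPath()) // prints "main"
}
```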
-
-func listModules(path string) error {
- cmd := exec.Command(path, "list-modules", "--versions")
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err := cmd.Run()
- if err != nil {
- return fmt.Errorf("download succeeded, but unable to execute: %v", err)
- }
- return nil
-}
-
-func showVersion(path string) error {
- cmd := exec.Command(path, "version")
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err := cmd.Run()
- if err != nil {
- return fmt.Errorf("download succeeded, but unable to execute: %v", err)
- }
- return nil
-}
-
-func downloadBuild(qs url.Values) (*http.Response, error) {
- l := caddy.Log()
- l.Info("requesting build",
- zap.String("os", qs.Get("os")),
- zap.String("arch", qs.Get("arch")),
- zap.Strings("packages", qs["p"]))
- resp, err := http.Get(fmt.Sprintf("%s?%s", downloadPath, qs.Encode()))
- if err != nil {
- return nil, fmt.Errorf("secure request failed: %v", err)
- }
- if resp.StatusCode >= 400 {
- var details struct {
- StatusCode int `json:"status_code"`
- Error struct {
- Message string `json:"message"`
- ID string `json:"id"`
- } `json:"error"`
- }
- err2 := json.NewDecoder(resp.Body).Decode(&details)
- if err2 != nil {
- return nil, fmt.Errorf("download and error decoding failed: HTTP %d: %v", resp.StatusCode, err2)
- }
- return nil, fmt.Errorf("download failed: HTTP %d: %s (id=%s)", resp.StatusCode, details.Error.Message, details.Error.ID)
- }
- return resp, nil
-}
-
-func getPluginPackages(modules []moduleInfo) (map[string]struct{}, error) {
- pluginPkgs := make(map[string]struct{})
- for _, mod := range modules {
- if mod.goModule.Replace != nil {
- return nil, fmt.Errorf("cannot auto-upgrade when Go module has been replaced: %s => %s",
- mod.goModule.Path, mod.goModule.Replace.Path)
- }
- pluginPkgs[mod.goModule.Path] = struct{}{}
- }
- return pluginPkgs, nil
-}
-
-func writeCaddyBinary(path string, body *io.ReadCloser, fileInfo os.FileInfo) error {
- l := caddy.Log()
- destFile, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileInfo.Mode())
- if err != nil {
- return fmt.Errorf("unable to open destination file: %v", err)
- }
- defer destFile.Close()
-
- l.Info("downloading binary", zap.String("destination", path))
-
- _, err = io.Copy(destFile, *body)
- if err != nil {
- return fmt.Errorf("unable to download file: %v", err)
- }
-
- err = destFile.Sync()
- if err != nil {
- return fmt.Errorf("syncing downloaded file to device: %v", err)
- }
-
- return nil
-}
-
-const downloadPath = "https://caddyserver.com/api/download"
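Concretely, upgradeBuild ends up issuing a GET like the one below; the cloudflare DNS plugin path is only an example value for p:

```go
package main

import (
	"fmt"
	"net/url"
	"runtime"
)

const downloadPath = "https://caddyserver.com/api/download"

func main() {
	qs := url.Values{
		"os":   {runtime.GOOS},
		"arch": {runtime.GOARCH},
	}
	qs.Add("p", "github.com/caddy-dns/cloudflare") // example plugin package

	fmt.Printf("%s?%s\n", downloadPath, qs.Encode())
	// e.g. https://caddyserver.com/api/download?arch=amd64&os=linux&p=github.com%2Fcaddy-dns%2Fcloudflare
}
```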
diff --git a/vendor/github.com/caddyserver/caddy/v2/context.go b/vendor/github.com/caddyserver/caddy/v2/context.go
deleted file mode 100644
index a6386aa8..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/context.go
+++ /dev/null
@@ -1,442 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddy
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "log"
- "reflect"
-
- "github.com/caddyserver/certmagic"
- "go.uber.org/zap"
-)
-
-// Context is a type which defines the lifetime of modules that
-// are loaded and provides access to the parent configuration
-// that spawned the modules which are loaded. It should be used
-// with care and wrapped with derivation functions from the
-// standard context package only if you don't need the Caddy
-// specific features. These contexts are canceled when the
-// lifetime of the modules loaded from it is over.
-//
-// Use NewContext() to get a valid value (but most modules will
-// not actually need to do this).
-type Context struct {
- context.Context
- moduleInstances map[string][]interface{}
- cfg *Config
- cleanupFuncs []func()
-}
-
-// NewContext provides a new context derived from the given
-// context ctx. Normally, you will not need to call this
-// function unless you are loading modules which have a
-// different lifespan than the ones for the context the
-// module was provisioned with. Be sure to call the cancel
-// func when the context is to be cleaned up so that
-// modules which are loaded will be properly unloaded.
-// See standard library context package's documentation.
-func NewContext(ctx Context) (Context, context.CancelFunc) {
- newCtx := Context{moduleInstances: make(map[string][]interface{}), cfg: ctx.cfg}
- c, cancel := context.WithCancel(ctx.Context)
- wrappedCancel := func() {
- cancel()
-
- for _, f := range ctx.cleanupFuncs {
- f()
- }
-
- for modName, modInstances := range newCtx.moduleInstances {
- for _, inst := range modInstances {
- if cu, ok := inst.(CleanerUpper); ok {
- err := cu.Cleanup()
- if err != nil {
- log.Printf("[ERROR] %s (%p): cleanup: %v", modName, inst, err)
- }
- }
- }
- }
- }
- newCtx.Context = c
- return newCtx, wrappedCancel
-}
-
-// OnCancel executes f when ctx is canceled.
-func (ctx *Context) OnCancel(f func()) {
- ctx.cleanupFuncs = append(ctx.cleanupFuncs, f)
-}
-
-// LoadModule loads the Caddy module(s) from the specified field of the parent struct
-// pointer and returns the loaded module(s). The struct pointer and its field name as
-// a string are necessary so that reflection can be used to read the struct tag on the
-// field to get the module namespace and inline module name key (if specified).
-//
-// The field can be any one of the supported raw module types: json.RawMessage,
-// []json.RawMessage, map[string]json.RawMessage, or []map[string]json.RawMessage.
-// ModuleMap may be used in place of map[string]json.RawMessage. The return value's
-// underlying type mirrors the input field's type:
-//
-// json.RawMessage => interface{}
-// []json.RawMessage => []interface{}
-// [][]json.RawMessage => [][]interface{}
-// map[string]json.RawMessage => map[string]interface{}
-// []map[string]json.RawMessage => []map[string]interface{}
-//
-// The field must have a "caddy" struct tag in this format:
-//
-// caddy:"key1=val1 key2=val2"
-//
-// To load modules, a "namespace" key is required. For example, to load modules
-// in the "http.handlers" namespace, you'd put: `namespace=http.handlers` in the
-// Caddy struct tag.
-//
-// The module name must also be available. If the field type is a map or slice of maps,
-// then key is assumed to be the module name if an "inline_key" is NOT specified in the
-// caddy struct tag. In this case, the module name does NOT need to be specified in-line
-// with the module itself.
-//
-// If not a map, or if inline_key is non-empty, then the module name must be embedded
-// into the values, which must be objects; then there must be a key in those objects
-// where its associated value is the module name. This is called the "inline key",
-// meaning the key containing the module's name that is defined inline with the module
-// itself. You must specify the inline key in a struct tag, along with the namespace:
-//
-// caddy:"namespace=http.handlers inline_key=handler"
-//
-// This will look for a key/value pair like `"handler": "..."` in the json.RawMessage
-// in order to know the module name.
-//
-// To make use of the loaded module(s) (the return value), you will probably want
-// to type-assert each interface{} value(s) to the types that are useful to you
-// and store them on the same struct. Storing them on the same struct makes for
-// easy garbage collection when your host module is no longer needed.
-//
-// Loaded modules have already been provisioned and validated. Upon returning
-// successfully, this method clears the json.RawMessage(s) in the field since
-// the raw JSON is no longer needed, and this allows the GC to free up memory.
-func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (interface{}, error) {
- val := reflect.ValueOf(structPointer).Elem().FieldByName(fieldName)
- typ := val.Type()
-
- field, ok := reflect.TypeOf(structPointer).Elem().FieldByName(fieldName)
- if !ok {
- panic(fmt.Sprintf("field %s does not exist in %#v", fieldName, structPointer))
- }
-
- opts, err := ParseStructTag(field.Tag.Get("caddy"))
- if err != nil {
- panic(fmt.Sprintf("malformed tag on field %s: %v", fieldName, err))
- }
-
- moduleNamespace, ok := opts["namespace"]
- if !ok {
- panic(fmt.Sprintf("missing 'namespace' key in struct tag on field %s", fieldName))
- }
- inlineModuleKey := opts["inline_key"]
-
- var result interface{}
-
- switch val.Kind() {
- case reflect.Slice:
- if isJSONRawMessage(typ) {
- // val is `json.RawMessage` ([]uint8 under the hood)
-
- if inlineModuleKey == "" {
- panic("unable to determine module name without inline_key when type is not a ModuleMap")
- }
- val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Interface().(json.RawMessage))
- if err != nil {
- return nil, err
- }
- result = val
-
- } else if isJSONRawMessage(typ.Elem()) {
- // val is `[]json.RawMessage`
-
- if inlineModuleKey == "" {
- panic("unable to determine module name without inline_key because type is not a ModuleMap")
- }
- var all []interface{}
- for i := 0; i < val.Len(); i++ {
- val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Index(i).Interface().(json.RawMessage))
- if err != nil {
- return nil, fmt.Errorf("position %d: %v", i, err)
- }
- all = append(all, val)
- }
- result = all
-
- } else if typ.Elem().Kind() == reflect.Slice && isJSONRawMessage(typ.Elem().Elem()) {
- // val is `[][]json.RawMessage`
-
- if inlineModuleKey == "" {
- panic("unable to determine module name without inline_key because type is not a ModuleMap")
- }
- var all [][]interface{}
- for i := 0; i < val.Len(); i++ {
- innerVal := val.Index(i)
- var allInner []interface{}
- for j := 0; j < innerVal.Len(); j++ {
- innerInnerVal, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, innerVal.Index(j).Interface().(json.RawMessage))
- if err != nil {
- return nil, fmt.Errorf("position %d: %v", j, err)
- }
- allInner = append(allInner, innerInnerVal)
- }
- all = append(all, allInner)
- }
- result = all
-
- } else if isModuleMapType(typ.Elem()) {
- // val is `[]map[string]json.RawMessage`
-
- var all []map[string]interface{}
- for i := 0; i < val.Len(); i++ {
- thisSet, err := ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val.Index(i))
- if err != nil {
- return nil, err
- }
- all = append(all, thisSet)
- }
- result = all
- }
-
- case reflect.Map:
- // val is a ModuleMap or some other kind of map
- result, err = ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val)
- if err != nil {
- return nil, err
- }
-
- default:
- return nil, fmt.Errorf("unrecognized type for module: %s", typ)
- }
-
- // we're done with the raw bytes; allow GC to deallocate
- val.Set(reflect.Zero(typ))
-
- return result, nil
-}
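-
-// [Editor's note] The sketch below is illustrative and not part of the
-// original file. It shows how a host module might declare a guest-module
-// field and load it with LoadModule during provisioning; the Gizmo type
-// and its handler field are hypothetical.
-//
-//    type Gizmo struct {
-//        HandlerRaw json.RawMessage `json:"handler,omitempty" caddy:"namespace=http.handlers inline_key=handler"`
-//        handler    interface{} // set during Provision
-//    }
-//
-//    func (g *Gizmo) Provision(ctx Context) error {
-//        mod, err := ctx.LoadModule(g, "HandlerRaw") // clears HandlerRaw on success
-//        if err != nil {
-//            return fmt.Errorf("loading handler module: %v", err)
-//        }
-//        g.handler = mod
-//        return nil
-//    }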
-
-// loadModulesFromSomeMap loads modules from val, which must be a map[string]json.RawMessage (or ModuleMap).
-// Depending on inlineModuleKey, it will be interpreted as either a ModuleMap (key is the module
-// name) or as a regular map (key is not the module name, and module name is defined inline).
-func (ctx Context) loadModulesFromSomeMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]interface{}, error) {
- // if no inline_key is specified, then val must be a ModuleMap,
- // where the key is the module name
- if inlineModuleKey == "" {
- if !isModuleMapType(val.Type()) {
- panic(fmt.Sprintf("expected ModuleMap because inline_key is empty; but we do not recognize this type: %s", val.Type()))
- }
- return ctx.loadModuleMap(namespace, val)
- }
-
- // otherwise, val is a map with modules, but the module name is
- // inline with each value (the key means something else)
- return ctx.loadModulesFromRegularMap(namespace, inlineModuleKey, val)
-}
-
-// loadModulesFromRegularMap loads modules from val, where val is a map[string]json.RawMessage.
-// Map keys are NOT interpreted as module names, so module names are still expected to appear
-// inline with the objects.
-func (ctx Context) loadModulesFromRegularMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]interface{}, error) {
- mods := make(map[string]interface{})
- iter := val.MapRange()
- for iter.Next() {
- k := iter.Key()
- v := iter.Value()
- mod, err := ctx.loadModuleInline(inlineModuleKey, namespace, v.Interface().(json.RawMessage))
- if err != nil {
- return nil, fmt.Errorf("key %s: %v", k, err)
- }
- mods[k.String()] = mod
- }
- return mods, nil
-}
-
-// loadModuleMap loads modules from a ModuleMap, i.e. map[string]json.RawMessage, where the key is the
-// module name. With a module map, module names do not need to be defined inline with their values.
-func (ctx Context) loadModuleMap(namespace string, val reflect.Value) (map[string]interface{}, error) {
- all := make(map[string]interface{})
- iter := val.MapRange()
- for iter.Next() {
- k := iter.Key().Interface().(string)
- v := iter.Value().Interface().(json.RawMessage)
- moduleName := namespace + "." + k
- if namespace == "" {
- moduleName = k
- }
- val, err := ctx.LoadModuleByID(moduleName, v)
- if err != nil {
- return nil, fmt.Errorf("module name '%s': %v", k, err)
- }
- all[k] = val
- }
- return all, nil
-}
-
-// LoadModuleByID decodes rawMsg into a new instance of mod and
-// returns the value. If mod.New is nil, an error is returned.
-// If the module implements Validator or Provisioner interfaces,
-// those methods are invoked to ensure the module is fully
-// configured and valid before being used.
-//
-// This is a lower-level method and will usually not be called
-// directly by most modules. However, this method is useful when
-// dynamically loading/unloading modules in their own context,
-// like from embedded scripts, etc.
-func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{}, error) {
- modulesMu.RLock()
- mod, ok := modules[id]
- modulesMu.RUnlock()
- if !ok {
- return nil, fmt.Errorf("unknown module: %s", id)
- }
-
- if mod.New == nil {
- return nil, fmt.Errorf("module '%s' has no constructor", mod.ID)
- }
-
- val := mod.New().(interface{})
-
- // value must be a pointer for unmarshaling into concrete type, even if
- // the module's concrete type is a slice or map; New() *should* return
- // a pointer, otherwise unmarshaling errors or panics will occur
- if rv := reflect.ValueOf(val); rv.Kind() != reflect.Ptr {
- log.Printf("[WARNING] ModuleInfo.New() for module '%s' did not return a pointer,"+
- " so we are using reflection to make a pointer instead; please fix this by"+
- " using new(Type) or &Type notation in your module's New() function.", id)
- val = reflect.New(rv.Type()).Elem().Addr().Interface().(Module)
- }
-
- // fill in its config only if there is a config to fill in
- if len(rawMsg) > 0 {
- err := strictUnmarshalJSON(rawMsg, &val)
- if err != nil {
- return nil, fmt.Errorf("decoding module config: %s: %v", mod, err)
- }
- }
-
- if val == nil {
- // returned module values are almost always type-asserted
- // before being used, so a nil value would panic; and there
- // is no good reason to explicitly declare null modules in
- // a config; it might be because the user is trying to achieve
- // a result the developer isn't expecting, which is a smell
- return nil, fmt.Errorf("module value cannot be null")
- }
-
- if prov, ok := val.(Provisioner); ok {
- err := prov.Provision(ctx)
- if err != nil {
- // incomplete provisioning could have left state
- // dangling, so make sure it gets cleaned up
- if cleanerUpper, ok := val.(CleanerUpper); ok {
- err2 := cleanerUpper.Cleanup()
- if err2 != nil {
- err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2)
- }
- }
- return nil, fmt.Errorf("provision %s: %v", mod, err)
- }
- }
-
- if validator, ok := val.(Validator); ok {
- err := validator.Validate()
- if err != nil {
- // since the module was already provisioned, make sure we clean up
- if cleanerUpper, ok := val.(CleanerUpper); ok {
- err2 := cleanerUpper.Cleanup()
- if err2 != nil {
- err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2)
- }
- }
- return nil, fmt.Errorf("%s: invalid configuration: %v", mod, err)
- }
- }
-
- ctx.moduleInstances[id] = append(ctx.moduleInstances[id], val)
-
- return val, nil
-}
-
-// loadModuleInline loads a module from a JSON raw message which decodes to
-// a map[string]interface{}, where one of the object keys is moduleNameKey
-// and the corresponding value is the module name (as a string) which can
-// be found in the given scope. In other words, the module name is declared
-// in-line with the module itself.
-//
-// This allows modules to be decoded into their concrete types and used when
-// their names cannot be the unique key in a map, such as when there are
-// multiple instances in the map or it appears in an array (where there are
-// no custom keys). In other words, the key containing the module name is
-// treated special/separate from all the other keys in the object.
-func (ctx Context) loadModuleInline(moduleNameKey, moduleScope string, raw json.RawMessage) (interface{}, error) {
- moduleName, raw, err := getModuleNameInline(moduleNameKey, raw)
- if err != nil {
- return nil, err
- }
-
- val, err := ctx.LoadModuleByID(moduleScope+"."+moduleName, raw)
- if err != nil {
- return nil, fmt.Errorf("loading module '%s': %v", moduleName, err)
- }
-
- return val, nil
-}
-
-// App returns the configured app named name. If that app has
-// not yet been loaded and provisioned, it will be immediately
-// loaded and provisioned. If no app with that name is
-// configured, a new empty one will be instantiated instead.
-// (The app module must still be registered.) This must not be
-// called during the Provision/Validate phase to reference a
-// module's own host app (since the parent app module is still
-// in the process of being provisioned, it is not yet ready).
-func (ctx Context) App(name string) (interface{}, error) {
- if app, ok := ctx.cfg.apps[name]; ok {
- return app, nil
- }
- appRaw := ctx.cfg.AppsRaw[name]
- modVal, err := ctx.LoadModuleByID(name, appRaw)
- if err != nil {
- return nil, fmt.Errorf("loading %s app module: %v", name, err)
- }
- if appRaw != nil {
- ctx.cfg.AppsRaw[name] = nil // allow GC to deallocate
- }
- ctx.cfg.apps[name] = modVal.(App)
- return modVal, nil
-}
-
-// Storage returns the configured Caddy storage implementation.
-func (ctx Context) Storage() certmagic.Storage {
- return ctx.cfg.storage
-}
-
-// Logger returns a logger that can be used by mod.
-func (ctx Context) Logger(mod Module) *zap.Logger {
- if ctx.cfg == nil {
- // often the case in tests; just use a dev logger
- l, err := zap.NewDevelopment()
- if err != nil {
- panic("config missing, unable to create dev logger: " + err.Error())
- }
- return l
- }
- return ctx.cfg.Logging.Logger(mod)
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/duration_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/duration_fuzz.go
deleted file mode 100644
index e7afed3f..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/duration_fuzz.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build gofuzz
-
-package caddy
-
-func FuzzParseDuration(data []byte) int {
- _, err := ParseDuration(string(data))
- if err != nil {
- return 0
- }
- return 1
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/listeners.go b/vendor/github.com/caddyserver/caddy/v2/listeners.go
deleted file mode 100644
index e1edcd6c..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/listeners.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddy
-
-import (
- "fmt"
- "log"
- "net"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "syscall"
- "time"
-)
-
-// Listen returns a listener suitable for use in a Caddy module.
-// Always be sure to close listeners when you are done with them.
-func Listen(network, addr string) (net.Listener, error) {
- lnKey := network + "/" + addr
-
- listenersMu.Lock()
- defer listenersMu.Unlock()
-
- // if listener already exists, increment usage counter, then return listener
- if lnGlobal, ok := listeners[lnKey]; ok {
- atomic.AddInt32(&lnGlobal.usage, 1)
- return &fakeCloseListener{
- usage: &lnGlobal.usage,
- deadline: &lnGlobal.deadline,
- deadlineMu: &lnGlobal.deadlineMu,
- key: lnKey,
- Listener: lnGlobal.ln,
- }, nil
- }
-
- // or, create new one and save it
- ln, err := net.Listen(network, addr)
- if err != nil {
- return nil, err
- }
-
- // make sure to start its usage counter at 1
- lnGlobal := &globalListener{usage: 1, ln: ln}
- listeners[lnKey] = lnGlobal
-
- return &fakeCloseListener{
- usage: &lnGlobal.usage,
- deadline: &lnGlobal.deadline,
- deadlineMu: &lnGlobal.deadlineMu,
- key: lnKey,
- Listener: ln,
- }, nil
-}
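-
-// [Editor's note] Illustrative usage, not part of the original file; the
-// network and address are hypothetical. Listeners returned here share an
-// underlying socket, so Close only "fake-closes" until the last user is done:
-//
-//    ln, err := Listen("tcp", "localhost:9000")
-//    if err != nil {
-//        return err
-//    }
-//    defer ln.Close() // decrements the shared usage counter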
-
-// ListenPacket returns a net.PacketConn suitable for use in a Caddy module.
-// Always be sure to close the PacketConn when you are done.
-func ListenPacket(network, addr string) (net.PacketConn, error) {
- lnKey := network + "/" + addr
-
- listenersMu.Lock()
- defer listenersMu.Unlock()
-
- // if listener already exists, increment usage counter, then return listener
- if lnGlobal, ok := listeners[lnKey]; ok {
- atomic.AddInt32(&lnGlobal.usage, 1)
- log.Printf("[DEBUG] %s: Usage counter should not go above 2 or maybe 3, is now: %d", lnKey, atomic.LoadInt32(&lnGlobal.usage)) // TODO: remove
- return &fakeClosePacketConn{usage: &lnGlobal.usage, key: lnKey, PacketConn: lnGlobal.pc}, nil
- }
-
- // or, create new one and save it
- pc, err := net.ListenPacket(network, addr)
- if err != nil {
- return nil, err
- }
-
- // make sure to start its usage counter at 1
- lnGlobal := &globalListener{usage: 1, pc: pc}
- listeners[lnKey] = lnGlobal
-
- return &fakeClosePacketConn{usage: &lnGlobal.usage, key: lnKey, PacketConn: pc}, nil
-}
-
-// fakeCloseListener's Close() method is a no-op. This allows
-// stopping servers that are using the listener without giving
-// up the socket; thus, servers become hot-swappable while the
-// listener remains running. Listeners should be re-wrapped in
-// a new fakeCloseListener each time the listener is reused.
-// Other than the 'closed' field (which pertains to this value
-// only), the other fields in this struct should be pointers to
-// the associated globalListener's struct fields (except 'key'
-// which is there for read-only purposes, so it can be a copy).
-type fakeCloseListener struct {
- closed int32 // accessed atomically; belongs to this struct only
- usage *int32 // accessed atomically; global
- deadline *bool // protected by deadlineMu; global
- deadlineMu *sync.Mutex // global
- key string // global, but read-only, so can be copy
- net.Listener // global
-}
-
-// Accept accepts connections until Close() is called.
-func (fcl *fakeCloseListener) Accept() (net.Conn, error) {
- // if the listener is already "closed", return error
- if atomic.LoadInt32(&fcl.closed) == 1 {
- return nil, fcl.fakeClosedErr()
- }
-
- // wrap underlying accept
- conn, err := fcl.Listener.Accept()
- if err == nil {
- return conn, nil
- }
-
- // accept returned with error
- // TODO: This may be better as a condition variable so the deadline is cleared only once?
- fcl.deadlineMu.Lock()
- if *fcl.deadline {
- switch ln := fcl.Listener.(type) {
- case *net.TCPListener:
- _ = ln.SetDeadline(time.Time{})
- case *net.UnixListener:
- _ = ln.SetDeadline(time.Time{})
- }
- *fcl.deadline = false
- }
- fcl.deadlineMu.Unlock()
-
- if atomic.LoadInt32(&fcl.closed) == 1 {
- // if we canceled the Accept() by setting a deadline
- // on the listener, we need to make sure any callers of
- // Accept() think the listener was actually closed;
- // if we return the timeout error instead, callers might
- // simply retry, leaking goroutines for longer
- if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
- return nil, fcl.fakeClosedErr()
- }
- }
-
- return nil, err
-}
-
-// Close stops accepting new connections without
-// closing the underlying listener, unless no one
-// else is using it.
-func (fcl *fakeCloseListener) Close() error {
- if atomic.CompareAndSwapInt32(&fcl.closed, 0, 1) {
- // unfortunately, there is no way to cancel any
- // currently-blocking calls to Accept() that are
- // awaiting connections since we're not actually
- // closing the listener; so we cheat by setting
- // a deadline in the past, which forces it to
- // time out; note that this only works for
- // certain types of listeners...
- fcl.deadlineMu.Lock()
- if !*fcl.deadline {
- switch ln := fcl.Listener.(type) {
- case *net.TCPListener:
- _ = ln.SetDeadline(time.Now().Add(-1 * time.Minute))
- case *net.UnixListener:
- _ = ln.SetDeadline(time.Now().Add(-1 * time.Minute))
- }
- *fcl.deadline = true
- }
- fcl.deadlineMu.Unlock()
-
- // since we're no longer using this listener,
- // decrement the usage counter and, if no one
- // else is using it, close underlying listener
- if atomic.AddInt32(fcl.usage, -1) == 0 {
- listenersMu.Lock()
- delete(listeners, fcl.key)
- listenersMu.Unlock()
- err := fcl.Listener.Close()
- if err != nil {
- return err
- }
- }
-
- }
-
- return nil
-}
-
-func (fcl *fakeCloseListener) fakeClosedErr() error {
- return &net.OpError{
- Op: "accept",
- Net: fcl.Listener.Addr().Network(),
- Addr: fcl.Listener.Addr(),
- Err: errFakeClosed,
- }
-}
-
-type fakeClosePacketConn struct {
- closed int32 // accessed atomically
- usage *int32 // accessed atomically
- key string
- net.PacketConn
-}
-
-func (fcpc *fakeClosePacketConn) Close() error {
- log.Println("[DEBUG] Fake-closing underlying packet conn") // TODO: remove this
-
- if atomic.CompareAndSwapInt32(&fcpc.closed, 0, 1) {
- // since we're no longer using this listener,
- // decrement the usage counter and, if no one
- // else is using it, close underlying listener
- if atomic.AddInt32(fcpc.usage, -1) == 0 {
- listenersMu.Lock()
- delete(listeners, fcpc.key)
- listenersMu.Unlock()
- err := fcpc.PacketConn.Close()
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// Supports QUIC implementation: https://github.com/caddyserver/caddy/issues/3998
-func (fcpc fakeClosePacketConn) SetReadBuffer(bytes int) error {
- if conn, ok := fcpc.PacketConn.(interface{ SetReadBuffer(int) error }); ok {
- return conn.SetReadBuffer(bytes)
- }
- return fmt.Errorf("SetReadBuffer() not implemented for %T", fcpc.PacketConn)
-}
-
-// Supports QUIC implementation: https://github.com/caddyserver/caddy/issues/3998
-func (fcpc fakeClosePacketConn) SyscallConn() (syscall.RawConn, error) {
- if conn, ok := fcpc.PacketConn.(interface {
- SyscallConn() (syscall.RawConn, error)
- }); ok {
- return conn.SyscallConn()
- }
- return nil, fmt.Errorf("SyscallConn() not implemented for %T", fcpc.PacketConn)
-}
-
-// errFakeClosed is the underlying error value returned by
-// fakeCloseListener.Accept() after Close() has been called,
-// indicating that it is pretending to be closed so that the
-// server using it can terminate, while the underlying
-// socket is actually left open.
-var errFakeClosed = fmt.Errorf("listener 'closed' 😉")
-
-// globalListener keeps global state for a listener
-// that may be shared by multiple servers. In other
-// words, values in this struct exist only once and
-// all other uses of these values point to the ones
-// in this struct. In particular, the usage count
-// (how many callers are using the listener), the
-// actual listener, and synchronization of the
-// listener's deadline changes are singular, global
-// values that must not be copied.
-type globalListener struct {
- usage int32 // accessed atomically
- deadline bool
- deadlineMu sync.Mutex
- ln net.Listener
- pc net.PacketConn
-}
-
-// NetworkAddress contains the individual components
-// for a parsed network address of the form accepted
-// by ParseNetworkAddress(). Network should be a
-// network value accepted by Go's net package. Port
-// ranges are given by [StartPort, EndPort].
-type NetworkAddress struct {
- Network string
- Host string
- StartPort uint
- EndPort uint
-}
-
-// IsUnixNetwork returns true if na.Network is
-// unix, unixgram, or unixpacket.
-func (na NetworkAddress) IsUnixNetwork() bool {
- return isUnixNetwork(na.Network)
-}
-
-// JoinHostPort is like net.JoinHostPort, but where the port
-// is StartPort + offset.
-func (na NetworkAddress) JoinHostPort(offset uint) string {
- if na.IsUnixNetwork() {
- return na.Host
- }
- return net.JoinHostPort(na.Host, strconv.Itoa(int(na.StartPort+offset)))
-}
-
-// PortRangeSize returns how many ports are in
-// na's port range. Port ranges are inclusive,
-// so the size is the difference of start and
-// end ports plus one.
-func (na NetworkAddress) PortRangeSize() uint {
- return (na.EndPort - na.StartPort) + 1
-}
-
-func (na NetworkAddress) isLoopback() bool {
- if na.IsUnixNetwork() {
- return true
- }
- if na.Host == "localhost" {
- return true
- }
- if ip := net.ParseIP(na.Host); ip != nil {
- return ip.IsLoopback()
- }
- return false
-}
-
-func (na NetworkAddress) isWildcardInterface() bool {
- if na.Host == "" {
- return true
- }
- if ip := net.ParseIP(na.Host); ip != nil {
- return ip.IsUnspecified()
- }
- return false
-}
-
-func (na NetworkAddress) port() string {
- if na.StartPort == na.EndPort {
- return strconv.FormatUint(uint64(na.StartPort), 10)
- }
- return fmt.Sprintf("%d-%d", na.StartPort, na.EndPort)
-}
-
-// String reconstructs the address string to the form expected
-// by ParseNetworkAddress(). If the address is a unix socket,
-// any non-zero port will be dropped.
-func (na NetworkAddress) String() string {
- return JoinNetworkAddress(na.Network, na.Host, na.port())
-}
-
-func isUnixNetwork(netw string) bool {
- return netw == "unix" || netw == "unixgram" || netw == "unixpacket"
-}
-
-// ParseNetworkAddress parses addr into its individual
-// components. The input string is expected to be of
-// the form "network/host:port-range" where any part is
-// optional. The default network, if unspecified, is tcp.
-// Port ranges are inclusive.
-//
-// Network addresses are distinct from URLs and do not
-// use URL syntax.
-func ParseNetworkAddress(addr string) (NetworkAddress, error) {
- var host, port string
- network, host, port, err := SplitNetworkAddress(addr)
- if network == "" {
- network = "tcp"
- }
- if err != nil {
- return NetworkAddress{}, err
- }
- if isUnixNetwork(network) {
- return NetworkAddress{
- Network: network,
- Host: host,
- }, nil
- }
- ports := strings.SplitN(port, "-", 2)
- if len(ports) == 1 {
- ports = append(ports, ports[0])
- }
- var start, end uint64
- start, err = strconv.ParseUint(ports[0], 10, 16)
- if err != nil {
- return NetworkAddress{}, fmt.Errorf("invalid start port: %v", err)
- }
- end, err = strconv.ParseUint(ports[1], 10, 16)
- if err != nil {
- return NetworkAddress{}, fmt.Errorf("invalid end port: %v", err)
- }
- if end < start {
- return NetworkAddress{}, fmt.Errorf("end port must not be less than start port")
- }
- if (end - start) > maxPortSpan {
- return NetworkAddress{}, fmt.Errorf("port range exceeds %d ports", maxPortSpan)
- }
- return NetworkAddress{
- Network: network,
- Host: host,
- StartPort: uint(start),
- EndPort: uint(end),
- }, nil
-}
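-
-// [Editor's note] Illustrative inputs, not part of the original file; the
-// sample addresses are hypothetical:
-//
-//    "udp/127.0.0.1:5000"   => Network "udp",  Host "127.0.0.1", ports 5000-5000
-//    "localhost:8080-8085"  => Network "tcp",  Host "localhost", ports 8080-8085
-//    "unix//run/caddy.sock" => Network "unix", Host "/run/caddy.sock"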
-
-// SplitNetworkAddress splits a into its network, host, and port components.
-// Note that port may be a port range (:X-Y), or omitted for unix sockets.
-func SplitNetworkAddress(a string) (network, host, port string, err error) {
- if idx := strings.Index(a, "/"); idx >= 0 {
- network = strings.ToLower(strings.TrimSpace(a[:idx]))
- a = a[idx+1:]
- }
- if isUnixNetwork(network) {
- host = a
- return
- }
- host, port, err = net.SplitHostPort(a)
- return
-}
-
-// JoinNetworkAddress combines network, host, and port into a single
-// address string of the form accepted by ParseNetworkAddress(). For
-// unix sockets, the network should be "unix" (or "unixgram" or
-// "unixpacket") and the path to the socket should be given as the
-// host parameter.
-func JoinNetworkAddress(network, host, port string) string {
- var a string
- if network != "" {
- a = network + "/"
- }
- if (host != "" && port == "") || isUnixNetwork(network) {
- a += host
- } else if port != "" {
- a += net.JoinHostPort(host, port)
- }
- return a
-}
-
-// ListenerWrapper is a type that wraps a listener
-// so it can modify the input listener's methods.
-// Modules that implement this interface are found
-// in the caddy.listeners namespace. Usually, to
-// wrap a listener, you will define your own struct
-// type that embeds the input listener, then
-// implement your own methods that you want to wrap,
-// calling the underlying listener's methods where
-// appropriate.
-type ListenerWrapper interface {
- WrapListener(net.Listener) net.Listener
-}
-
-var (
- listeners = make(map[string]*globalListener)
- listenersMu sync.Mutex
-)
-
-const maxPortSpan = 65535
-
-// Interface guards (see https://github.com/caddyserver/caddy/issues/3998)
-var (
- _ (interface{ SetReadBuffer(int) error }) = (*fakeClosePacketConn)(nil)
- _ (interface {
- SyscallConn() (syscall.RawConn, error)
- }) = (*fakeClosePacketConn)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/listeners_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/listeners_fuzz.go
deleted file mode 100644
index 823d0beb..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/listeners_fuzz.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build gofuzz
-
-package caddy
-
-func FuzzParseNetworkAddress(data []byte) int {
- _, err := ParseNetworkAddress(string(data))
- if err != nil {
- return 0
- }
- return 1
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/logging.go b/vendor/github.com/caddyserver/caddy/v2/logging.go
deleted file mode 100644
index 7837145d..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/logging.go
+++ /dev/null
@@ -1,705 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddy
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "strings"
- "sync"
- "time"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
- "golang.org/x/term"
-)
-
-func init() {
- RegisterModule(StdoutWriter{})
- RegisterModule(StderrWriter{})
- RegisterModule(DiscardWriter{})
-}
-
-// Logging facilitates logging within Caddy. The default log is
-// called "default" and you can customize it. You can also define
-// additional logs.
-//
-// By default, all logs at INFO level and higher are written to
-// standard error ("stderr" writer) in a human-readable format
-// ("console" encoder if stdout is an interactive terminal, "json"
-// encoder otherwise).
-//
-// All defined logs accept all log entries by default, but you
-// can filter by level and module/logger names. A logger's name
-// is the same as the module's name, but a module may append to
-// logger names for more specificity. For example, you can
-// filter logs emitted only by HTTP handlers using the name
-// "http.handlers", because all HTTP handler module names have
-// that prefix.
-//
-// Caddy logs (except the sink) are zero-allocation, so they are
-// very high-performing in terms of memory and CPU time. Enabling
-// sampling can further increase throughput on extremely high-load
-// servers.
-type Logging struct {
- // Sink is the destination for all unstructured logs emitted
- // from Go's standard library logger. These logs are common
- // in dependencies that are not designed specifically for use
- // in Caddy. Because it is global and unstructured, the sink
- // lacks most advanced features and customizations.
- Sink *StandardLibLog `json:"sink,omitempty"`
-
- // Logs are your logs, keyed by an arbitrary name of your
- // choosing. The default log can be customized by defining
- // a log called "default". You can further define other logs
- // and filter what kinds of entries they accept.
- Logs map[string]*CustomLog `json:"logs,omitempty"`
-
- // a list of all keys for open writers; all writers
- // that are opened to provision this logging config
- // must have their keys added to this list so they
- // can be closed when cleaning up
- writerKeys []string
-}
-
-// openLogs sets up the config and opens all the configured writers.
-// It closes its logs when ctx is canceled, so it should clean up
-// after itself.
-func (logging *Logging) openLogs(ctx Context) error {
- // make sure to deallocate resources when context is done
- ctx.OnCancel(func() {
- err := logging.closeLogs()
- if err != nil {
- Log().Error("closing logs", zap.Error(err))
- }
- })
-
- // set up the "sink" log first (std lib's default global logger)
- if logging.Sink != nil {
- err := logging.Sink.provision(ctx, logging)
- if err != nil {
- return fmt.Errorf("setting up sink log: %v", err)
- }
- }
-
- // as a special case, set up the default structured Caddy log next
- if err := logging.setupNewDefault(ctx); err != nil {
- return err
- }
-
- // then set up any other custom logs
- for name, l := range logging.Logs {
- // the default log is already set up
- if name == "default" {
- continue
- }
-
- err := l.provision(ctx, logging)
- if err != nil {
- return fmt.Errorf("setting up custom log '%s': %v", name, err)
- }
-
- // Any other logs that use the discard writer can be deleted
- // entirely. This avoids encoding and processing of each
- // log entry that would just be thrown away anyway. Notably,
- // we do not reach this point for the default log, which MUST
- // exist, otherwise core log emissions would panic because
- // they use the Log() function directly which expects a non-nil
- // logger. Even if we keep logs with a discard writer, they
- // have a nop core, and keeping them at all seems unnecessary.
- if _, ok := l.writerOpener.(*DiscardWriter); ok {
- delete(logging.Logs, name)
- continue
- }
- }
-
- return nil
-}
-
-func (logging *Logging) setupNewDefault(ctx Context) error {
- if logging.Logs == nil {
- logging.Logs = make(map[string]*CustomLog)
- }
-
- // extract the user-defined default log, if any
- newDefault := new(defaultCustomLog)
- if userDefault, ok := logging.Logs["default"]; ok {
- newDefault.CustomLog = userDefault
- } else {
- // if none, make one with our own default settings
- var err error
- newDefault, err = newDefaultProductionLog()
- if err != nil {
- return fmt.Errorf("setting up default Caddy log: %v", err)
- }
- logging.Logs["default"] = newDefault.CustomLog
- }
-
- // set up this new log
- err := newDefault.CustomLog.provision(ctx, logging)
- if err != nil {
- return fmt.Errorf("setting up default log: %v", err)
- }
- newDefault.logger = zap.New(newDefault.CustomLog.core)
-
- // redirect the default caddy logs
- defaultLoggerMu.Lock()
- oldDefault := defaultLogger
- defaultLogger = newDefault
- defaultLoggerMu.Unlock()
-
- // if the new writer is different, indicate it in the logs for convenience
- var newDefaultLogWriterKey, currentDefaultLogWriterKey string
- var newDefaultLogWriterStr, currentDefaultLogWriterStr string
- if newDefault.writerOpener != nil {
- newDefaultLogWriterKey = newDefault.writerOpener.WriterKey()
- newDefaultLogWriterStr = newDefault.writerOpener.String()
- }
- if oldDefault.writerOpener != nil {
- currentDefaultLogWriterKey = oldDefault.writerOpener.WriterKey()
- currentDefaultLogWriterStr = oldDefault.writerOpener.String()
- }
- if newDefaultLogWriterKey != currentDefaultLogWriterKey {
- oldDefault.logger.Info("redirected default logger",
- zap.String("from", currentDefaultLogWriterStr),
- zap.String("to", newDefaultLogWriterStr),
- )
- }
-
- return nil
-}
-
-// closeLogs cleans up resources allocated during openLogs.
-// A successful call to openLogs calls this automatically
-// when the context is canceled.
-func (logging *Logging) closeLogs() error {
- for _, key := range logging.writerKeys {
- _, err := writers.Delete(key)
- if err != nil {
- log.Printf("[ERROR] Closing log writer %v: %v", key, err)
- }
- }
- return nil
-}
-
-// Logger returns a logger that is ready for the module to use.
-func (logging *Logging) Logger(mod Module) *zap.Logger {
- modID := string(mod.CaddyModule().ID)
- var cores []zapcore.Core
-
- if logging != nil {
- for _, l := range logging.Logs {
- if l.matchesModule(modID) {
- if len(l.Include) == 0 && len(l.Exclude) == 0 {
- cores = append(cores, l.core)
- continue
- }
- cores = append(cores, &filteringCore{Core: l.core, cl: l})
- }
- }
- }
-
- multiCore := zapcore.NewTee(cores...)
-
- return zap.New(multiCore).Named(modID)
-}
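-
-// [Editor's note] Illustrative usage, not part of the original file; the
-// MyModule type and its logger field are hypothetical:
-//
-//    func (m *MyModule) Provision(ctx Context) error {
-//        m.logger = ctx.Logger(m) // named after the module's ID
-//        m.logger.Debug("provisioned")
-//        return nil
-//    }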
-
-// openWriter opens a writer using opener, and returns true if
-// the writer is new, or false if the writer already exists.
-func (logging *Logging) openWriter(opener WriterOpener) (io.WriteCloser, bool, error) {
- key := opener.WriterKey()
- writer, loaded, err := writers.LoadOrNew(key, func() (Destructor, error) {
- w, err := opener.OpenWriter()
- return writerDestructor{w}, err
- })
- if err != nil {
- return nil, false, err
- }
- logging.writerKeys = append(logging.writerKeys, key)
- return writer.(io.WriteCloser), !loaded, nil
-}
-
-// WriterOpener is a module that can open a log writer.
-// It can return a human-readable string representation
-// of itself so that operators can understand where
-// the logs are going.
-type WriterOpener interface {
- fmt.Stringer
-
- // WriterKey is a string that uniquely identifies this
- // writer configuration. It is not shown to humans.
- WriterKey() string
-
- // OpenWriter opens a log for writing. The writer
- // should be safe for concurrent use but need not
- // be synchronous.
- OpenWriter() (io.WriteCloser, error)
-}
-
-type writerDestructor struct {
- io.WriteCloser
-}
-
-func (wdest writerDestructor) Destruct() error {
- return wdest.Close()
-}
-
-// StandardLibLog configures the default Go standard library
-// global logger in the log package. This is necessary because
-// module dependencies which are not built specifically for
-// Caddy will use the standard logger. This is also known as
-// the "sink" logger.
-type StandardLibLog struct {
- // The module that writes out log entries for the sink.
- WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"`
-
- writer io.WriteCloser
-}
-
-func (sll *StandardLibLog) provision(ctx Context, logging *Logging) error {
- if sll.WriterRaw != nil {
- mod, err := ctx.LoadModule(sll, "WriterRaw")
- if err != nil {
- return fmt.Errorf("loading sink log writer module: %v", err)
- }
- wo := mod.(WriterOpener)
-
- var isNew bool
- sll.writer, isNew, err = logging.openWriter(wo)
- if err != nil {
- return fmt.Errorf("opening sink log writer %#v: %v", mod, err)
- }
-
- if isNew {
- log.Printf("[INFO] Redirecting sink to: %s", wo)
- log.SetOutput(sll.writer)
- log.Printf("[INFO] Redirected sink to here (%s)", wo)
- }
- }
-
- return nil
-}
-
-// CustomLog represents a custom logger configuration.
-//
-// By default, a log will emit all log entries. Some entries
-// will be skipped if sampling is enabled. Further, the Include
-// and Exclude parameters define which loggers (by name) are
-// allowed or rejected from emitting in this log. If both Include
-// and Exclude are populated, their values must be mutually
-// exclusive, and longer namespaces have priority. If neither
-// are populated, all logs are emitted.
-type CustomLog struct {
- // The writer defines where log entries are emitted.
- WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"`
-
- // The encoder is how the log entries are formatted or encoded.
- EncoderRaw json.RawMessage `json:"encoder,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"`
-
- // Level is the minimum level to emit, and is inclusive.
- // Possible levels: DEBUG, INFO, WARN, ERROR, PANIC, and FATAL
- Level string `json:"level,omitempty"`
-
- // Sampling configures log entry sampling. If enabled,
- // only some log entries will be emitted. This is useful
- // for improving performance on extremely high-pressure
- // servers.
- Sampling *LogSampling `json:"sampling,omitempty"`
-
- // Include defines the names of loggers to emit in this
- // log. For example, to include only logs emitted by the
- // admin API, you would include "admin.api".
- Include []string `json:"include,omitempty"`
-
- // Exclude defines the names of loggers that should be
- // skipped by this log. For example, to exclude only
- // HTTP access logs, you would exclude "http.log.access".
- Exclude []string `json:"exclude,omitempty"`
-
- writerOpener WriterOpener
- writer io.WriteCloser
- encoder zapcore.Encoder
- levelEnabler zapcore.LevelEnabler
- core zapcore.Core
-}
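-
-// [Editor's note] Illustrative JSON, not part of the original file, showing
-// the include/exclude split described above; the "access" log name is
-// hypothetical:
-//
-//    {
-//        "logs": {
-//            "default": {"exclude": ["http.log.access"]},
-//            "access": {
-//                "writer": {"output": "stderr"},
-//                "include": ["http.log.access"],
-//                "level": "WARN"
-//            }
-//        }
-//    }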
-
-func (cl *CustomLog) provision(ctx Context, logging *Logging) error {
- // Replace placeholder for log level
- repl := NewReplacer()
- level, err := repl.ReplaceOrErr(cl.Level, true, true)
- if err != nil {
- return fmt.Errorf("invalid log level: %v", err)
- }
- level = strings.ToLower(level)
-
- // set up the log level
- switch level {
- case "debug":
- cl.levelEnabler = zapcore.DebugLevel
- case "", "info":
- cl.levelEnabler = zapcore.InfoLevel
- case "warn":
- cl.levelEnabler = zapcore.WarnLevel
- case "error":
- cl.levelEnabler = zapcore.ErrorLevel
- case "panic":
- cl.levelEnabler = zapcore.PanicLevel
- case "fatal":
- cl.levelEnabler = zapcore.FatalLevel
- default:
- return fmt.Errorf("unrecognized log level: %s", cl.Level)
- }
-
- // If both Include and Exclude lists are populated, then each item must
- // be a superspace or subspace of an item in the other list, because
- // populating both lists means that any given item is either a rule
- // or an exception to another rule. But if the item is not a super-
- // or sub-space of any item in the other list, it is neither a rule
- // nor an exception, and is a contradiction. Ensure, too, that the
- // sets do not intersect, which is also a contradiction.
- if len(cl.Include) > 0 && len(cl.Exclude) > 0 {
- // prevent intersections
- for _, allow := range cl.Include {
- for _, deny := range cl.Exclude {
- if allow == deny {
- return fmt.Errorf("include and exclude must not intersect, but found %s in both lists", allow)
- }
- }
- }
-
- // ensure namespaces are nested
- outer:
- for _, allow := range cl.Include {
- for _, deny := range cl.Exclude {
- if strings.HasPrefix(allow+".", deny+".") ||
- strings.HasPrefix(deny+".", allow+".") {
- continue outer
- }
- }
- return fmt.Errorf("when both include and exclude are populated, each element must be a superspace or subspace of one in the other list; check '%s' in include", allow)
- }
- }
-
- if cl.WriterRaw != nil {
- mod, err := ctx.LoadModule(cl, "WriterRaw")
- if err != nil {
- return fmt.Errorf("loading log writer module: %v", err)
- }
- cl.writerOpener = mod.(WriterOpener)
- }
- if cl.writerOpener == nil {
- cl.writerOpener = StderrWriter{}
- }
-
- cl.writer, _, err = logging.openWriter(cl.writerOpener)
- if err != nil {
- return fmt.Errorf("opening log writer using %#v: %v", cl.writerOpener, err)
- }
-
- if cl.EncoderRaw != nil {
- mod, err := ctx.LoadModule(cl, "EncoderRaw")
- if err != nil {
- return fmt.Errorf("loading log encoder module: %v", err)
- }
- cl.encoder = mod.(zapcore.Encoder)
- }
- if cl.encoder == nil {
- // only allow colorized output if this log is going to stdout or stderr
- var colorize bool
- switch cl.writerOpener.(type) {
- case StdoutWriter, StderrWriter,
- *StdoutWriter, *StderrWriter:
- colorize = true
- }
- cl.encoder = newDefaultProductionLogEncoder(colorize)
- }
-
- cl.buildCore()
-
- return nil
-}
-
-func (cl *CustomLog) buildCore() {
- // logs which only discard their output don't need
- // to perform encoding or any other processing steps
-// at all, so just shortcut to a nop core instead
- if _, ok := cl.writerOpener.(*DiscardWriter); ok {
- cl.core = zapcore.NewNopCore()
- return
- }
- c := zapcore.NewCore(
- cl.encoder,
- zapcore.AddSync(cl.writer),
- cl.levelEnabler,
- )
- if cl.Sampling != nil {
- if cl.Sampling.Interval == 0 {
- cl.Sampling.Interval = 1 * time.Second
- }
- if cl.Sampling.First == 0 {
- cl.Sampling.First = 100
- }
- if cl.Sampling.Thereafter == 0 {
- cl.Sampling.Thereafter = 100
- }
- c = zapcore.NewSamplerWithOptions(c, cl.Sampling.Interval,
- cl.Sampling.First, cl.Sampling.Thereafter)
- }
- cl.core = c
-}
-
-func (cl *CustomLog) matchesModule(moduleID string) bool {
- return cl.loggerAllowed(moduleID, true)
-}
-
-// loggerAllowed returns true if name is allowed to emit
-// to cl. isModule should be true if name is the name of
-// a module and you want to see if ANY of that module's
-// logs would be permitted.
-func (cl *CustomLog) loggerAllowed(name string, isModule bool) bool {
- // accept all loggers by default
- if len(cl.Include) == 0 && len(cl.Exclude) == 0 {
- return true
- }
-
- // append a dot so that partial names don't match
- // (i.e. we don't want "foo.b" to match "foo.bar"); we
- // will also have to append a dot when we do HasPrefix
-// below to compensate for when namespaces are equal
- if name != "" && name != "*" && name != "." {
- name += "."
- }
-
- var longestAccept, longestReject int
-
- if len(cl.Include) > 0 {
- for _, namespace := range cl.Include {
- var hasPrefix bool
- if isModule {
- hasPrefix = strings.HasPrefix(namespace+".", name)
- } else {
- hasPrefix = strings.HasPrefix(name, namespace+".")
- }
- if hasPrefix && len(namespace) > longestAccept {
- longestAccept = len(namespace)
- }
- }
- // the include list was populated, meaning that
- // a match in this list is absolutely required
- // if we are to accept the entry
- if longestAccept == 0 {
- return false
- }
- }
-
- if len(cl.Exclude) > 0 {
- for _, namespace := range cl.Exclude {
- // * == all logs emitted by modules
- // . == all logs emitted by core
- if (namespace == "*" && name != ".") ||
- (namespace == "." && name == ".") {
- return false
- }
- if strings.HasPrefix(name, namespace+".") &&
- len(namespace) > longestReject {
- longestReject = len(namespace)
- }
- }
- // the reject list is populated, so we have to
- // reject this entry if its match is better
- // than the best from the accept list
- if longestReject > longestAccept {
- return false
- }
- }
-
- return (longestAccept > longestReject) ||
- (len(cl.Include) == 0 && longestReject == 0)
-}
-
-// filteringCore filters log entries based on logger name,
-// according to the rules of a CustomLog.
-type filteringCore struct {
- zapcore.Core
- cl *CustomLog
-}
-
-// With properly wraps With.
-func (fc *filteringCore) With(fields []zapcore.Field) zapcore.Core {
- return &filteringCore{
- Core: fc.Core.With(fields),
- cl: fc.cl,
- }
-}
-
-// Check only allows the log entry if its logger name
-// is allowed from the include/exclude rules of fc.cl.
-func (fc *filteringCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
- if fc.cl.loggerAllowed(e.LoggerName, false) {
- return fc.Core.Check(e, ce)
- }
- return ce
-}
-
-// LogSampling configures log entry sampling.
-type LogSampling struct {
- // The window over which to conduct sampling.
- Interval time.Duration `json:"interval,omitempty"`
-
- // Log this many entries within a given level and
- // message for each interval.
- First int `json:"first,omitempty"`
-
- // If more entries with the same level and message
- // are seen during the same interval, keep one in
- // this many entries until the end of the interval.
- Thereafter int `json:"thereafter,omitempty"`
-}
-
-type (
- // StdoutWriter writes logs to standard out.
- StdoutWriter struct{}
-
- // StderrWriter writes logs to standard error.
- StderrWriter struct{}
-
- // DiscardWriter discards all writes.
- DiscardWriter struct{}
-)
-
-// CaddyModule returns the Caddy module information.
-func (StdoutWriter) CaddyModule() ModuleInfo {
- return ModuleInfo{
- ID: "caddy.logging.writers.stdout",
- New: func() Module { return new(StdoutWriter) },
- }
-}
-
-// CaddyModule returns the Caddy module information.
-func (StderrWriter) CaddyModule() ModuleInfo {
- return ModuleInfo{
- ID: "caddy.logging.writers.stderr",
- New: func() Module { return new(StderrWriter) },
- }
-}
-
-// CaddyModule returns the Caddy module information.
-func (DiscardWriter) CaddyModule() ModuleInfo {
- return ModuleInfo{
- ID: "caddy.logging.writers.discard",
- New: func() Module { return new(DiscardWriter) },
- }
-}
-
-func (StdoutWriter) String() string { return "stdout" }
-func (StderrWriter) String() string { return "stderr" }
-func (DiscardWriter) String() string { return "discard" }
-
-// WriterKey returns a unique key representing stdout.
-func (StdoutWriter) WriterKey() string { return "std:out" }
-
-// WriterKey returns a unique key representing stderr.
-func (StderrWriter) WriterKey() string { return "std:err" }
-
-// WriterKey returns a unique key representing discard.
-func (DiscardWriter) WriterKey() string { return "discard" }
-
-// OpenWriter returns os.Stdout that can't be closed.
-func (StdoutWriter) OpenWriter() (io.WriteCloser, error) {
- return notClosable{os.Stdout}, nil
-}
-
-// OpenWriter returns os.Stderr that can't be closed.
-func (StderrWriter) OpenWriter() (io.WriteCloser, error) {
- return notClosable{os.Stderr}, nil
-}
-
-// OpenWriter returns ioutil.Discard that can't be closed.
-func (DiscardWriter) OpenWriter() (io.WriteCloser, error) {
- return notClosable{ioutil.Discard}, nil
-}
-
-// notClosable is an io.WriteCloser that can't be closed.
-type notClosable struct{ io.Writer }
-
-func (fc notClosable) Close() error { return nil }
-
-type defaultCustomLog struct {
- *CustomLog
- logger *zap.Logger
-}
-
-// newDefaultProductionLog configures a custom log that is
-// intended for use by default if no other log is specified
-// in a config. It writes to stderr, uses the console encoder,
-// and enables INFO-level logs and higher.
-func newDefaultProductionLog() (*defaultCustomLog, error) {
- cl := new(CustomLog)
- cl.writerOpener = StderrWriter{}
- var err error
- cl.writer, err = cl.writerOpener.OpenWriter()
- if err != nil {
- return nil, err
- }
- cl.encoder = newDefaultProductionLogEncoder(true)
- cl.levelEnabler = zapcore.InfoLevel
-
- cl.buildCore()
-
- return &defaultCustomLog{
- CustomLog: cl,
- logger: zap.New(cl.core),
- }, nil
-}
-
-func newDefaultProductionLogEncoder(colorize bool) zapcore.Encoder {
- encCfg := zap.NewProductionEncoderConfig()
- if term.IsTerminal(int(os.Stdout.Fd())) {
- // if interactive terminal, make output more human-readable by default
- encCfg.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) {
- encoder.AppendString(ts.UTC().Format("2006/01/02 15:04:05.000"))
- }
- if colorize {
- encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
- }
- return zapcore.NewConsoleEncoder(encCfg)
- }
- return zapcore.NewJSONEncoder(encCfg)
-}
-
-// Log returns the current default logger.
-func Log() *zap.Logger {
- defaultLoggerMu.RLock()
- defer defaultLoggerMu.RUnlock()
- return defaultLogger.logger
-}
-
-var (
- defaultLogger, _ = newDefaultProductionLog()
- defaultLoggerMu sync.RWMutex
-)
-
-var writers = NewUsagePool()
-
-// Interface guards
-var (
- _ io.WriteCloser = (*notClosable)(nil)
- _ WriterOpener = (*StdoutWriter)(nil)
- _ WriterOpener = (*StderrWriter)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/metrics.go b/vendor/github.com/caddyserver/caddy/v2/metrics.go
deleted file mode 100644
index ab9d7978..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/metrics.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package caddy
-
-import (
- "net/http"
- "strconv"
- "strings"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/collectors"
- "github.com/prometheus/client_golang/prometheus/promauto"
-)
-
-// define and register the metrics used in this package.
-func init() {
- prometheus.MustRegister(collectors.NewBuildInfoCollector())
-
- const ns, sub = "caddy", "admin"
-
- adminMetrics.requestCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Namespace: ns,
- Subsystem: sub,
- Name: "http_requests_total",
- Help: "Counter of requests made to the Admin API's HTTP endpoints.",
- }, []string{"handler", "path", "code", "method"})
- adminMetrics.requestErrors = promauto.NewCounterVec(prometheus.CounterOpts{
- Namespace: ns,
- Subsystem: sub,
- Name: "http_request_errors_total",
- Help: "Number of requests resulting in middleware errors.",
- }, []string{"handler", "path", "method"})
-}
-
-// adminMetrics is a collection of metrics that can be tracked for the admin API.
-var adminMetrics = struct {
- requestCount *prometheus.CounterVec
- requestErrors *prometheus.CounterVec
-}{}
-
-// Similar to promhttp.InstrumentHandlerCounter, but upper-cases method names
-// instead of lower-casing them.
-//
-// Unlike promhttp.InstrumentHandlerCounter, this assumes "code" and "method"
-// labels are present, and will panic otherwise.
-func instrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- d := newDelegator(w)
- next.ServeHTTP(d, r)
- counter.With(prometheus.Labels{
- "code": sanitizeCode(d.status),
- "method": strings.ToUpper(r.Method),
- }).Inc()
- })
-}
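-
-// [Editor's note] Illustrative wiring, not part of the original file; mux and
-// loadHandler are hypothetical. The "handler" and "path" labels are curried
-// so that only "code" and "method" remain for the closure to fill in:
-//
-//    mux.Handle("/load", instrumentHandlerCounter(
-//        adminMetrics.requestCount.MustCurryWith(prometheus.Labels{"handler": "admin", "path": "/load"}),
-//        loadHandler))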
-
-func newDelegator(w http.ResponseWriter) *delegator {
- return &delegator{
- ResponseWriter: w,
- }
-}
-
-type delegator struct {
- http.ResponseWriter
- status int
-}
-
-func (d *delegator) WriteHeader(code int) {
- d.status = code
- d.ResponseWriter.WriteHeader(code)
-}
-
-func sanitizeCode(s int) string {
- switch s {
- case 0, 200:
- return "200"
- default:
- return strconv.Itoa(s)
- }
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules.go b/vendor/github.com/caddyserver/caddy/v2/modules.go
deleted file mode 100644
index 0f4a563b..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules.go
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddy
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "reflect"
- "sort"
- "strings"
- "sync"
-)
-
-// Module is a type that is used as a Caddy module. In
-// addition to this interface, most modules will implement
-// some interface expected by their host module in order
-// to be useful. To learn which interface(s) to implement,
-// see the documentation for the host module. At a bare
-// minimum, this interface, when implemented, only provides
-// the module's ID and constructor function.
-//
-// Modules will often implement additional interfaces
-// including Provisioner, Validator, and CleanerUpper.
-// If a module implements these interfaces, their
-// methods are called during the module's lifespan.
-//
-// When a module is loaded by a host module, the following
-// happens: 1) ModuleInfo.New() is called to get a new
-// instance of the module. 2) The module's configuration is
-// unmarshaled into that instance. 3) If the module is a
-// Provisioner, the Provision() method is called. 4) If the
-// module is a Validator, the Validate() method is called.
-// 5) The module will probably be type-asserted from
-// interface{} to some other, more useful interface expected
-// by the host module. For example, HTTP handler modules are
-// type-asserted as caddyhttp.MiddlewareHandler values.
-// 6) When a module's containing Context is canceled, if it is
-// a CleanerUpper, its Cleanup() method is called.
-type Module interface {
- // This method indicates that the type is a Caddy
- // module. The returned ModuleInfo must have both
- // a name and a constructor function. This method
- // must not have any side-effects.
- CaddyModule() ModuleInfo
-}
-
-// ModuleInfo represents a registered Caddy module.
-type ModuleInfo struct {
- // ID is the "full name" of the module. It
- // must be unique and properly namespaced.
- ID ModuleID
-
- // New returns a pointer to a new, empty
- // instance of the module's type. This
- // method must not have any side-effects,
- // and no other initialization should
- // occur within it. Any initialization
- // of the returned value should be done
- // in a Provision() method (see the
- // Provisioner interface).
- New func() Module
-}
-
-// ModuleID is a string that uniquely identifies a Caddy module. A
-// module ID is lightly structured. It consists of dot-separated
-// labels which form a simple hierarchy from left to right. The last
-// label is the module name, and the labels before that constitute
-// the namespace (or scope).
-//
-// Thus, a module ID has the form: <namespace>.<name>
-//
-// An ID with no dot has the empty namespace, which is appropriate
-// for app modules (these are "top-level" modules that Caddy core
-// loads and runs).
-//
-// Module IDs should be lowercase and use underscores (_) instead of
-// spaces.
-//
-// Examples of valid IDs:
-// - http
-// - http.handlers.file_server
-// - caddy.logging.encoders.json
-type ModuleID string
-
-// Namespace returns the namespace (or scope) portion of a module ID,
-// which is all but the last label of the ID. If the ID has only one
-// label, then the namespace is empty.
-func (id ModuleID) Namespace() string {
- lastDot := strings.LastIndex(string(id), ".")
- if lastDot < 0 {
- return ""
- }
- return string(id)[:lastDot]
-}
-
-// Name returns the Name (last element) of a module ID.
-func (id ModuleID) Name() string {
- if id == "" {
- return ""
- }
- parts := strings.Split(string(id), ".")
- return parts[len(parts)-1]
-}
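-
-// [Editor's note] Illustrative values, not part of the original file:
-//
-//    ModuleID("http.handlers.file_server").Namespace() // "http.handlers"
-//    ModuleID("http.handlers.file_server").Name()      // "file_server"
-//    ModuleID("http").Namespace()                      // ""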
-
-func (mi ModuleInfo) String() string { return string(mi.ID) }
-
-// ModuleMap is a map that can contain multiple modules,
-// where the map key is the module's name. (The namespace
-// is usually read from an associated field's struct tag.)
-// Because the module's name is given as the key in a
-// module map, the name does not have to be given in the
-// json.RawMessage.
-type ModuleMap map[string]json.RawMessage
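-
-// [Editor's note] Illustrative JSON for a ModuleMap field, not part of the
-// original file; assuming a field tagged caddy:"namespace=http.matchers",
-// each key names a module in that namespace:
-//
-//    {"host": ["example.com"], "path": ["/api/*"]}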
-
-// RegisterModule registers a module by receiving a
-// plain/empty value of the module. For registration to
-// be properly recorded, this should be called in the
-// init phase of runtime. Typically, the module package
-// will do this as a side-effect of being imported.
-// This function panics if the module's info is
-// incomplete or invalid, or if the module is already
-// registered.
-func RegisterModule(instance Module) {
- mod := instance.CaddyModule()
-
- if mod.ID == "" {
- panic("module ID missing")
- }
- if mod.ID == "caddy" || mod.ID == "admin" {
- panic(fmt.Sprintf("module ID '%s' is reserved", mod.ID))
- }
- if mod.New == nil {
- panic("missing ModuleInfo.New")
- }
- if val := mod.New(); val == nil {
- panic("ModuleInfo.New must return a non-nil module instance")
- }
-
- modulesMu.Lock()
- defer modulesMu.Unlock()
-
- if _, ok := modules[string(mod.ID)]; ok {
- panic(fmt.Sprintf("module already registered: %s", mod.ID))
- }
- modules[string(mod.ID)] = mod
-}
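-
-// [Editor's note] Illustrative registration, not part of the original file;
-// the MyModule type is hypothetical:
-//
-//    func init() { RegisterModule(MyModule{}) }
-//
-//    func (MyModule) CaddyModule() ModuleInfo {
-//        return ModuleInfo{
-//            ID:  "http.handlers.my_module",
-//            New: func() Module { return new(MyModule) },
-//        }
-//    }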
-
-// GetModule returns module information from its ID (full name).
-func GetModule(name string) (ModuleInfo, error) {
- modulesMu.RLock()
- defer modulesMu.RUnlock()
- m, ok := modules[name]
- if !ok {
- return ModuleInfo{}, fmt.Errorf("module not registered: %s", name)
- }
- return m, nil
-}
-
-// GetModuleName returns a module's name (the last label of its ID)
-// from an instance of its value. If the value is not a module, an
-// empty string will be returned.
-func GetModuleName(instance interface{}) string {
- var name string
- if mod, ok := instance.(Module); ok {
- name = mod.CaddyModule().ID.Name()
- }
- return name
-}
-
-// GetModuleID returns a module's ID from an instance of its value.
-// If the value is not a module, an empty string will be returned.
-func GetModuleID(instance interface{}) string {
- var id string
- if mod, ok := instance.(Module); ok {
- id = string(mod.CaddyModule().ID)
- }
- return id
-}
-
-// GetModules returns all modules in the given scope/namespace.
-// For example, a scope of "foo" returns modules named "foo.bar",
-// "foo.loo", but not "bar", "foo.bar.loo", etc. An empty scope
-// returns top-level modules, for example "foo" or "bar". Partial
-// scopes are not matched (i.e. scope "foo.ba" does not match
-// name "foo.bar").
-//
-// Because modules are registered to a map under the hood, the
-// returned slice will be sorted to keep it deterministic.
-func GetModules(scope string) []ModuleInfo {
- modulesMu.RLock()
- defer modulesMu.RUnlock()
-
- scopeParts := strings.Split(scope, ".")
-
- // handle the special case of an empty scope, which
- // should match only the top-level modules
- if scope == "" {
- scopeParts = []string{}
- }
-
- var mods []ModuleInfo
-iterateModules:
- for id, m := range modules {
- modParts := strings.Split(id, ".")
-
- // match only the next level of nesting
- if len(modParts) != len(scopeParts)+1 {
- continue
- }
-
- // specified parts must be exact matches
- for i := range scopeParts {
- if modParts[i] != scopeParts[i] {
- continue iterateModules
- }
- }
-
- mods = append(mods, m)
- }
-
- // make return value deterministic
- sort.Slice(mods, func(i, j int) bool {
- return mods[i].ID < mods[j].ID
- })
-
- return mods
-}
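-
-// For example, assuming the usual standard modules are registered:
-//
-//	GetModules("http.handlers") // e.g. "http.handlers.file_server", ...
-//	GetModules("")              // top-level modules, e.g. "http"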
-
-// Modules returns the names of all registered modules
-// in ascending lexicographical order.
-func Modules() []string {
- modulesMu.RLock()
- defer modulesMu.RUnlock()
-
- names := make([]string, 0, len(modules))
- for name := range modules {
- names = append(names, name)
- }
-
- sort.Strings(names)
-
- return names
-}
-
-// getModuleNameInline loads the string value of moduleNameKey from raw,
-// which must be a JSON encoding of a map. It returns that value,
-// along with the result of removing that key from raw.
-func getModuleNameInline(moduleNameKey string, raw json.RawMessage) (string, json.RawMessage, error) {
- var tmp map[string]interface{}
- err := json.Unmarshal(raw, &tmp)
- if err != nil {
- return "", nil, err
- }
-
- moduleName, ok := tmp[moduleNameKey].(string)
- if !ok || moduleName == "" {
- return "", nil, fmt.Errorf("module name not specified with key '%s' in %+v", moduleNameKey, tmp)
- }
-
- // remove key from the object, otherwise decoding it later
- // will yield an error because the struct won't recognize it
- // (this is only needed because we strictly enforce that
- // all keys are recognized when loading modules)
- delete(tmp, moduleNameKey)
- result, err := json.Marshal(tmp)
- if err != nil {
- return "", nil, fmt.Errorf("re-encoding module configuration: %v", err)
- }
-
- return moduleName, result, nil
-}
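-
-// For example, with moduleNameKey "handler" (an illustrative input):
-//
-//	in:  {"handler": "file_server", "root": "/srv"}
-//	out: moduleName = "file_server", result = {"root":"/srv"}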
-
-// Provisioner is implemented by modules which may need to perform
-// some additional "setup" steps immediately after being loaded.
-// Provisioning should be fast (imperceptible running time). If
-// any side-effects result from the execution of this function (e.g.
-// creating global state, any other allocations which require
-// garbage collection, opening files, starting goroutines etc.),
-// be sure to clean up properly by implementing the CleanerUpper
-// interface to avoid leaking resources.
-type Provisioner interface {
- Provision(Context) error
-}
-
-// Validator is implemented by modules which can verify that their
-// configurations are valid. This method will be called after
-// Provision() (if implemented). Validation should always be fast
-// (imperceptible running time) and an error must be returned if
-// the module's configuration is invalid.
-type Validator interface {
- Validate() error
-}
-
-// CleanerUpper is implemented by modules which may have side-effects
-// such as opened files, spawned goroutines, or allocated some sort
-// of non-stack state when they were provisioned. This method should
-// deallocate/cleanup those resources to prevent memory leaks. Cleanup
-// should be fast and efficient. Cleanup should work even if Provision
-// returns an error, to allow cleaning up from partial provisionings.
-type CleanerUpper interface {
- Cleanup() error
-}
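-
-// A minimal sketch of the full lifecycle, using a hypothetical module
-// (not from this file): Provision acquires resources, Validate checks
-// the config, and Cleanup releases whatever Provision acquired, even
-// after a partial provisioning:
-//
-//	func (g *Gizmo) Provision(ctx Context) error {
-//		f, err := os.Open(g.Path)
-//		if err != nil {
-//			return err
-//		}
-//		g.file = f // released in Cleanup
-//		return nil
-//	}
-//
-//	func (g *Gizmo) Validate() error {
-//		if g.Path == "" {
-//			return fmt.Errorf("path is required")
-//		}
-//		return nil
-//	}
-//
-//	func (g *Gizmo) Cleanup() error {
-//		if g.file != nil {
-//			return g.file.Close()
-//		}
-//		return nil
-//	}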
-
-// ParseStructTag parses a caddy struct tag into its keys and values.
-// It is very simple. The expected syntax is:
-// `caddy:"key1=val1 key2=val2 ..."`
-func ParseStructTag(tag string) (map[string]string, error) {
- results := make(map[string]string)
- pairs := strings.Split(tag, " ")
- for i, pair := range pairs {
- if pair == "" {
- continue
- }
- parts := strings.SplitN(pair, "=", 2)
- if len(parts) != 2 {
- return nil, fmt.Errorf("missing key in '%s' (pair %d)", pair, i)
- }
- results[parts[0]] = parts[1]
- }
- return results, nil
-}
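-
-// For example:
-//
-//	ParseStructTag("namespace=http.handlers inline_key=handler")
-//	// => map[string]string{"namespace": "http.handlers", "inline_key": "handler"}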
-
-// strictUnmarshalJSON is like json.Unmarshal but returns an error
-// if any of the fields are unrecognized. Useful when decoding
-// module configurations, where you want to be more sure they're
-// correct.
-func strictUnmarshalJSON(data []byte, v interface{}) error {
- dec := json.NewDecoder(bytes.NewReader(data))
- dec.DisallowUnknownFields()
- return dec.Decode(v)
-}
-
-// isJSONRawMessage returns true if the type is encoding/json.RawMessage.
-func isJSONRawMessage(typ reflect.Type) bool {
- return typ.PkgPath() == "encoding/json" && typ.Name() == "RawMessage"
-}
-
-// isModuleMapType returns true if the type is map[string]json.RawMessage.
-// It assumes that the string key is the module name, but this is not
-// always the case. To know for sure, this function must return true, but
-// also the struct tag where this type appears must NOT define an inline_key
-// attribute, which would mean that the module names appear inline with the
-// values, not in the key.
-func isModuleMapType(typ reflect.Type) bool {
- return typ.Kind() == reflect.Map &&
- typ.Key().Kind() == reflect.String &&
- isJSONRawMessage(typ.Elem())
-}
-
-var (
- modules = make(map[string]ModuleInfo)
- modulesMu sync.RWMutex
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/app.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/app.go
deleted file mode 100644
index 8285200f..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/app.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "net"
- "net/http"
- "strconv"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/modules/caddytls"
- "github.com/lucas-clemente/quic-go/http3"
- "go.uber.org/zap"
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/h2c"
-)
-
-func init() {
- caddy.RegisterModule(App{})
-}
-
-// App is a robust, production-ready HTTP server.
-//
-// HTTPS is enabled by default if host matchers with qualifying names are
-// used in any of the routes; certificates are automatically provisioned
-// and renewed. Automatic HTTPS also enables HTTPS for servers that listen
-// only on the HTTPS port but do not have any TLS connection policies
-// defined, by adding a good, default TLS connection policy.
-//
-// In HTTP routes, additional placeholders are available (replace any `*`):
-//
-// Placeholder | Description
-// ------------|---------------
-// `{http.request.body}` | The request body (⚠️ inefficient; use only for debugging)
-// `{http.request.cookie.*}` | HTTP request cookie
-// `{http.request.duration}` | Time up to now spent handling the request (after decoding headers from client)
-// `{http.request.header.*}` | Specific request header field
-// `{http.request.host.labels.*}` | Request host labels (0-based from right); e.g. for foo.example.com: 0=com, 1=example, 2=foo
-// `{http.request.host}` | The host part of the request's Host header
-// `{http.request.hostport}` | The host and port from the request's Host header
-// `{http.request.method}` | The request method
-// `{http.request.orig_method}` | The request's original method
-// `{http.request.orig_uri.path.dir}` | The request's original directory
-// `{http.request.orig_uri.path.file}` | The request's original filename
-// `{http.request.orig_uri.path}` | The request's original path
-// `{http.request.orig_uri.query}` | The request's original query string (without `?`)
-// `{http.request.orig_uri}` | The request's original URI
-// `{http.request.port}` | The port part of the request's Host header
-// `{http.request.proto}` | The protocol of the request
-// `{http.request.remote.host}` | The host part of the remote client's address
-// `{http.request.remote.port}` | The port part of the remote client's address
-// `{http.request.remote}` | The address of the remote client
-// `{http.request.scheme}` | The request scheme
-// `{http.request.tls.version}` | The TLS version name
-// `{http.request.tls.cipher_suite}` | The TLS cipher suite
-// `{http.request.tls.resumed}` | The TLS connection resumed a previous connection
-// `{http.request.tls.proto}` | The negotiated next protocol
-// `{http.request.tls.proto_mutual}` | The negotiated next protocol was advertised by the server
-// `{http.request.tls.server_name}` | The server name requested by the client, if any
-// `{http.request.tls.client.fingerprint}` | The SHA256 checksum of the client certificate
-// `{http.request.tls.client.public_key}` | The public key of the client certificate.
-// `{http.request.tls.client.public_key_sha256}` | The SHA256 checksum of the client's public key.
-// `{http.request.tls.client.certificate_pem}` | The PEM-encoded value of the certificate.
-// `{http.request.tls.client.issuer}` | The issuer DN of the client certificate
-// `{http.request.tls.client.serial}` | The serial number of the client certificate
-// `{http.request.tls.client.subject}` | The subject DN of the client certificate
-// `{http.request.tls.client.san.dns_names.*}` | SAN DNS names (index optional)
-// `{http.request.tls.client.san.emails.*}` | SAN email addresses (index optional)
-// `{http.request.tls.client.san.ips.*}` | SAN IP addresses (index optional)
-// `{http.request.tls.client.san.uris.*}` | SAN URIs (index optional)
-// `{http.request.uri.path.*}` | Parts of the path, split by `/` (0-based from left)
-// `{http.request.uri.path.dir}` | The directory, excluding leaf filename
-// `{http.request.uri.path.file}` | The filename of the path, excluding directory
-// `{http.request.uri.path}` | The path component of the request URI
-// `{http.request.uri.query.*}` | Individual query string value
-// `{http.request.uri.query}` | The query string (without `?`)
-// `{http.request.uri}` | The full request URI
-// `{http.response.header.*}` | Specific response header field
-// `{http.vars.*}` | Custom variables in the HTTP handler chain
-type App struct {
- // HTTPPort specifies the port to use for HTTP (as opposed to HTTPS),
- // which is used when setting up HTTP->HTTPS redirects or ACME HTTP
- // challenge solvers. Default: 80.
- HTTPPort int `json:"http_port,omitempty"`
-
- // HTTPSPort specifies the port to use for HTTPS, which is used when
- // solving the ACME TLS-ALPN challenges, or whenever HTTPS is needed
- // but no specific port number is given. Default: 443.
- HTTPSPort int `json:"https_port,omitempty"`
-
- // GracePeriod is how long to wait for active connections when shutting
- // down the server. Once the grace period is over, connections will
- // be forcefully closed.
- GracePeriod caddy.Duration `json:"grace_period,omitempty"`
-
- // Servers is the list of servers, keyed by arbitrary names chosen
- // at your discretion for your own convenience; the keys do not
- // affect functionality.
- Servers map[string]*Server `json:"servers,omitempty"`
-
- servers []*http.Server
- h3servers []*http3.Server
- h3listeners []net.PacketConn
-
- ctx caddy.Context
- logger *zap.Logger
- tlsApp *caddytls.TLS
-
- // used temporarily between phases 1 and 2 of auto HTTPS
- allCertDomains []string
-}
-
-// CaddyModule returns the Caddy module information.
-func (App) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http",
- New: func() caddy.Module { return new(App) },
- }
-}
-
-// Provision sets up the app.
-func (app *App) Provision(ctx caddy.Context) error {
- // store some references
- tlsAppIface, err := ctx.App("tls")
- if err != nil {
- return fmt.Errorf("getting tls app: %v", err)
- }
- app.tlsApp = tlsAppIface.(*caddytls.TLS)
- app.ctx = ctx
- app.logger = ctx.Logger(app)
-
- repl := caddy.NewReplacer()
-
- // this provisions the matchers for each route,
- // and prepares auto HTTP->HTTPS redirects, and
- // is required before we provision each server
- err = app.automaticHTTPSPhase1(ctx, repl)
- if err != nil {
- return err
- }
-
- // prepare each server
- for srvName, srv := range app.Servers {
- srv.name = srvName
- srv.tlsApp = app.tlsApp
- srv.logger = app.logger.Named("log")
- srv.errorLogger = app.logger.Named("log.error")
-
- // only enable access logs if configured
- if srv.Logs != nil {
- srv.accessLogger = app.logger.Named("log.access")
- }
-
- // if not explicitly configured by the user, disallow TLS
- // client auth bypass (domain fronting) which could
- // otherwise be exploited by sending an unprotected SNI
- // value during a TLS handshake, then putting a protected
- // domain in the Host header after establishing connection;
- // this is a safe default, but we allow users to override
- // it for example in the case of running a proxy where
- // domain fronting is desired and access is not restricted
- // based on hostname
- if srv.StrictSNIHost == nil && srv.hasTLSClientAuth() {
- app.logger.Warn("enabling strict SNI-Host enforcement because TLS client auth is configured",
- zap.String("server_id", srvName),
- )
- trueBool := true
- srv.StrictSNIHost = &trueBool
- }
-
- // process each listener address
- for i := range srv.Listen {
- lnOut, err := repl.ReplaceOrErr(srv.Listen[i], true, true)
- if err != nil {
- return fmt.Errorf("server %s, listener %d: %v",
- srvName, i, err)
- }
- srv.Listen[i] = lnOut
- }
-
- // set up each listener modifier
- if srv.ListenerWrappersRaw != nil {
- vals, err := ctx.LoadModule(srv, "ListenerWrappersRaw")
- if err != nil {
- return fmt.Errorf("loading listener wrapper modules: %v", err)
- }
- var hasTLSPlaceholder bool
- for i, val := range vals.([]interface{}) {
- if _, ok := val.(*tlsPlaceholderWrapper); ok {
- if i == 0 {
- // putting the tls placeholder wrapper first is nonsensical because
- // that is the default, implicit setting: without it, all wrappers
- // will go after the TLS listener anyway
- return fmt.Errorf("it is unnecessary to specify the TLS listener wrapper in the first position because that is the default")
- }
- if hasTLSPlaceholder {
- return fmt.Errorf("TLS listener wrapper can only be specified once")
- }
- hasTLSPlaceholder = true
- }
- srv.listenerWrappers = append(srv.listenerWrappers, val.(caddy.ListenerWrapper))
- }
- // if any wrappers were configured but the TLS placeholder wrapper is
- // absent, prepend it so all defined wrappers come after the TLS
- // handshake; this simplifies logic when starting the server, since we
- // can simply assume the TLS placeholder will always be there
- if !hasTLSPlaceholder && len(srv.listenerWrappers) > 0 {
- srv.listenerWrappers = append([]caddy.ListenerWrapper{new(tlsPlaceholderWrapper)}, srv.listenerWrappers...)
- }
- }
-
- // pre-compile the primary handler chain, and be sure to wrap it in our
- // route handler so that important security checks are done, etc.
- primaryRoute := emptyHandler
- if srv.Routes != nil {
- err := srv.Routes.ProvisionHandlers(ctx)
- if err != nil {
- return fmt.Errorf("server %s: setting up route handlers: %v", srvName, err)
- }
- primaryRoute = srv.Routes.Compile(emptyHandler)
- }
- srv.primaryHandlerChain = srv.wrapPrimaryRoute(primaryRoute)
-
- // pre-compile the error handler chain
- if srv.Errors != nil {
- err := srv.Errors.Routes.Provision(ctx)
- if err != nil {
- return fmt.Errorf("server %s: setting up server error handling routes: %v", srvName, err)
- }
- srv.errorHandlerChain = srv.Errors.Routes.Compile(errorEmptyHandler)
- }
-
- // prepare the TLS connection policies
- err = srv.TLSConnPolicies.Provision(ctx)
- if err != nil {
- return fmt.Errorf("server %s: setting up TLS connection policies: %v", srvName, err)
- }
-
- // if there is no idle timeout, set a sane default; users have complained
- // before that aggressive CDNs leave connections open until the server
- // closes them, so if we don't close them it leads to resource exhaustion
- if srv.IdleTimeout == 0 {
- srv.IdleTimeout = defaultIdleTimeout
- }
- }
-
- return nil
-}
-
-// Validate ensures the app's configuration is valid.
-func (app *App) Validate() error {
- // each server must use distinct listener addresses
- lnAddrs := make(map[string]string)
- for srvName, srv := range app.Servers {
- for _, addr := range srv.Listen {
- listenAddr, err := caddy.ParseNetworkAddress(addr)
- if err != nil {
- return fmt.Errorf("invalid listener address '%s': %v", addr, err)
- }
- // check that every address in the port range is unique to this server;
- // we do not use <= here because PortRangeSize() adds 1 to EndPort for us
- for i := uint(0); i < listenAddr.PortRangeSize(); i++ {
- addr := caddy.JoinNetworkAddress(listenAddr.Network, listenAddr.Host, strconv.Itoa(int(listenAddr.StartPort+i)))
- if sn, ok := lnAddrs[addr]; ok {
- return fmt.Errorf("server %s: listener address repeated: %s (already claimed by server '%s')", srvName, addr, sn)
- }
- lnAddrs[addr] = srvName
- }
- }
- }
- return nil
-}
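-
-// As a sketch of why Validate walks port ranges: one listener string can
-// claim several ports (an illustrative address follows):
-//
-//	addr, _ := caddy.ParseNetworkAddress("tcp/localhost:8080-8082")
-//	addr.PortRangeSize() // 3; claims ports 8080, 8081, and 8082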
-
-// Start runs the app. It finishes automatic HTTPS if enabled,
-// including management of certificates.
-func (app *App) Start() error {
- // get a logger compatible with http.Server
- serverLogger, err := zap.NewStdLogAt(app.logger.Named("stdlib"), zap.DebugLevel)
- if err != nil {
- return fmt.Errorf("failed to set up server logger: %v", err)
- }
-
- for srvName, srv := range app.Servers {
- s := &http.Server{
- ReadTimeout: time.Duration(srv.ReadTimeout),
- ReadHeaderTimeout: time.Duration(srv.ReadHeaderTimeout),
- WriteTimeout: time.Duration(srv.WriteTimeout),
- IdleTimeout: time.Duration(srv.IdleTimeout),
- MaxHeaderBytes: srv.MaxHeaderBytes,
- Handler: srv,
- ErrorLog: serverLogger,
- }
-
- // enable h2c if configured
- if srv.AllowH2C {
- h2server := &http2.Server{
- IdleTimeout: time.Duration(srv.IdleTimeout),
- }
- s.Handler = h2c.NewHandler(srv, h2server)
- }
-
- for _, lnAddr := range srv.Listen {
- listenAddr, err := caddy.ParseNetworkAddress(lnAddr)
- if err != nil {
- return fmt.Errorf("%s: parsing listen address '%s': %v", srvName, lnAddr, err)
- }
- for portOffset := uint(0); portOffset < listenAddr.PortRangeSize(); portOffset++ {
- // create the listener for this socket
- hostport := listenAddr.JoinHostPort(portOffset)
- ln, err := caddy.Listen(listenAddr.Network, hostport)
- if err != nil {
- return fmt.Errorf("%s: listening on %s: %v", listenAddr.Network, hostport, err)
- }
-
- // wrap listener before TLS (up to the TLS placeholder wrapper)
- var lnWrapperIdx int
- for i, lnWrapper := range srv.listenerWrappers {
- if _, ok := lnWrapper.(*tlsPlaceholderWrapper); ok {
- lnWrapperIdx = i + 1 // mark the next wrapper's spot
- break
- }
- ln = lnWrapper.WrapListener(ln)
- }
-
- // enable TLS if there is a policy and if this is not the HTTP port
- useTLS := len(srv.TLSConnPolicies) > 0 && int(listenAddr.StartPort+portOffset) != app.httpPort()
- if useTLS {
- // create TLS listener
- tlsCfg := srv.TLSConnPolicies.TLSConfig(app.ctx)
- ln = tls.NewListener(ln, tlsCfg)
-
- /////////
- // TODO: HTTP/3 support is experimental for now
- if srv.ExperimentalHTTP3 {
- app.logger.Info("enabling experimental HTTP/3 listener",
- zap.String("addr", hostport),
- )
- h3ln, err := caddy.ListenPacket("udp", hostport)
- if err != nil {
- return fmt.Errorf("getting HTTP/3 UDP listener: %v", err)
- }
- h3srv := &http3.Server{
- Server: &http.Server{
- Addr: hostport,
- Handler: srv,
- TLSConfig: tlsCfg,
- ErrorLog: serverLogger,
- },
- }
- //nolint:errcheck
- go h3srv.Serve(h3ln)
- app.h3servers = append(app.h3servers, h3srv)
- app.h3listeners = append(app.h3listeners, h3ln)
- srv.h3server = h3srv
- }
- /////////
- }
-
- // finish wrapping listener where we left off before TLS
- for i := lnWrapperIdx; i < len(srv.listenerWrappers); i++ {
- ln = srv.listenerWrappers[i].WrapListener(ln)
- }
-
- // if binding to port 0, the OS chooses a port for us;
- // but the user won't know the port unless we print it
- if listenAddr.StartPort == 0 && listenAddr.EndPort == 0 {
- app.logger.Info("port 0 listener",
- zap.String("input_address", lnAddr),
- zap.String("actual_address", ln.Addr().String()),
- )
- }
-
- app.logger.Debug("starting server loop",
- zap.String("address", ln.Addr().String()),
- zap.Bool("http3", srv.ExperimentalHTTP3),
- zap.Bool("tls", useTLS),
- )
-
- //nolint:errcheck
- go s.Serve(ln)
- app.servers = append(app.servers, s)
- }
- }
- }
-
- // finish automatic HTTPS by finally beginning
- // certificate management
- err = app.automaticHTTPSPhase2()
- if err != nil {
- return fmt.Errorf("finalizing automatic HTTPS: %v", err)
- }
-
- return nil
-}
-
-// Stop gracefully shuts down the HTTP server.
-func (app *App) Stop() error {
- ctx := context.Background()
- if app.GracePeriod > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, time.Duration(app.GracePeriod))
- defer cancel()
- }
- for _, s := range app.servers {
- err := s.Shutdown(ctx)
- if err != nil {
- return err
- }
- }
-
- // close the http3 servers; it's unclear whether the bug reported in
- // https://github.com/caddyserver/caddy/pull/2727#issuecomment-526856566
- // was ever truly fixed, since it seemed racy/nondeterministic; the bug
- // manifested after a config reload (i.e. reusing an http3 server or
- // listener was problematic), but repeated tests in 2020 were unable to
- // reproduce the issue, so it seems to be working fine now
- for _, s := range app.h3servers {
- // TODO: CloseGracefully, once implemented upstream
- // (see https://github.com/lucas-clemente/quic-go/issues/2103)
- err := s.Close()
- if err != nil {
- return err
- }
- }
-
- // closing an http3.Server does not close their underlying listeners
- // since apparently the listener can be used both by servers and
- // clients at the same time; so we need to manually call Close()
- // on the underlying h3 listeners (see lucas-clemente/quic-go#2103)
- for _, pc := range app.h3listeners {
- err := pc.Close()
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (app *App) httpPort() int {
- if app.HTTPPort == 0 {
- return DefaultHTTPPort
- }
- return app.HTTPPort
-}
-
-func (app *App) httpsPort() int {
- if app.HTTPSPort == 0 {
- return DefaultHTTPSPort
- }
- return app.HTTPSPort
-}
-
-// defaultIdleTimeout is the default HTTP server timeout
-// for closing idle connections; useful to avoid resource
-// exhaustion behind hungry CDNs, for example (we've had
-// several complaints without this).
-const defaultIdleTimeout = caddy.Duration(5 * time.Minute)
-
-// Interface guards
-var (
- _ caddy.App = (*App)(nil)
- _ caddy.Provisioner = (*App)(nil)
- _ caddy.Validator = (*App)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/autohttps.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/autohttps.go
deleted file mode 100644
index da4428db..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/autohttps.go
+++ /dev/null
@@ -1,656 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "fmt"
- "net/http"
- "strconv"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/modules/caddytls"
- "github.com/caddyserver/certmagic"
- "go.uber.org/zap"
-)
-
-// AutoHTTPSConfig is used to disable automatic HTTPS
-// or certain aspects of it for a specific server.
-// HTTPS is enabled automatically and by default when
-// qualifying hostnames are available from the config.
-type AutoHTTPSConfig struct {
- // If true, automatic HTTPS will be entirely disabled.
- Disabled bool `json:"disable,omitempty"`
-
- // If true, only automatic HTTP->HTTPS redirects will
- // be disabled.
- DisableRedir bool `json:"disable_redirects,omitempty"`
-
- // Hosts/domain names listed here will not be included
- // in automatic HTTPS (they will not have certificates
- // loaded nor redirects applied).
- Skip []string `json:"skip,omitempty"`
-
- // Hosts/domain names listed here will still be enabled
- // for automatic HTTPS (unless in the Skip list), except
- // that certificates will not be provisioned and managed
- // for these names.
- SkipCerts []string `json:"skip_certificates,omitempty"`
-
- // By default, automatic HTTPS will obtain and renew
- // certificates for qualifying hostnames. However, if
- // a certificate with a matching SAN is already loaded
- // into the cache, certificate management will not be
- // enabled. To force automated certificate management
- // regardless of loaded certificates, set this to true.
- IgnoreLoadedCerts bool `json:"ignore_loaded_certificates,omitempty"`
-}
-
-// Skipped returns true if name is in skipSlice, which
-// should be either the Skip or SkipCerts field on ahc.
-func (ahc AutoHTTPSConfig) Skipped(name string, skipSlice []string) bool {
- for _, n := range skipSlice {
- if name == n {
- return true
- }
- }
- return false
-}
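-
-// For illustration, a server's config fragment using these fields might
-// look like (hypothetical values):
-//
-//	"automatic_https": {
-//		"disable_redirects": true,
-//		"skip": ["internal.example.com"]
-//	}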
-
-// automaticHTTPSPhase1 provisions all route matchers, determines
-// which domain names found in the routes qualify for automatic
-// HTTPS, and sets up HTTP->HTTPS redirects. This phase must occur
-// at the beginning of provisioning, because it may add routes and
-// even servers to the app, which still need to be set up with the
-// rest of them during provisioning.
-func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) error {
- // this map acts as a set to store the domain names
- // for which we will manage certificates automatically
- uniqueDomainsForCerts := make(map[string]struct{})
-
- // this maps domain names for automatic HTTP->HTTPS
- // redirects to their destination server addresses
- // (there might be more than 1 if bind is used; see
- // https://github.com/caddyserver/caddy/issues/3443)
- redirDomains := make(map[string][]caddy.NetworkAddress)
-
- for srvName, srv := range app.Servers {
- // as a prerequisite, provision route matchers; this is
- // required for all routes on all servers, and must be
- // done before we attempt to do phase 1 of auto HTTPS,
- // since we have to access the decoded host matchers; the
- // handlers will be provisioned later
- if srv.Routes != nil {
- err := srv.Routes.ProvisionMatchers(ctx)
- if err != nil {
- return fmt.Errorf("server %s: setting up route matchers: %v", srvName, err)
- }
- }
-
- // prepare for automatic HTTPS
- if srv.AutoHTTPS == nil {
- srv.AutoHTTPS = new(AutoHTTPSConfig)
- }
- if srv.AutoHTTPS.Disabled {
- continue
- }
-
- // skip if all listeners use the HTTP port
- if !srv.listenersUseAnyPortOtherThan(app.httpPort()) {
- app.logger.Info("server is listening only on the HTTP port, so no automatic HTTPS will be applied to this server",
- zap.String("server_name", srvName),
- zap.Int("http_port", app.httpPort()),
- )
- srv.AutoHTTPS.Disabled = true
- continue
- }
-
- // if all listeners are on the HTTPS port, make sure
- // there is at least one TLS connection policy; it
- // should be obvious that they want to use TLS without
- // needing to specify one empty policy to enable it
- if srv.TLSConnPolicies == nil &&
- !srv.listenersUseAnyPortOtherThan(app.httpsPort()) {
- app.logger.Info("server is listening only on the HTTPS port but has no TLS connection policies; adding one to enable TLS",
- zap.String("server_name", srvName),
- zap.Int("https_port", app.httpsPort()),
- )
- srv.TLSConnPolicies = caddytls.ConnectionPolicies{new(caddytls.ConnectionPolicy)}
- }
-
- // find all qualifying domain names (deduplicated) in this server
- // (this is where we need the provisioned, decoded request matchers)
- serverDomainSet := make(map[string]struct{})
- for routeIdx, route := range srv.Routes {
- for matcherSetIdx, matcherSet := range route.MatcherSets {
- for matcherIdx, m := range matcherSet {
- if hm, ok := m.(*MatchHost); ok {
- for hostMatcherIdx, d := range *hm {
- var err error
- d, err = repl.ReplaceOrErr(d, true, false)
- if err != nil {
- return fmt.Errorf("%s: route %d, matcher set %d, matcher %d, host matcher %d: %v",
- srvName, routeIdx, matcherSetIdx, matcherIdx, hostMatcherIdx, err)
- }
- if !srv.AutoHTTPS.Skipped(d, srv.AutoHTTPS.Skip) {
- serverDomainSet[d] = struct{}{}
- }
- }
- }
- }
- }
- }
-
- // nothing more to do here if there are no domains that qualify for
- // automatic HTTPS and there are no explicit TLS connection policies:
- // if there is at least one domain but no TLS conn policy (F&&T), we'll
- // add one below; if there are no domains but at least one TLS conn
- // policy (meaning TLS is enabled) (T&&F), it could be a catch-all with
- // on-demand TLS -- and in that case we would still need HTTP->HTTPS
- // redirects, which we set up below; hence these two conditions
- if len(serverDomainSet) == 0 && len(srv.TLSConnPolicies) == 0 {
- continue
- }
-
- // for all the hostnames we found, filter them so we have
- // a deduplicated list of names for which to obtain certs
- for d := range serverDomainSet {
- if certmagic.SubjectQualifiesForCert(d) &&
- !srv.AutoHTTPS.Skipped(d, srv.AutoHTTPS.SkipCerts) {
- // if a certificate for this name is already loaded,
- // don't obtain another one for it, unless we are
- // supposed to ignore loaded certificates
- if !srv.AutoHTTPS.IgnoreLoadedCerts &&
- len(app.tlsApp.AllMatchingCertificates(d)) > 0 {
- app.logger.Info("skipping automatic certificate management because one or more matching certificates are already loaded",
- zap.String("domain", d),
- zap.String("server_name", srvName),
- )
- continue
- }
-
- // most clients don't accept wildcards like *.tld... we
- // can handle that, but as a courtesy, warn the user
- if strings.Contains(d, "*") &&
- strings.Count(strings.Trim(d, "."), ".") == 1 {
- app.logger.Warn("most clients do not trust second-level wildcard certificates (*.tld)",
- zap.String("domain", d))
- }
-
- uniqueDomainsForCerts[d] = struct{}{}
- }
- }
-
- // tell the server to use TLS if it is not already doing so
- if srv.TLSConnPolicies == nil {
- srv.TLSConnPolicies = caddytls.ConnectionPolicies{new(caddytls.ConnectionPolicy)}
- }
-
- // nothing left to do if auto redirects are disabled
- if srv.AutoHTTPS.DisableRedir {
- continue
- }
-
- app.logger.Info("enabling automatic HTTP->HTTPS redirects",
- zap.String("server_name", srvName),
- )
-
- // create HTTP->HTTPS redirects
- for _, addr := range srv.Listen {
- // figure out the address we will redirect to...
- addr, err := caddy.ParseNetworkAddress(addr)
- if err != nil {
- return fmt.Errorf("%s: invalid listener address: %v", srvName, addr)
- }
-
- // this address might not have a hostname, i.e. might be a
- // catch-all address for a particular port; we need to keep
- // track if it is, so we can set up redirects for it anyway
- // (e.g. the user might have enabled on-demand TLS); we use
- // an empty string to indicate a catch-all, which we have to
- // treat special later
- if len(serverDomainSet) == 0 {
- redirDomains[""] = append(redirDomains[""], addr)
- continue
- }
-
- // ...and associate it with each domain in this server
- for d := range serverDomainSet {
- // if this domain is used on more than one HTTPS-enabled
- // port, we'll have to choose one, so prefer the HTTPS port
- if _, ok := redirDomains[d]; !ok ||
- addr.StartPort == uint(app.httpsPort()) {
- redirDomains[d] = append(redirDomains[d], addr)
- }
- }
- }
- }
-
- // we now have a list of all the unique names for which we need certs;
- // turn the set into a slice so that phase 2 can use it
- app.allCertDomains = make([]string, 0, len(uniqueDomainsForCerts))
- var internal []string
-uniqueDomainsLoop:
- for d := range uniqueDomainsForCerts {
- // whether or not there is already an automation policy for this
- // name, we should add it to the list to manage a cert for it
- app.allCertDomains = append(app.allCertDomains, d)
-
- // some names we've found might already have automation policies
- // explicitly specified for them; we should exclude those from
- // our hidden/implicit policy, since applying a name to more than
- // one automation policy would be confusing and an error
- if app.tlsApp.Automation != nil {
- for _, ap := range app.tlsApp.Automation.Policies {
- for _, apHost := range ap.Subjects {
- if apHost == d {
- continue uniqueDomainsLoop
- }
- }
- }
- }
-
- // if no automation policy exists for the name yet, we
- // will associate it with an implicit one
- if !certmagic.SubjectQualifiesForPublicCert(d) {
- internal = append(internal, d)
- }
- }
-
- // ensure there is an automation policy to handle these certs
- err := app.createAutomationPolicies(ctx, internal)
- if err != nil {
- return err
- }
-
- // we need to reduce the mapping, i.e. group domains by address
- // since new routes are appended to servers by their address
- domainsByAddr := make(map[string][]string)
- for domain, addrs := range redirDomains {
- for _, addr := range addrs {
- addrStr := addr.String()
- domainsByAddr[addrStr] = append(domainsByAddr[addrStr], domain)
- }
- }
-
- // these keep track of the redirect server address(es)
- // and the routes for those servers which actually
- // respond with the redirects
- redirServerAddrs := make(map[string]struct{})
- redirServers := make(map[string][]Route)
- var redirRoutes RouteList
-
- for addrStr, domains := range domainsByAddr {
- // build the matcher set for this redirect route; (note that we happen
- // to bypass Provision and Validate steps for these matcher modules)
- matcherSet := MatcherSet{MatchProtocol("http")}
- // match on known domain names, unless it's our special case of a
- // catch-all which is an empty string (common among catch-all sites
- // that enable on-demand TLS for yet-unknown domain names)
- if !(len(domains) == 1 && domains[0] == "") {
- matcherSet = append(matcherSet, MatchHost(domains))
- }
-
- addr, err := caddy.ParseNetworkAddress(addrStr)
- if err != nil {
- return err
- }
- redirRoute := app.makeRedirRoute(addr.StartPort, matcherSet)
-
- // use the network/host information from the address,
- // but change the port to the HTTP port then rebuild
- redirAddr := addr
- redirAddr.StartPort = uint(app.httpPort())
- redirAddr.EndPort = redirAddr.StartPort
- redirAddrStr := redirAddr.String()
-
- redirServers[redirAddrStr] = append(redirServers[redirAddrStr], redirRoute)
- }
-
- // on-demand TLS means that hostnames may be used which are not
- // explicitly defined in the config, and we still need to redirect
- // those; so we can append a single catch-all route (notice there
- // is no Host matcher) after the other redirect routes which will
- // allow us to handle unexpected/new hostnames... however, it's
- // not entirely clear what the redirect destination should be,
- // so I'm going to just hard-code the app's HTTPS port and call
- // it good for now...
- // TODO: This implies that all plaintext requests will be blindly
- // redirected to their HTTPS equivalent, even if this server
- // doesn't handle that hostname at all; I don't think this is a
- // bad thing, and it also obscures the actual hostnames that this
- // server is configured to match on, which may be desirable, but
- // it's not something that should be relied on. We can change this
- // if we want to.
- appendCatchAll := func(routes []Route) []Route {
- return append(routes, app.makeRedirRoute(uint(app.httpsPort()), MatcherSet{MatchProtocol("http")}))
- }
-
-redirServersLoop:
- for redirServerAddr, routes := range redirServers {
- // for each redirect listener, see if there's already a
- // server configured to listen on that exact address; if so,
- // insert the redirect route to the end of its route list
- // after any other routes with host matchers; otherwise,
- // we'll create a new server for all the listener addresses
- // that are unused and serve the remaining redirects from it
- for _, srv := range app.Servers {
- if srv.hasListenerAddress(redirServerAddr) {
- // find the index of the route after the last route with a host
- // matcher, then insert the redirects there, but before any
- // user-defined catch-all routes
- // see https://github.com/caddyserver/caddy/issues/3212
- insertIndex := srv.findLastRouteWithHostMatcher()
- srv.Routes = append(srv.Routes[:insertIndex], append(routes, srv.Routes[insertIndex:]...)...)
-
- // append our catch-all route in case the user didn't define their own
- srv.Routes = appendCatchAll(srv.Routes)
-
- continue redirServersLoop
- }
- }
-
- // no server with this listener address exists;
- // save this address and route for custom server
- redirServerAddrs[redirServerAddr] = struct{}{}
- redirRoutes = append(redirRoutes, routes...)
- }
-
- // if there are routes remaining which do not belong
- // in any existing server, make our own to serve the
- // rest of the redirects
- if len(redirServerAddrs) > 0 {
- redirServerAddrsList := make([]string, 0, len(redirServerAddrs))
- for a := range redirServerAddrs {
- redirServerAddrsList = append(redirServerAddrsList, a)
- }
- app.Servers["remaining_auto_https_redirects"] = &Server{
- Listen: redirServerAddrsList,
- Routes: appendCatchAll(redirRoutes),
- }
- }
-
- return nil
-}
-
-func (app *App) makeRedirRoute(redirToPort uint, matcherSet MatcherSet) Route {
- redirTo := "https://{http.request.host}"
-
- // since this is an external redirect, we should only append an explicit
- // port if we know it is not the officially standardized HTTPS port, and,
- // notably, also not the port that Caddy thinks is the HTTPS port (the
- // configurable HTTPSPort parameter) - we can't change the standard HTTPS
- // port externally, so that config parameter is for internal use only;
- // we also do not append the port if it happens to be the HTTP port as
- // well, obviously (for example, user defines the HTTP port explicitly
- // in the list of listen addresses for a server)
- if redirToPort != uint(app.httpPort()) &&
- redirToPort != uint(app.httpsPort()) &&
- redirToPort != DefaultHTTPPort &&
- redirToPort != DefaultHTTPSPort {
- redirTo += ":" + strconv.Itoa(int(redirToPort))
- }
-
- redirTo += "{http.request.uri}"
- return Route{
- MatcherSets: []MatcherSet{matcherSet},
- Handlers: []MiddlewareHandler{
- StaticResponse{
- StatusCode: WeakString(strconv.Itoa(http.StatusPermanentRedirect)),
- Headers: http.Header{
- "Location": []string{redirTo},
- },
- Close: true,
- },
- },
- }
-}
-
-// createAutomationPolicies ensures that automated certificates for this
-// app are managed properly. This adds up to two automation policies:
-// one for the public names, and one for the internal names. If a catch-all
-// automation policy exists, it will be shallow-copied and used as the
-// base for the new ones (this is important for preserving behavior the
-// user intends to be "defaults").
-func (app *App) createAutomationPolicies(ctx caddy.Context, internalNames []string) error {
- // before we begin, loop through the existing automation policies
- // and, for any ACMEIssuers we find, make sure they're filled in
- // with default values that might be specified in our HTTP app; also
- // look for a base (or "catch-all" / default) automation policy,
- // which we're going to essentially require, to make sure it has
- // those defaults, too
- var basePolicy *caddytls.AutomationPolicy
- var foundBasePolicy bool
- if app.tlsApp.Automation == nil {
- // we will expect this to not be nil from now on
- app.tlsApp.Automation = new(caddytls.AutomationConfig)
- }
- for _, ap := range app.tlsApp.Automation.Policies {
- // set up default issuer -- honestly, this is only
- // really necessary because the HTTP app is opinionated
- // and has settings which could be inferred as new
- // defaults for the ACMEIssuer in the TLS app (such as
- // what the HTTP and HTTPS ports are)
- if ap.Issuers == nil {
- var err error
- ap.Issuers, err = caddytls.DefaultIssuersProvisioned(ctx)
- if err != nil {
- return err
- }
- }
- for _, iss := range ap.Issuers {
- if acmeIssuer, ok := iss.(acmeCapable); ok {
- err := app.fillInACMEIssuer(acmeIssuer.GetACMEIssuer())
- if err != nil {
- return err
- }
- }
- }
-
- // while we're here, is this the catch-all/base policy?
- if !foundBasePolicy && len(ap.Subjects) == 0 {
- basePolicy = ap
- foundBasePolicy = true
- }
- }
-
- if basePolicy == nil {
- // no base policy found, we will make one!
- basePolicy = new(caddytls.AutomationPolicy)
- }
-
- // if the basePolicy has an existing ACMEIssuer (particularly to
- // include any type that embeds/wraps an ACMEIssuer), let's use it
- // (I guess we just use the first one?), otherwise we'll make one
- var baseACMEIssuer *caddytls.ACMEIssuer
- for _, iss := range basePolicy.Issuers {
- if acmeWrapper, ok := iss.(acmeCapable); ok {
- baseACMEIssuer = acmeWrapper.GetACMEIssuer()
- break
- }
- }
- if baseACMEIssuer == nil {
- // note that this happens if basePolicy.Issuer is nil
- // OR if it is not nil but is not an ACMEIssuer
- baseACMEIssuer = new(caddytls.ACMEIssuer)
- }
-
- // if there was a base policy to begin with, we already
- // filled in its issuer's defaults; if there wasn't, we
- // still need to do that
- if !foundBasePolicy {
- err := app.fillInACMEIssuer(baseACMEIssuer)
- if err != nil {
- return err
- }
- }
-
- // never overwrite any other issuer that might already be configured
- if basePolicy.Issuers == nil {
- var err error
- basePolicy.Issuers, err = caddytls.DefaultIssuersProvisioned(ctx)
- if err != nil {
- return err
- }
- for _, iss := range basePolicy.Issuers {
- if acmeIssuer, ok := iss.(acmeCapable); ok {
- err := app.fillInACMEIssuer(acmeIssuer.GetACMEIssuer())
- if err != nil {
- return err
- }
- }
- }
- }
-
- if !foundBasePolicy {
- // there was no base policy to begin with, so add
- // our base/catch-all policy - this will serve the
- // public-looking names as well as any other names
- // that don't match any other policy
- err := app.tlsApp.AddAutomationPolicy(basePolicy)
- if err != nil {
- return err
- }
- } else {
- // a base policy already existed; we might have
- // changed it, so re-provision it
- err := basePolicy.Provision(app.tlsApp)
- if err != nil {
- return err
- }
- }
-
- // public names will be taken care of by the base (catch-all)
- // policy, which we've ensured exists if not already specified;
- // internal names, however, need to be handled by an internal
- // issuer, which we need to make a new policy for, scoped to
- // just those names (yes, this logic is a bit asymmetric, but
- // it works, because our assumed/natural default issuer is an
- // ACME issuer)
- if len(internalNames) > 0 {
- internalIssuer := new(caddytls.InternalIssuer)
-
- // shallow-copy the base policy; we want to inherit
- // from it, not replace it... this takes two lines to
- // overrule compiler optimizations
- policyCopy := *basePolicy
- newPolicy := &policyCopy
-
- // very important to provision the issuer, since we
- // are bypassing the JSON-unmarshaling step
- if err := internalIssuer.Provision(ctx); err != nil {
- return err
- }
-
- // this policy should apply only to the given names
- // and should use our issuer -- yes, this overrides
- // any issuer that may have been set in the base
- // policy, but we do this because these names do not
- // already have a policy associated with them, which
- // is easy to do; consider the case of a Caddyfile
- // that has only "localhost" as a name, but sets the
- // default/global ACME CA to the Let's Encrypt staging
- // endpoint... they probably don't intend to change the
- // fundamental set of names that setting applies to,
- // rather they just want to change the CA for the set
- // of names that would normally use the production API;
- // anyway, that gets into the weeds a bit...
- newPolicy.Subjects = internalNames
- newPolicy.Issuers = []certmagic.Issuer{internalIssuer}
- err := app.tlsApp.AddAutomationPolicy(newPolicy)
- if err != nil {
- return err
- }
- }
-
- // we just changed a lot of stuff, so double-check that it's all good
- err := app.tlsApp.Validate()
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// fillInACMEIssuer fills in default values into acmeIssuer that
-// are defined in app; these values at time of writing are just
-// app.HTTPPort and app.HTTPSPort, which are used by ACMEIssuer.
-// Sure, we could just use the global/CertMagic defaults, but if
-// a user has configured those ports in the HTTP app, it makes
-// sense to use them in the TLS app too, even if they forgot (or
-// were too lazy, like me) to set it in each automation policy
-// that uses it -- this just makes things a little less tedious
-// for the user, so they don't have to repeat those ports in
-// potentially many places. This function never steps on existing
-// config values. If any changes are made, acmeIssuer is
-// reprovisioned. acmeIssuer must not be nil.
-func (app *App) fillInACMEIssuer(acmeIssuer *caddytls.ACMEIssuer) error {
- if app.HTTPPort > 0 || app.HTTPSPort > 0 {
- if acmeIssuer.Challenges == nil {
- acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
- }
- }
- if app.HTTPPort > 0 {
- if acmeIssuer.Challenges.HTTP == nil {
- acmeIssuer.Challenges.HTTP = new(caddytls.HTTPChallengeConfig)
- }
- // don't overwrite existing explicit config
- if acmeIssuer.Challenges.HTTP.AlternatePort == 0 {
- acmeIssuer.Challenges.HTTP.AlternatePort = app.HTTPPort
- }
- }
- if app.HTTPSPort > 0 {
- if acmeIssuer.Challenges.TLSALPN == nil {
- acmeIssuer.Challenges.TLSALPN = new(caddytls.TLSALPNChallengeConfig)
- }
- // don't overwrite existing explicit config
- if acmeIssuer.Challenges.TLSALPN.AlternatePort == 0 {
- acmeIssuer.Challenges.TLSALPN.AlternatePort = app.HTTPSPort
- }
- }
- // we must provision all ACME issuers, even if nothing
- // was changed, because we don't know if they are new
- // and haven't been provisioned yet; if an ACME issuer
- // never gets provisioned, its Agree field stays false,
- // which leads to, um, problems later on
- return acmeIssuer.Provision(app.ctx)
-}
-
-// automaticHTTPSPhase2 begins certificate management for
-// all names in the qualifying domain set for each server.
-// This phase must occur after provisioning and at the end
-// of app start, after all the servers have been started.
-// Doing this last ensures that there won't be any race
-// for listeners on the HTTP or HTTPS ports when management
-// is async (if CertMagic's solvers bind to those ports
-// first, then our servers would fail to bind to them,
-// which would be bad, since CertMagic's bindings are
-// temporary and don't serve the user's sites!).
-func (app *App) automaticHTTPSPhase2() error {
- if len(app.allCertDomains) == 0 {
- return nil
- }
- app.logger.Info("enabling automatic TLS certificate management",
- zap.Strings("domains", app.allCertDomains),
- )
- err := app.tlsApp.Manage(app.allCertDomains)
- if err != nil {
- return fmt.Errorf("managing certificates for %v: %s", app.allCertDomains, err)
- }
- app.allCertDomains = nil // no longer needed; allow GC to deallocate
- return nil
-}
-
-type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer }
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/caddyhttp.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/caddyhttp.go
deleted file mode 100644
index 784b2b90..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/caddyhttp.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "bytes"
- "encoding/json"
- "io"
- "net"
- "net/http"
- "path/filepath"
- "strconv"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
-)
-
-func init() {
- caddy.RegisterModule(tlsPlaceholderWrapper{})
-}
-
-// RequestMatcher is a type that can match to a request.
-// A route matcher MUST NOT modify the request, with the
-// only exception being its context.
-type RequestMatcher interface {
- Match(*http.Request) bool
-}
-
-// Handler is like http.Handler except ServeHTTP may return an error.
-//
-// If any handler encounters an error, it should be returned for proper
-// handling. Errors should be propagated down the middleware chain by
-// returning them unchanged, and should not be re-wrapped if they are
-// already HandlerError values.
-type Handler interface {
- ServeHTTP(http.ResponseWriter, *http.Request) error
-}
-
-// HandlerFunc is a convenience type like http.HandlerFunc.
-type HandlerFunc func(http.ResponseWriter, *http.Request) error
-
-// ServeHTTP implements the Handler interface.
-func (f HandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
- return f(w, r)
-}
-
-// Middleware chains one Handler to the next by being passed
-// the next Handler in the chain.
-type Middleware func(Handler) Handler
-
-// MiddlewareHandler is like Handler except it takes as a third
-// argument the next handler in the chain. The next handler will
-// never be nil, but may be a no-op handler if this is the last
-// handler in the chain. Handlers which act as middleware should
-// call the next handler's ServeHTTP method so as to propagate
-// the request down the chain properly. Handlers which act as
-// responders (content origins) need not invoke the next handler,
-// since the last handler in the chain should be the first to
-// write the response.
-type MiddlewareHandler interface {
- ServeHTTP(http.ResponseWriter, *http.Request, Handler) error
-}
-
-// emptyHandler is used as a no-op handler.
-var emptyHandler Handler = HandlerFunc(func(http.ResponseWriter, *http.Request) error { return nil })
-
-// An implicit suffix middleware that, if reached, sets the status code to
-// that of the error stored in the ErrorCtxKey. This is to prevent situations
-// where the error chain does not actually handle the error (for instance, it
-// matches only on some errors). See #3053
-var errorEmptyHandler Handler = HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
- httpError := r.Context().Value(ErrorCtxKey)
- if handlerError, ok := httpError.(HandlerError); ok {
- w.WriteHeader(handlerError.StatusCode)
- } else {
- w.WriteHeader(http.StatusInternalServerError)
- }
- return nil
-})
-
-// ResponseHandler pairs a response matcher with custom handling
-// logic. Either the status code can be changed to something else
-// while using the original response body, or, if a status code
-// is not set, it can execute a custom route list; this is useful
-// for executing handler routes based on the properties of an HTTP
-// response that has not been written out to the client yet.
-//
-// To use this type, provision it at module load time, then when
-// ready to use, match the response against its matcher; if it
-// matches (or doesn't have a matcher), change the status code on
-// the response if configured; otherwise invoke the routes by
-// calling `rh.Routes.Compile(next).ServeHTTP(rw, req)` (or similar).
-type ResponseHandler struct {
- // The response matcher for this handler. If empty/nil,
- // it always matches.
- Match *ResponseMatcher `json:"match,omitempty"`
-
- // To write the original response body but with a different
- // status code, set this field to the desired status code.
- // If set, this takes priority over routes.
- StatusCode WeakString `json:"status_code,omitempty"`
-
- // The list of HTTP routes to execute if no status code is
- // specified. If evaluated, the original response body
- // will not be written.
- Routes RouteList `json:"routes,omitempty"`
-}
-
-// Provision sets up the routes in rh.
-func (rh *ResponseHandler) Provision(ctx caddy.Context) error {
- if rh.Routes != nil {
- err := rh.Routes.Provision(ctx)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// WeakString is a type that unmarshals any JSON value
-// as a string literal, with the following exceptions:
-//
-// 1. actual string values are decoded as strings; and
-// 2. null is decoded as empty string;
-//
-// and provides methods for getting the value as various
-// primitive types. However, using this type removes any
-// type safety as far as deserializing JSON is concerned.
-type WeakString string
-
-// UnmarshalJSON satisfies json.Unmarshaler according to
-// this type's documentation.
-func (ws *WeakString) UnmarshalJSON(b []byte) error {
- if len(b) == 0 {
- return io.EOF
- }
- if b[0] == byte('"') && b[len(b)-1] == byte('"') {
- var s string
- err := json.Unmarshal(b, &s)
- if err != nil {
- return err
- }
- *ws = WeakString(s)
- return nil
- }
- if bytes.Equal(b, []byte("null")) {
- return nil
- }
- *ws = WeakString(b)
- return nil
-}
-
-// MarshalJSON marshals ws as a boolean if true or false,
-// a number if an integer, or a string otherwise.
-func (ws WeakString) MarshalJSON() ([]byte, error) {
- if ws == "true" {
- return []byte("true"), nil
- }
- if ws == "false" {
- return []byte("false"), nil
- }
- if num, err := strconv.Atoi(string(ws)); err == nil {
- return json.Marshal(num)
- }
- return json.Marshal(string(ws))
-}
-
-// Int returns ws as an integer. If ws is not an
-// integer, 0 is returned.
-func (ws WeakString) Int() int {
- num, _ := strconv.Atoi(string(ws))
- return num
-}
-
-// Float64 returns ws as a float64. If ws is not a
-// float value, the zero value is returned.
-func (ws WeakString) Float64() float64 {
- num, _ := strconv.ParseFloat(string(ws), 64)
- return num
-}
-
-// Bool returns ws as a boolean. If ws is not a
-// boolean, false is returned.
-func (ws WeakString) Bool() bool {
- return string(ws) == "true"
-}
-
-// String returns ws as a string.
-func (ws WeakString) String() string {
- return string(ws)
-}
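-
-// Illustrative decoding behavior (JSON input -> resulting WeakString):
-//
-//	"80"  -> WeakString("80")
-//	80    -> WeakString("80")
-//	true  -> WeakString("true")
-//	null  -> WeakString("")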
-
-// StatusCodeMatches returns true if a real HTTP status code matches
-// the configured status code, which may be either a real HTTP status
-// code or an integer representing a class of codes (e.g. 4 for all
-// 4xx statuses).
-func StatusCodeMatches(actual, configured int) bool {
- if actual == configured {
- return true
- }
- if configured < 100 &&
- actual >= configured*100 &&
- actual < (configured+1)*100 {
- return true
- }
- return false
-}
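-
-// For example:
-//
-//	StatusCodeMatches(404, 404) // true (exact match)
-//	StatusCodeMatches(404, 4)   // true (4xx class)
-//	StatusCodeMatches(500, 4)   // false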
-
-// SanitizedPathJoin performs filepath.Join(root, reqPath) that
-// is safe against directory traversal attacks. It uses logic
-// similar to that in the Go standard library, specifically
-// in the implementation of http.Dir. The root is assumed to
-// be a trusted path, but reqPath is not; and the output will
-// never be outside of root. The resulting path can be used
-// with the local file system.
-func SanitizedPathJoin(root, reqPath string) string {
- if root == "" {
- root = "."
- }
-
- path := filepath.Join(root, filepath.Clean("/"+reqPath))
-
- // filepath.Join also cleans the path, and cleaning strips
- // the trailing slash, so we need to re-add it afterwards.
- // if the length is 1, then it's a path to the root,
- // and that should return ".", so we don't append the separator.
- if strings.HasSuffix(reqPath, "/") && len(reqPath) > 1 {
- path += separator
- }
-
- return path
-}
-
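-// For example, on a Unix-like system a traversal attempt is confined
-// to the root:
-//
-// SanitizedPathJoin("/srv/www", "/../../etc/passwd") == "/srv/www/etc/passwd"
-// SanitizedPathJoin("/srv/www", "/index.html")       == "/srv/www/index.html"
-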
-// tlsPlaceholderWrapper is a no-op listener wrapper that marks
-// where the TLS listener should be in a chain of listener wrappers.
-// It should only be used if another listener wrapper must be placed
-// in front of the TLS handshake.
-type tlsPlaceholderWrapper struct{}
-
-func (tlsPlaceholderWrapper) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "caddy.listeners.tls",
- New: func() caddy.Module { return new(tlsPlaceholderWrapper) },
- }
-}
-
-func (tlsPlaceholderWrapper) WrapListener(ln net.Listener) net.Listener { return ln }
-
-func (tlsPlaceholderWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { return nil }
-
-const (
- // DefaultHTTPPort is the default port for HTTP.
- DefaultHTTPPort = 80
-
- // DefaultHTTPSPort is the default port for HTTPS.
- DefaultHTTPSPort = 443
-)
-
-const separator = string(filepath.Separator)
-
-// Interface guard
-var _ caddy.ListenerWrapper = (*tlsPlaceholderWrapper)(nil)
-var _ caddyfile.Unmarshaler = (*tlsPlaceholderWrapper)(nil)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/celmatcher.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/celmatcher.go
deleted file mode 100644
index d7d55d84..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/celmatcher.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "crypto/x509/pkix"
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "regexp"
- "strings"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/google/cel-go/cel"
- "github.com/google/cel-go/checker/decls"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
- "github.com/google/cel-go/ext"
- "github.com/google/cel-go/interpreter/functions"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
- "google.golang.org/protobuf/proto"
-)
-
-func init() {
- caddy.RegisterModule(MatchExpression{})
-}
-
-// MatchExpression matches requests by evaluating a
-// [CEL](https://github.com/google/cel-spec) expression.
-// This enables complex logic to be expressed using a comfortable,
-// familiar syntax. Please refer to
-// [the standard definitions of CEL functions and operators](https://github.com/google/cel-spec/blob/master/doc/langdef.md#standard-definitions).
-//
-// This matcher's JSON interface is actually a string, not a struct.
-// The generated docs are not correct because this type has custom
-// marshaling logic.
-//
-// COMPATIBILITY NOTE: This module is still experimental and is not
-// subject to Caddy's compatibility guarantee.
-type MatchExpression struct {
- // The CEL expression to evaluate. Any Caddy placeholders
- // will be expanded and situated into proper CEL function
- // calls before evaluating.
- Expr string
-
- expandedExpr string
- prg cel.Program
- ta ref.TypeAdapter
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchExpression) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.expression",
- New: func() caddy.Module { return new(MatchExpression) },
- }
-}
-
-// MarshalJSON marshals m's expression.
-func (m MatchExpression) MarshalJSON() ([]byte, error) {
- return json.Marshal(m.Expr)
-}
-
-// UnmarshalJSON unmarshals m's expression.
-func (m *MatchExpression) UnmarshalJSON(data []byte) error {
- return json.Unmarshal(data, &m.Expr)
-}
-
-// Provision sets up m.
-func (m *MatchExpression) Provision(_ caddy.Context) error {
- // replace placeholders with a function call - this is just some
- // light (and possibly naïve) syntactic sugar
- m.expandedExpr = placeholderRegexp.ReplaceAllString(m.Expr, placeholderExpansion)
-
- // our type adapter expands CEL's standard type support
- m.ta = celTypeAdapter{}
-
- // create the CEL environment
- env, err := cel.NewEnv(
- cel.Declarations(
- decls.NewVar("request", httpRequestObjectType),
- decls.NewFunction(placeholderFuncName,
- decls.NewOverload(placeholderFuncName+"_httpRequest_string",
- []*exprpb.Type{httpRequestObjectType, decls.String},
- decls.Any)),
- ),
- cel.CustomTypeAdapter(m.ta),
- ext.Strings(),
- )
- if err != nil {
- return fmt.Errorf("setting up CEL environment: %v", err)
- }
-
- // parse and type-check the expression
- checked, issues := env.Compile(m.expandedExpr)
- if issues != nil && issues.Err() != nil {
- return fmt.Errorf("compiling CEL program: %s", issues.Err())
- }
-
- // request matching is a boolean operation, so we don't really know
- // what to do if the expression returns a non-boolean type
- if !proto.Equal(checked.ResultType(), decls.Bool) {
- return fmt.Errorf("CEL request matcher expects return type of bool, not %s", checked.ResultType())
- }
-
- // compile the "program"
- m.prg, err = env.Program(checked,
- cel.Functions(
- &functions.Overload{
- Operator: placeholderFuncName,
- Binary: m.caddyPlaceholderFunc,
- },
- ),
- )
- if err != nil {
- return fmt.Errorf("compiling CEL program: %s", err)
- }
- return nil
-}
-
-// Match returns true if r matches m.
-func (m MatchExpression) Match(r *http.Request) bool {
- out, _, _ := m.prg.Eval(map[string]interface{}{
- "request": celHTTPRequest{r},
- })
- if outBool, ok := out.Value().(bool); ok {
- return outBool
- }
- return false
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchExpression) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- m.Expr = strings.Join(d.RemainingArgs(), " ")
- }
- return nil
-}
-
-// caddyPlaceholderFunc implements the custom CEL function that accesses the
-// Replacer on a request and gets values from it.
-func (m MatchExpression) caddyPlaceholderFunc(lhs, rhs ref.Val) ref.Val {
- celReq, ok := lhs.(celHTTPRequest)
- if !ok {
- return types.NewErr(
- "invalid request of type '%v' to "+placeholderFuncName+"(request, placeholderVarName)",
- lhs.Type())
- }
- phStr, ok := rhs.(types.String)
- if !ok {
- return types.NewErr(
- "invalid placeholder variable name of type '%v' to "+placeholderFuncName+"(request, placeholderVarName)",
- rhs.Type())
- }
-
- repl := celReq.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- val, _ := repl.Get(string(phStr))
-
- return m.ta.NativeToValue(val)
-}
-
-// httpRequestCELType is the type representation of a native HTTP request.
-var httpRequestCELType = types.NewTypeValue("http.Request", traits.ReceiverType)
-
-// celHTTPRequest wraps an http.Request with
-// methods to satisfy the ref.Val interface.
-type celHTTPRequest struct{ *http.Request }
-
-func (cr celHTTPRequest) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- return cr.Request, nil
-}
-func (celHTTPRequest) ConvertToType(typeVal ref.Type) ref.Val {
- panic("not implemented")
-}
-func (cr celHTTPRequest) Equal(other ref.Val) ref.Val {
- if o, ok := other.Value().(celHTTPRequest); ok {
- return types.Bool(o.Request == cr.Request)
- }
- return types.ValOrErr(other, "%v is not comparable type", other)
-}
-func (celHTTPRequest) Type() ref.Type { return httpRequestCELType }
-func (cr celHTTPRequest) Value() interface{} { return cr }
-
-var pkixNameCELType = types.NewTypeValue("pkix.Name", traits.ReceiverType)
-
-// celPkixName wraps a pkix.Name with
-// methods to satisfy the ref.Val interface.
-type celPkixName struct{ *pkix.Name }
-
-func (pn celPkixName) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- return pn.Name, nil
-}
-func (celPkixName) ConvertToType(typeVal ref.Type) ref.Val {
- panic("not implemented")
-}
-func (pn celPkixName) Equal(other ref.Val) ref.Val {
- if o, ok := other.Value().(string); ok {
- return types.Bool(pn.Name.String() == o)
- }
- return types.ValOrErr(other, "%v is not comparable type", other)
-}
-func (celPkixName) Type() ref.Type { return pkixNameCELType }
-func (pn celPkixName) Value() interface{} { return pn }
-
-// celTypeAdapter can adapt our custom types to a CEL value.
-type celTypeAdapter struct{}
-
-func (celTypeAdapter) NativeToValue(value interface{}) ref.Val {
- switch v := value.(type) {
- case celHTTPRequest:
- return v
- case pkix.Name:
- return celPkixName{&v}
- case time.Time:
- return types.Timestamp{Time: v}
- case error:
- return types.NewErr(v.Error())
- }
- return types.DefaultTypeAdapter.NativeToValue(value)
-}
-
-// Variables used for replacing Caddy placeholders in CEL
-// expressions with a proper CEL function call; this is
-// just for syntactic sugar.
-var (
- placeholderRegexp = regexp.MustCompile(`{([\w.-]+)}`)
- placeholderExpansion = `caddyPlaceholder(request, "${1}")`
-)
-
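-// For example, the expression
-//
-// {http.request.uri.path}.startsWith("/api/")
-//
-// is rewritten by placeholderRegexp into
-//
-// caddyPlaceholder(request, "http.request.uri.path").startsWith("/api/")
-//
-// before being compiled by the CEL environment.
-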
-var httpRequestObjectType = decls.NewObjectType("http.Request")
-
-// The name of the CEL function which accesses Replacer values.
-const placeholderFuncName = "caddyPlaceholder"
-
-// Interface guards
-var (
- _ caddy.Provisioner = (*MatchExpression)(nil)
- _ RequestMatcher = (*MatchExpression)(nil)
- _ caddyfile.Unmarshaler = (*MatchExpression)(nil)
- _ json.Marshaler = (*MatchExpression)(nil)
- _ json.Unmarshaler = (*MatchExpression)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/errors.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/errors.go
deleted file mode 100644
index 85dc3dfb..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/errors.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "fmt"
- weakrand "math/rand"
- "path"
- "runtime"
- "strings"
- "time"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-func init() {
- weakrand.Seed(time.Now().UnixNano())
-}
-
-// Error is a convenient way for a Handler to populate the
-// essential fields of a HandlerError. If err is itself a
-// HandlerError, then any essential fields that are not
-// set will be populated.
-func Error(statusCode int, err error) HandlerError {
- const idLen = 9
- if he, ok := err.(HandlerError); ok {
- if he.ID == "" {
- he.ID = randString(idLen, true)
- }
- if he.Trace == "" {
- he.Trace = trace()
- }
- if he.StatusCode == 0 {
- he.StatusCode = statusCode
- }
- return he
- }
- return HandlerError{
- ID: randString(idLen, true),
- StatusCode: statusCode,
- Err: err,
- Trace: trace(),
- }
-}
-
-// HandlerError is a serializable representation of
-// an error from within an HTTP handler.
-type HandlerError struct {
- Err error // the original error value and message
- StatusCode int // the HTTP status code to associate with this error
-
- ID string // generated; for identifying this error in logs
- Trace string // produced from call stack
-}
-
-func (e HandlerError) Error() string {
- var s string
- if e.ID != "" {
- s += fmt.Sprintf("{id=%s}", e.ID)
- }
- if e.Trace != "" {
- s += " " + e.Trace
- }
- if e.StatusCode != 0 {
- s += fmt.Sprintf(": HTTP %d", e.StatusCode)
- }
- if e.Err != nil {
- s += ": " + e.Err.Error()
- }
- return strings.TrimSpace(s)
-}
-
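-// For example, a handler would typically wrap a failure like
-//
-// return Error(http.StatusNotFound, err)
-//
-// which formats as something like (the ID, trace, and message here
-// are illustrative):
-//
-// {id=p6u8nw2cd} caddyhttp.ServeHTTP (server.go:42): HTTP 404: file does not exist
-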
-// randString returns a string of n random characters.
-// It is not even remotely secure OR a proper distribution.
-// But it's good enough for some things. It excludes certain
-// confusing characters like I, l, 1, 0, O, etc. If sameCase
-// is true, then uppercase letters are excluded.
-func randString(n int, sameCase bool) string {
- if n <= 0 {
- return ""
- }
- dict := []byte("abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRTUVWXY23456789")
- if sameCase {
- dict = []byte("abcdefghijkmnpqrstuvwxyz0123456789")
- }
- b := make([]byte, n)
- for i := range b {
- //nolint:gosec
- b[i] = dict[weakrand.Int63()%int64(len(dict))]
- }
- return string(b)
-}
-
-func trace() string {
- if pc, file, line, ok := runtime.Caller(2); ok {
- filename := path.Base(file)
- pkgAndFuncName := path.Base(runtime.FuncForPC(pc).Name())
- return fmt.Sprintf("%s (%s:%d)", pkgAndFuncName, filename, line)
- }
- return ""
-}
-
-// ErrorCtxKey is the context key to use when storing
-// an error (for use with context.Context).
-const ErrorCtxKey = caddy.CtxKey("handler_chain_error")
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/caddyfile.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/caddyfile.go
deleted file mode 100644
index c6ea2fb0..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/caddyfile.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package headers
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "strings"
-
- "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
-)
-
-func init() {
- httpcaddyfile.RegisterDirective("header", parseCaddyfile)
- httpcaddyfile.RegisterDirective("request_header", parseReqHdrCaddyfile)
-}
-
-// parseCaddyfile sets up the handler for response headers from
-// Caddyfile tokens. Syntax:
-//
-// header [<matcher>] [[+|-|?]<field> [<value>|<find>] [<replace>]] {
-// [+]<field> [<value>|<find> [<replace>]]
-// ?<field> <default_value>
-// -<field>
-// [defer]
-// }
-//
-// Either a block can be opened or a single header field can be configured
-// in the first line, but not both in the same directive. Header operations
-// are deferred to write-time if any headers are being deleted or if the
-// 'defer' subdirective is used. + appends a header value, - deletes a field,
-// and ? conditionally sets a value only if the header field is not already
-// set.
-func parseCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
- if !h.Next() {
- return nil, h.ArgErr()
- }
-
- matcherSet, err := h.ExtractMatcherSet()
- if err != nil {
- return nil, err
- }
-
- makeHandler := func() Handler {
- return Handler{
- Response: &RespHeaderOps{
- HeaderOps: &HeaderOps{},
- },
- }
- }
- handler, handlerWithRequire := makeHandler(), makeHandler()
-
- for h.Next() {
- // first see if headers are in the initial line
- var hasArgs bool
- if h.NextArg() {
- hasArgs = true
- field := h.Val()
- var value, replacement string
- if h.NextArg() {
- value = h.Val()
- }
- if h.NextArg() {
- replacement = h.Val()
- }
- err := applyHeaderOp(
- handler.Response.HeaderOps,
- handler.Response,
- field,
- value,
- replacement,
- )
- if err != nil {
- return nil, h.Err(err.Error())
- }
- if len(handler.Response.HeaderOps.Delete) > 0 {
- handler.Response.Deferred = true
- }
- }
-
- // if not, they should be in a block
- for h.NextBlock(0) {
- field := h.Val()
- if field == "defer" {
- handler.Response.Deferred = true
- continue
- }
- if hasArgs {
- return nil, h.Err("cannot specify headers in both arguments and block") // because it would be weird
- }
-
- // sometimes it is habitual for users to suffix a field name with a colon,
- // as if they were writing a curl command or something; see
- // https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349/19
- field = strings.TrimSuffix(field, ":")
-
- var value, replacement string
- if h.NextArg() {
- value = h.Val()
- }
- if h.NextArg() {
- replacement = h.Val()
- }
-
- handlerToUse := handler
- if strings.HasPrefix(field, "?") {
- handlerToUse = handlerWithRequire
- }
-
- err := applyHeaderOp(
- handlerToUse.Response.HeaderOps,
- handlerToUse.Response,
- field,
- value,
- replacement,
- )
- if err != nil {
- return nil, h.Err(err.Error())
- }
- }
- }
-
- var configValues []httpcaddyfile.ConfigValue
- if !reflect.DeepEqual(handler, makeHandler()) {
- configValues = append(configValues, h.NewRoute(matcherSet, handler)...)
- }
- if !reflect.DeepEqual(handlerWithRequire, makeHandler()) {
- configValues = append(configValues, h.NewRoute(matcherSet, handlerWithRequire)...)
- }
-
- return configValues, nil
-}
-
-// parseReqHdrCaddyfile sets up the handler for request headers
-// from Caddyfile tokens. Syntax:
-//
-// request_header [<matcher>] [[+|-]<field> [<value>|<find>] [<replace>]]
-//
-func parseReqHdrCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
- if !h.Next() {
- return nil, h.ArgErr()
- }
-
- matcherSet, err := h.ExtractMatcherSet()
- if err != nil {
- return nil, err
- }
-
- configValues := []httpcaddyfile.ConfigValue{}
-
- for h.Next() {
- if !h.NextArg() {
- return nil, h.ArgErr()
- }
- field := h.Val()
-
- hdr := Handler{
- Request: &HeaderOps{},
- }
-
- // sometimes it is habitual for users to suffix a field name with a colon,
- // as if they were writing a curl command or something; see
- // https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349/19
- field = strings.TrimSuffix(field, ":")
-
- var value, replacement string
- if h.NextArg() {
- value = h.Val()
- }
- if h.NextArg() {
- replacement = h.Val()
- if h.NextArg() {
- return nil, h.ArgErr()
- }
- }
-
- if hdr.Request == nil {
- hdr.Request = new(HeaderOps)
- }
- if err := CaddyfileHeaderOp(hdr.Request, field, value, replacement); err != nil {
- return nil, h.Err(err.Error())
- }
-
- configValues = append(configValues, h.NewRoute(matcherSet, hdr)...)
-
- if h.NextArg() {
- return nil, h.ArgErr()
- }
- }
- return configValues, nil
-}
-
-// CaddyfileHeaderOp applies a new header operation according to
-// field, value, and replacement. The field can be prefixed with
-// "+" or "-" to specify adding or removing; otherwise, the value
-// will be set (overriding any previous value). If replacement is
-// non-empty, value will be treated as a regular expression which
-// will be used to search and then replacement will be used to
-// complete the substring replacement; in that case, any + or -
-// prefix to field will be ignored.
-func CaddyfileHeaderOp(ops *HeaderOps, field, value, replacement string) error {
- return applyHeaderOp(ops, nil, field, value, replacement)
-}
-
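-// For example, each prefix form maps onto ops as follows:
-//
-// CaddyfileHeaderOp(ops, "+Via", "caddy", "")  appends Via: caddy
-// CaddyfileHeaderOp(ops, "-Server", "", "")    deletes the Server field
-// CaddyfileHeaderOp(ops, "Location", "http://", "https://")
-//
-// where the last call performs a regex search-and-replace in Location.
-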
-func applyHeaderOp(ops *HeaderOps, respHeaderOps *RespHeaderOps, field, value, replacement string) error {
- switch {
- case strings.HasPrefix(field, "+"): // append
- if ops.Add == nil {
- ops.Add = make(http.Header)
- }
- ops.Add.Set(field[1:], value)
-
- case strings.HasPrefix(field, "-"): // delete
- ops.Delete = append(ops.Delete, field[1:])
- if respHeaderOps != nil {
- respHeaderOps.Deferred = true
- }
-
- case strings.HasPrefix(field, "?"): // default (conditional on not existing) - response headers only
- if respHeaderOps == nil {
- return fmt.Errorf("%v: the default header modifier ('?') can only be used on response headers; for conditional manipulation of request headers, use matchers", field)
- }
- if respHeaderOps.Require == nil {
- respHeaderOps.Require = &caddyhttp.ResponseMatcher{
- Headers: make(http.Header),
- }
- }
- field = strings.TrimPrefix(field, "?")
- respHeaderOps.Require.Headers[field] = nil
- if respHeaderOps.Set == nil {
- respHeaderOps.Set = make(http.Header)
- }
- respHeaderOps.Set.Set(field, value)
-
- case replacement != "": // replace
- if ops.Replace == nil {
- ops.Replace = make(map[string][]Replacement)
- }
- field = strings.TrimLeft(field, "+-?")
- ops.Replace[field] = append(
- ops.Replace[field],
- Replacement{
- SearchRegexp: value,
- Replace: replacement,
- },
- )
-
- default: // set (overwrite)
- if ops.Set == nil {
- ops.Set = make(http.Header)
- }
- ops.Set.Set(field, value)
- }
-
- return nil
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/headers.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/headers.go
deleted file mode 100644
index 3571dd92..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/headers.go
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package headers
-
-import (
- "fmt"
- "net/http"
- "regexp"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
-)
-
-func init() {
- caddy.RegisterModule(Handler{})
-}
-
-// Handler is a middleware which modifies request and response headers.
-//
-// Changes to headers are applied immediately, except for the response
-// headers when Deferred is true or when Required is set. In those cases,
-// the changes are applied when the headers are written to the response.
-// Note that deferred changes do not take effect if an error occurs later
-// in the middleware chain.
-//
-// Properties in this module accept placeholders.
-//
-// Response header operations can be conditioned upon response status code
-// and/or other header values.
-type Handler struct {
- Request *HeaderOps `json:"request,omitempty"`
- Response *RespHeaderOps `json:"response,omitempty"`
-}
-
-// CaddyModule returns the Caddy module information.
-func (Handler) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.handlers.headers",
- New: func() caddy.Module { return new(Handler) },
- }
-}
-
-// Provision sets up h's configuration.
-func (h *Handler) Provision(ctx caddy.Context) error {
- if h.Request != nil {
- err := h.Request.Provision(ctx)
- if err != nil {
- return err
- }
- }
- if h.Response != nil {
- err := h.Response.Provision(ctx)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// Validate ensures h's configuration is valid.
-func (h Handler) Validate() error {
- if h.Request != nil {
- err := h.Request.validate()
- if err != nil {
- return err
- }
- }
- if h.Response != nil {
- err := h.Response.validate()
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
-
- if h.Request != nil {
- h.Request.ApplyToRequest(r)
- }
-
- if h.Response != nil {
- if h.Response.Deferred || h.Response.Require != nil {
- w = &responseWriterWrapper{
- ResponseWriterWrapper: &caddyhttp.ResponseWriterWrapper{ResponseWriter: w},
- replacer: repl,
- require: h.Response.Require,
- headerOps: h.Response.HeaderOps,
- }
- } else {
- h.Response.ApplyTo(w.Header(), repl)
- }
- }
-
- return next.ServeHTTP(w, r)
-}
-
-// HeaderOps defines manipulations for HTTP headers.
-type HeaderOps struct {
- // Adds HTTP headers; does not replace any existing header fields.
- Add http.Header `json:"add,omitempty"`
-
- // Sets HTTP headers; replaces existing header fields.
- Set http.Header `json:"set,omitempty"`
-
- // Names of HTTP header fields to delete.
- Delete []string `json:"delete,omitempty"`
-
- // Performs substring replacements of HTTP headers in-situ.
- Replace map[string][]Replacement `json:"replace,omitempty"`
-}
-
-// Provision sets up the header operations.
-func (ops *HeaderOps) Provision(_ caddy.Context) error {
- for fieldName, replacements := range ops.Replace {
- for i, r := range replacements {
- if r.SearchRegexp != "" {
- re, err := regexp.Compile(r.SearchRegexp)
- if err != nil {
- return fmt.Errorf("replacement %d for header field '%s': %v", i, fieldName, err)
- }
- replacements[i].re = re
- }
- }
- }
- return nil
-}
-
-func (ops HeaderOps) validate() error {
- for fieldName, replacements := range ops.Replace {
- for _, r := range replacements {
- if r.Search != "" && r.SearchRegexp != "" {
- return fmt.Errorf("cannot specify both a substring search and a regular expression search for field '%s'", fieldName)
- }
- }
- }
- return nil
-}
-
-// Replacement describes a string replacement,
-// either a simple and fast substring search
-// or a slower but more powerful regex search.
-type Replacement struct {
- // The substring to search for.
- Search string `json:"search,omitempty"`
-
- // The regular expression to search with.
- SearchRegexp string `json:"search_regexp,omitempty"`
-
- // The string with which to replace matches.
- Replace string `json:"replace,omitempty"`
-
- re *regexp.Regexp
-}
-
-// RespHeaderOps defines manipulations for response headers.
-type RespHeaderOps struct {
- *HeaderOps
-
- // If set, header operations will be deferred until
- // they are written out and only performed if the
- // response matches these criteria.
- Require *caddyhttp.ResponseMatcher `json:"require,omitempty"`
-
- // If true, header operations will be deferred until
-// they are written out. Superseded if Require is set.
- // Usually you will need to set this to true if any
- // fields are being deleted.
- Deferred bool `json:"deferred,omitempty"`
-}
-
-// ApplyTo applies ops to hdr using repl.
-func (ops HeaderOps) ApplyTo(hdr http.Header, repl *caddy.Replacer) {
- // add
- for fieldName, vals := range ops.Add {
- fieldName = repl.ReplaceAll(fieldName, "")
- for _, v := range vals {
- hdr.Add(fieldName, repl.ReplaceAll(v, ""))
- }
- }
-
- // set
- for fieldName, vals := range ops.Set {
- fieldName = repl.ReplaceAll(fieldName, "")
- var newVals []string
- for i := range vals {
- // append to new slice so we don't overwrite
- // the original values in ops.Set
- newVals = append(newVals, repl.ReplaceAll(vals[i], ""))
- }
- hdr.Set(fieldName, strings.Join(newVals, ","))
- }
-
- // delete
- for _, fieldName := range ops.Delete {
- hdr.Del(repl.ReplaceAll(fieldName, ""))
- }
-
- // replace
- for fieldName, replacements := range ops.Replace {
- fieldName = repl.ReplaceAll(fieldName, "")
-
- // all fields...
- if fieldName == "*" {
- for _, r := range replacements {
- search := repl.ReplaceAll(r.Search, "")
- replace := repl.ReplaceAll(r.Replace, "")
- for fieldName, vals := range hdr {
- for i := range vals {
- if r.re != nil {
- hdr[fieldName][i] = r.re.ReplaceAllString(hdr[fieldName][i], replace)
- } else {
- hdr[fieldName][i] = strings.ReplaceAll(hdr[fieldName][i], search, replace)
- }
- }
- }
- }
- continue
- }
-
- // ...or only with the named field
- for _, r := range replacements {
- search := repl.ReplaceAll(r.Search, "")
- replace := repl.ReplaceAll(r.Replace, "")
- for i := range hdr[fieldName] {
- if r.re != nil {
- hdr[fieldName][i] = r.re.ReplaceAllString(hdr[fieldName][i], replace)
- } else {
- hdr[fieldName][i] = strings.ReplaceAll(hdr[fieldName][i], search, replace)
- }
- }
- }
- }
-}
-
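-// A minimal usage sketch (repl would come from the request context
-// in practice):
-//
-// ops := HeaderOps{Set: http.Header{"X-Frame-Options": {"DENY"}}}
-// ops.ApplyTo(w.Header(), repl) // sets X-Frame-Options: DENY
-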
-// ApplyToRequest applies ops to r, specially handling the Host
-// header, which the standard library does not include in the
-// header map with all the others. This method mutates r.Host.
-func (ops HeaderOps) ApplyToRequest(r *http.Request) {
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
-
- // capture the current Host header so we can
- // reset to it when we're done
- origHost, hadHost := r.Header["Host"]
-
- // append r.Host; this way, we know that our value
- // was last in the list, and if an Add operation
- // appended something else after it, that's probably
- // fine because it's weird to have multiple Host
- // headers anyway and presumably the one they added
- // is the one they wanted
- r.Header["Host"] = append(r.Header["Host"], r.Host)
-
- // apply header operations
- ops.ApplyTo(r.Header, repl)
-
- // retrieve the last Host value (likely the one we appended)
- if len(r.Header["Host"]) > 0 {
- r.Host = r.Header["Host"][len(r.Header["Host"])-1]
- } else {
- r.Host = ""
- }
-
- // reset the Host header slice
- if hadHost {
- r.Header["Host"] = origHost
- } else {
- delete(r.Header, "Host")
- }
-}
-
-// responseWriterWrapper defers response header
-// operations until WriteHeader is called.
-type responseWriterWrapper struct {
- *caddyhttp.ResponseWriterWrapper
- replacer *caddy.Replacer
- require *caddyhttp.ResponseMatcher
- headerOps *HeaderOps
- wroteHeader bool
-}
-
-func (rww *responseWriterWrapper) WriteHeader(status int) {
- if rww.wroteHeader {
- return
- }
- rww.wroteHeader = true
- if rww.require == nil || rww.require.Match(status, rww.ResponseWriterWrapper.Header()) {
- if rww.headerOps != nil {
- rww.headerOps.ApplyTo(rww.ResponseWriterWrapper.Header(), rww.replacer)
- }
- }
- rww.ResponseWriterWrapper.WriteHeader(status)
-}
-
-func (rww *responseWriterWrapper) Write(d []byte) (int, error) {
- if !rww.wroteHeader {
- rww.WriteHeader(http.StatusOK)
- }
- return rww.ResponseWriterWrapper.Write(d)
-}
-
-// Interface guards
-var (
- _ caddy.Provisioner = (*Handler)(nil)
- _ caddyhttp.MiddlewareHandler = (*Handler)(nil)
- _ caddyhttp.HTTPInterfaces = (*responseWriterWrapper)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/marshalers.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/marshalers.go
deleted file mode 100644
index 8001bd8f..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/marshalers.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "crypto/tls"
- "net/http"
-
- "go.uber.org/zap/zapcore"
-)
-
-// LoggableHTTPRequest makes an HTTP request loggable with zap.Object().
-type LoggableHTTPRequest struct{ *http.Request }
-
-// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
-func (r LoggableHTTPRequest) MarshalLogObject(enc zapcore.ObjectEncoder) error {
- enc.AddString("remote_addr", r.RemoteAddr)
- enc.AddString("proto", r.Proto)
- enc.AddString("method", r.Method)
- enc.AddString("host", r.Host)
- enc.AddString("uri", r.RequestURI)
- enc.AddObject("headers", LoggableHTTPHeader(r.Header))
- if r.TLS != nil {
- enc.AddObject("tls", LoggableTLSConnState(*r.TLS))
- }
- return nil
-}
-
-// LoggableHTTPHeader makes an HTTP header loggable with zap.Object().
-type LoggableHTTPHeader http.Header
-
-// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
-func (h LoggableHTTPHeader) MarshalLogObject(enc zapcore.ObjectEncoder) error {
- if h == nil {
- return nil
- }
- for key, val := range h {
- enc.AddArray(key, LoggableStringArray(val))
- }
- return nil
-}
-
-// LoggableStringArray makes a slice of strings marshalable for logging.
-type LoggableStringArray []string
-
-// MarshalLogArray satisfies the zapcore.ArrayMarshaler interface.
-func (sa LoggableStringArray) MarshalLogArray(enc zapcore.ArrayEncoder) error {
- if sa == nil {
- return nil
- }
- for _, s := range sa {
- enc.AppendString(s)
- }
- return nil
-}
-
-// LoggableTLSConnState makes a TLS connection state loggable with zap.Object().
-type LoggableTLSConnState tls.ConnectionState
-
-// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
-func (t LoggableTLSConnState) MarshalLogObject(enc zapcore.ObjectEncoder) error {
- enc.AddBool("resumed", t.DidResume)
- enc.AddUint16("version", t.Version)
- enc.AddUint16("cipher_suite", t.CipherSuite)
- enc.AddString("proto", t.NegotiatedProtocol)
- // NegotiatedProtocolIsMutual is deprecated - it's always true
- enc.AddBool("proto_mutual", true)
- enc.AddString("server_name", t.ServerName)
- if len(t.PeerCertificates) > 0 {
- enc.AddString("client_common_name", t.PeerCertificates[0].Subject.CommonName)
- enc.AddString("client_serial", t.PeerCertificates[0].SerialNumber.String())
- }
- return nil
-}
-
-// Interface guards
-var (
- _ zapcore.ObjectMarshaler = (*LoggableHTTPRequest)(nil)
- _ zapcore.ObjectMarshaler = (*LoggableHTTPHeader)(nil)
- _ zapcore.ArrayMarshaler = (*LoggableStringArray)(nil)
- _ zapcore.ObjectMarshaler = (*LoggableTLSConnState)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/matchers.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/matchers.go
deleted file mode 100644
index b452d48e..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/matchers.go
+++ /dev/null
@@ -1,1023 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "encoding/json"
- "fmt"
- "net"
- "net/http"
- "net/textproto"
- "net/url"
- "path/filepath"
- "regexp"
- "sort"
- "strconv"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "go.uber.org/zap"
-)
-
-type (
- // MatchHost matches requests by the Host value (case-insensitive).
- //
- // When used in a top-level HTTP route,
- // [qualifying domain names](/docs/automatic-https#hostname-requirements)
- // may trigger [automatic HTTPS](/docs/automatic-https), which automatically
- // provisions and renews certificates for you. Before doing this, you
- // should ensure that DNS records for these domains are properly configured,
- // especially A/AAAA pointed at your server.
- //
- // Automatic HTTPS can be
- // [customized or disabled](/docs/modules/http#servers/automatic_https).
- //
- // Wildcards (`*`) may be used to represent exactly one label of the
- // hostname, in accordance with RFC 1034 (because host matchers are also
- // used for automatic HTTPS which influences TLS certificates). Thus,
- // a host of `*` matches hosts like `localhost` or `internal` but not
- // `example.com`. To catch all hosts, omit the host matcher entirely.
- //
- // The wildcard can be useful for matching all subdomains, for example:
- // `*.example.com` matches `foo.example.com` but not `foo.bar.example.com`.
- //
- // Duplicate entries will return an error.
- MatchHost []string
-
- // MatchPath matches requests by the URI's path (case-insensitive). Path
- // matches are exact, but wildcards may be used:
- //
- // - At the end, for a prefix match (`/prefix/*`)
- // - At the beginning, for a suffix match (`*.suffix`)
- // - On both sides, for a substring match (`*/contains/*`)
- // - In the middle, for a globular match (`/accounts/*/info`)
- //
- // This matcher is fast, so it does not support regular expressions or
- // capture groups. For slower but more powerful matching, use the
- // path_regexp matcher.
- MatchPath []string
-
- // MatchPathRE matches requests by a regular expression on the URI's path.
- //
- // Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
- // where `name` is the regular expression's name, and `capture_group` is either
- // the named or positional capture group from the expression itself. If no name
- // is given, then the placeholder omits the name: `{http.regexp.capture_group}`
- // (potentially leading to collisions).
- MatchPathRE struct{ MatchRegexp }
-
- // MatchMethod matches requests by the method.
- MatchMethod []string
-
- // MatchQuery matches requests by the URI's query string. It takes a JSON object
- // keyed by the query keys, with an array of string values to match for that key.
- // Query key matches are exact, but wildcards may be used for value matches. Both
- // keys and values may be placeholders.
- // An example of the structure to match `?key=value&topic=api&query=something` is:
- //
- // ```json
- // {
- // "key": ["value"],
- // "topic": ["api"],
- // "query": ["*"]
- // }
- // ```
- MatchQuery url.Values
-
- // MatchHeader matches requests by header fields. It performs fast,
- // exact string comparisons of the field values. Fast prefix, suffix,
- // and substring matches can also be done by suffixing, prefixing, or
- // surrounding the value with the wildcard `*` character, respectively.
- // If a list is null, the header must not exist. If the list is empty,
- // the field must simply exist, regardless of its value.
- MatchHeader http.Header
-
- // MatchHeaderRE matches requests by a regular expression on header fields.
- //
- // Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
- // where `name` is the regular expression's name, and `capture_group` is either
- // the named or positional capture group from the expression itself. If no name
- // is given, then the placeholder omits the name: `{http.regexp.capture_group}`
- // (potentially leading to collisions).
- MatchHeaderRE map[string]*MatchRegexp
-
- // MatchProtocol matches requests by protocol.
- MatchProtocol string
-
- // MatchRemoteIP matches requests by client IP (or CIDR range).
- MatchRemoteIP struct {
- // The IPs or CIDR ranges to match.
- Ranges []string `json:"ranges,omitempty"`
-
- // If true, prefer the first IP in the request's X-Forwarded-For
- // header, if present, rather than the immediate peer's IP, as
- // the reference IP against which to match. Note that it is easy
- // to spoof request headers. Default: false
- Forwarded bool `json:"forwarded,omitempty"`
-
- cidrs []*net.IPNet
- logger *zap.Logger
- }
-
- // MatchNot matches requests by negating the results of its matcher
- // sets. A single "not" matcher takes one or more matcher sets. Each
- // matcher set is OR'ed; in other words, if any matcher set returns
- // true, the final result of the "not" matcher is false. Individual
- // matchers within a set work the same (i.e. different matchers in
- // the same set are AND'ed).
- //
- // Note that the generated docs which describe the structure of
- // this module are wrong because of how this type unmarshals JSON
- // in a custom way. The correct structure is:
- //
- // ```json
- // [
- // {},
- // {}
- // ]
- // ```
- //
- // where each of the array elements is a matcher set, i.e. an
- // object keyed by matcher name.
- MatchNot struct {
- MatcherSetsRaw []caddy.ModuleMap `json:"-" caddy:"namespace=http.matchers"`
- MatcherSets []MatcherSet `json:"-"`
- }
-)
-
-func init() {
- caddy.RegisterModule(MatchHost{})
- caddy.RegisterModule(MatchPath{})
- caddy.RegisterModule(MatchPathRE{})
- caddy.RegisterModule(MatchMethod{})
- caddy.RegisterModule(MatchQuery{})
- caddy.RegisterModule(MatchHeader{})
- caddy.RegisterModule(MatchHeaderRE{})
- caddy.RegisterModule(new(MatchProtocol))
- caddy.RegisterModule(MatchRemoteIP{})
- caddy.RegisterModule(MatchNot{})
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchHost) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.host",
- New: func() caddy.Module { return new(MatchHost) },
- }
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchHost) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- *m = append(*m, d.RemainingArgs()...)
- if d.NextBlock(0) {
- return d.Err("malformed host matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-// Provision sets up and validates m, including making it more efficient for large lists.
-func (m MatchHost) Provision(_ caddy.Context) error {
- // check for duplicates; they are nonsensical and reduce efficiency
- // (we could just remove them, but the user should know their config is erroneous)
- seen := make(map[string]int)
- for i, h := range m {
- h = strings.ToLower(h)
- if firstI, ok := seen[h]; ok {
- return fmt.Errorf("host at index %d is repeated at index %d: %s", firstI, i, h)
- }
- seen[h] = i
- }
-
- if m.large() {
- // sort the slice lexicographically, grouping "fuzzy" entries (wildcards and placeholders)
- // at the front of the list; this allows us to use binary search for exact matches, which
- // we have seen from experience is the most common kind of value in large lists; and any
- // other kinds of values (wildcards and placeholders) are grouped in front so the linear
- // search should find a match fairly quickly
- sort.Slice(m, func(i, j int) bool {
- iInexact, jInexact := m.fuzzy(m[i]), m.fuzzy(m[j])
- if iInexact && !jInexact {
- return true
- }
- if !iInexact && jInexact {
- return false
- }
- return m[i] < m[j]
- })
- }
-
- return nil
-}
-
-// Match returns true if r matches m.
-func (m MatchHost) Match(r *http.Request) bool {
- reqHost, _, err := net.SplitHostPort(r.Host)
- if err != nil {
- // OK; probably didn't have a port
- reqHost = r.Host
-
- // make sure we strip the brackets from IPv6 addresses
- reqHost = strings.TrimPrefix(reqHost, "[")
- reqHost = strings.TrimSuffix(reqHost, "]")
- }
-
- if m.large() {
- // fast path: locate exact match using binary search (about 100-1000x faster for large lists)
- pos := sort.Search(len(m), func(i int) bool {
- if m.fuzzy(m[i]) {
- return false
- }
- return m[i] >= reqHost
- })
- if pos < len(m) && m[pos] == reqHost {
- return true
- }
- }
-
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
-
-outer:
- for _, host := range m {
- // fast path: if matcher is large, we already know we don't have an exact
- // match, so we're only looking for fuzzy match now, which should be at the
- // front of the list; if we have reached a value that is not fuzzy, there
- // will be no match and we can short-circuit for efficiency
- if m.large() && !m.fuzzy(host) {
- break
- }
-
- host = repl.ReplaceAll(host, "")
- if strings.Contains(host, "*") {
- patternParts := strings.Split(host, ".")
- incomingParts := strings.Split(reqHost, ".")
- if len(patternParts) != len(incomingParts) {
- continue
- }
- for i := range patternParts {
- if patternParts[i] == "*" {
- continue
- }
- if !strings.EqualFold(patternParts[i], incomingParts[i]) {
- continue outer
- }
- }
- return true
- } else if strings.EqualFold(reqHost, host) {
- return true
- }
- }
-
- return false
-}
-
-// fuzzy returns true if the given hostname h is not a specific
-// hostname, e.g. has placeholders or wildcards.
-func (MatchHost) fuzzy(h string) bool { return strings.ContainsAny(h, "{*") }
-
-// large returns true if m is considered to be large. Optimizing
-// the matcher for smaller lists has diminishing returns.
-// See related benchmark function in test file to conduct experiments.
-func (m MatchHost) large() bool { return len(m) > 100 }
-
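-// For example, a matcher of ["*.example.com"] matches a request Host
-// of "foo.example.com", but not "example.com" (different label count)
-// and not "a.b.example.com" (the wildcard spans exactly one label).
-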
-// CaddyModule returns the Caddy module information.
-func (MatchPath) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.path",
- New: func() caddy.Module { return new(MatchPath) },
- }
-}
-
-// Provision lower-cases the paths in m to ensure case-insensitive matching.
-func (m MatchPath) Provision(_ caddy.Context) error {
- for i := range m {
- m[i] = strings.ToLower(m[i])
- }
- return nil
-}
-
-// Match returns true if r matches m.
-func (m MatchPath) Match(r *http.Request) bool {
- lowerPath := strings.ToLower(r.URL.Path)
-
- // see #2917; Windows ignores trailing dots and spaces
- // when accessing files (sigh), potentially causing a
- // security risk (cry) if PHP files end up being served
- // as static files, exposing the source code, instead of
- // being matched by *.php to be treated as PHP scripts
- lowerPath = strings.TrimRight(lowerPath, ". ")
-
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
-
- for _, matchPath := range m {
- matchPath = repl.ReplaceAll(matchPath, "")
-
- // special case: whole path is wildcard; this is unnecessary
- // as it matches all requests, which is the same as no matcher
- if matchPath == "*" {
- return true
- }
-
- // special case: first and last characters are wildcard,
- // treat it as a fast substring match
- if len(matchPath) > 1 &&
- strings.HasPrefix(matchPath, "*") &&
- strings.HasSuffix(matchPath, "*") {
- if strings.Contains(lowerPath, matchPath[1:len(matchPath)-1]) {
- return true
- }
- continue
- }
-
- // special case: first character is a wildcard,
- // treat it as a fast suffix match
- if strings.HasPrefix(matchPath, "*") {
- if strings.HasSuffix(lowerPath, matchPath[1:]) {
- return true
- }
- continue
- }
-
- // special case: last character is a wildcard,
- // treat it as a fast prefix match
- if strings.HasSuffix(matchPath, "*") {
- if strings.HasPrefix(lowerPath, matchPath[:len(matchPath)-1]) {
- return true
- }
- continue
- }
-
- // for everything else, try globular matching, which also
- // is exact matching if there are no glob/wildcard chars;
- // can ignore error here because we can't handle it anyway
- matches, _ := filepath.Match(matchPath, lowerPath)
- if matches {
- return true
- }
- }
- return false
-}
-
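-// For example, given the wildcard forms described above:
-//
-// MatchPath{"/foo/*"}     matches /foo/bar    (prefix)
-// MatchPath{"*.php"}      matches /index.php  (suffix)
-// MatchPath{"*/static/*"} matches /a/static/b (substring)
-// MatchPath{"/a/*/c"}     matches /a/b/c      (globular, via filepath.Match)
-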
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchPath) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- *m = append(*m, d.RemainingArgs()...)
- if d.NextBlock(0) {
- return d.Err("malformed path matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchPathRE) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.path_regexp",
- New: func() caddy.Module { return new(MatchPathRE) },
- }
-}
-
-// Match returns true if r matches m.
-func (m MatchPathRE) Match(r *http.Request) bool {
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- return m.MatchRegexp.Match(r.URL.Path, repl)
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchMethod) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.method",
- New: func() caddy.Module { return new(MatchMethod) },
- }
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchMethod) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- *m = append(*m, d.RemainingArgs()...)
- if d.NextBlock(0) {
- return d.Err("malformed method matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-// Match returns true if r matches m.
-func (m MatchMethod) Match(r *http.Request) bool {
- for _, method := range m {
- if r.Method == method {
- return true
- }
- }
- return false
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchQuery) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.query",
- New: func() caddy.Module { return new(MatchQuery) },
- }
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- if *m == nil {
- *m = make(map[string][]string)
- }
- for d.Next() {
- for _, query := range d.RemainingArgs() {
- if query == "" {
- continue
- }
- parts := strings.SplitN(query, "=", 2)
- if len(parts) != 2 {
- return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val())
- }
- url.Values(*m).Add(parts[0], parts[1])
- }
- if d.NextBlock(0) {
- return d.Err("malformed query matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-// Match returns true if r matches m. An empty m matches an empty query string.
-func (m MatchQuery) Match(r *http.Request) bool {
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- for param, vals := range m {
- param = repl.ReplaceAll(param, "")
- paramVal, found := r.URL.Query()[param]
- if found {
- for _, v := range vals {
- v = repl.ReplaceAll(v, "")
- if paramVal[0] == v || v == "*" {
- return true
- }
- }
- }
- }
- return len(m) == 0 && len(r.URL.Query()) == 0
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchHeader) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.header",
- New: func() caddy.Module { return new(MatchHeader) },
- }
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchHeader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- if *m == nil {
- *m = make(map[string][]string)
- }
- for d.Next() {
- var field, val string
- if !d.Args(&field) {
- return d.Errf("malformed header matcher: expected field")
- }
-
- if strings.HasPrefix(field, "!") {
- if len(field) == 1 {
- return d.Errf("malformed header matcher: must have field name following ! character")
- }
-
- field = field[1:]
- headers := *m
- headers[field] = nil
- m = &headers
- if d.NextArg() {
- return d.Errf("malformed header matcher: null matching headers cannot have a field value")
- }
- } else {
- if !d.NextArg() {
- return d.Errf("malformed header matcher: expected both field and value")
- }
-
- // If multiple header matchers with the same header field are defined,
- // we want to add the existing to the list of headers (will be OR'ed)
- val = d.Val()
- http.Header(*m).Add(field, val)
- }
-
- if d.NextBlock(0) {
- return d.Err("malformed header matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-// Match returns true if r matches m.
-func (m MatchHeader) Match(r *http.Request) bool {
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- return matchHeaders(r.Header, http.Header(m), r.Host, repl)
-}
-
-// getHeaderFieldVals returns the field values for the given fieldName from input.
-// The host parameter should be obtained from the http.Request.Host field since
-// net/http removes it from the header map.
-func getHeaderFieldVals(input http.Header, fieldName, host string) []string {
- fieldName = textproto.CanonicalMIMEHeaderKey(fieldName)
- if fieldName == "Host" && host != "" {
- return []string{host}
- }
- return input[fieldName]
-}
-
-// matchHeaders returns true if input matches the criteria in against without regex.
-// The host parameter should be obtained from the http.Request.Host field since
-// net/http removes it from the header map.
-func matchHeaders(input, against http.Header, host string, repl *caddy.Replacer) bool {
- for field, allowedFieldVals := range against {
- actualFieldVals := getHeaderFieldVals(input, field, host)
- if allowedFieldVals != nil && len(allowedFieldVals) == 0 && actualFieldVals != nil {
- // a non-nil but empty list of allowed values means
- // match if the header field exists at all
- continue
- }
- if allowedFieldVals == nil && actualFieldVals == nil {
- // a nil list means match if the header does not exist at all
- continue
- }
- var match bool
- fieldVals:
- for _, actualFieldVal := range actualFieldVals {
- for _, allowedFieldVal := range allowedFieldVals {
- if repl != nil {
- allowedFieldVal = repl.ReplaceAll(allowedFieldVal, "")
- }
- switch {
- case allowedFieldVal == "*":
- match = true
- case strings.HasPrefix(allowedFieldVal, "*") && strings.HasSuffix(allowedFieldVal, "*"):
- match = strings.Contains(actualFieldVal, allowedFieldVal[1:len(allowedFieldVal)-1])
- case strings.HasPrefix(allowedFieldVal, "*"):
- match = strings.HasSuffix(actualFieldVal, allowedFieldVal[1:])
- case strings.HasSuffix(allowedFieldVal, "*"):
- match = strings.HasPrefix(actualFieldVal, allowedFieldVal[:len(allowedFieldVal)-1])
- default:
- match = actualFieldVal == allowedFieldVal
- }
- if match {
- break fieldVals
- }
- }
- }
- if !match {
- return false
- }
- }
- return true
-}
-
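-// For example, an allowed value of "application/*" is a prefix match,
-// "*grpc" is a suffix match, "*json*" is a substring match, and a
-// lone "*" matches any value so long as the field exists.
-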
-// CaddyModule returns the Caddy module information.
-func (MatchHeaderRE) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.header_regexp",
- New: func() caddy.Module { return new(MatchHeaderRE) },
- }
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- if *m == nil {
- *m = make(map[string]*MatchRegexp)
- }
- for d.Next() {
- var first, second, third string
- if !d.Args(&first, &second) {
- return d.ArgErr()
- }
-
- var name, field, val string
- if d.Args(&third) {
- name = first
- field = second
- val = third
- } else {
- field = first
- val = second
- }
-
- (*m)[field] = &MatchRegexp{Pattern: val, Name: name}
-
- if d.NextBlock(0) {
- return d.Err("malformed header_regexp matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-// Match returns true if r matches m.
-func (m MatchHeaderRE) Match(r *http.Request) bool {
- for field, rm := range m {
- actualFieldVals := getHeaderFieldVals(r.Header, field, r.Host)
- match := false
- fieldVal:
- for _, actualFieldVal := range actualFieldVals {
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- if rm.Match(actualFieldVal, repl) {
- match = true
- break fieldVal
- }
- }
- if !match {
- return false
- }
- }
- return true
-}
-
-// Provision compiles m's regular expressions.
-func (m MatchHeaderRE) Provision(ctx caddy.Context) error {
- for _, rm := range m {
- err := rm.Provision(ctx)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// Validate validates m's regular expressions.
-func (m MatchHeaderRE) Validate() error {
- for _, rm := range m {
- err := rm.Validate()
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchProtocol) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.protocol",
- New: func() caddy.Module { return new(MatchProtocol) },
- }
-}
-
-// Match returns true if r matches m.
-func (m MatchProtocol) Match(r *http.Request) bool {
- switch string(m) {
- case "grpc":
- return strings.HasPrefix(r.Header.Get("content-type"), "application/grpc")
- case "https":
- return r.TLS != nil
- case "http":
- return r.TLS == nil
- }
- return false
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchProtocol) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- var proto string
- if !d.Args(&proto) {
- return d.Err("expected exactly one protocol")
- }
- *m = MatchProtocol(proto)
- }
- return nil
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchNot) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.not",
- New: func() caddy.Module { return new(MatchNot) },
- }
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- // first, unmarshal each matcher in the set from its tokens
- type matcherPair struct {
- raw caddy.ModuleMap
- decoded MatcherSet
- }
- for d.Next() {
- var mp matcherPair
- matcherMap := make(map[string]RequestMatcher)
-
- // in case there are multiple instances of the same matcher, concatenate
- // their tokens (we expect that UnmarshalCaddyfile should be able to
- // handle more than one segment); otherwise, we'd overwrite other
- // instances of the matcher in this set
- tokensByMatcherName := make(map[string][]caddyfile.Token)
- for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
- matcherName := d.Val()
- tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
- }
- for matcherName, tokens := range tokensByMatcherName {
- mod, err := caddy.GetModule("http.matchers." + matcherName)
- if err != nil {
- return d.Errf("getting matcher module '%s': %v", matcherName, err)
- }
- unm, ok := mod.New().(caddyfile.Unmarshaler)
- if !ok {
- return d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
- }
- err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
- if err != nil {
- return err
- }
- rm, ok := unm.(RequestMatcher)
- if !ok {
- return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
- }
- matcherMap[matcherName] = rm
- mp.decoded = append(mp.decoded, rm)
- }
-
- // we should now have a functional 'not' matcher, but we also
- // need to be able to marshal as JSON, otherwise config
- // adaptation will be missing the matchers!
- mp.raw = make(caddy.ModuleMap)
- for name, matcher := range matcherMap {
- jsonBytes, err := json.Marshal(matcher)
- if err != nil {
- return fmt.Errorf("marshaling %T matcher: %v", matcher, err)
- }
- mp.raw[name] = jsonBytes
- }
- m.MatcherSetsRaw = append(m.MatcherSetsRaw, mp.raw)
- }
- return nil
-}
-
-// UnmarshalJSON satisfies json.Unmarshaler. It puts the JSON
-// bytes directly into m's MatcherSetsRaw field.
-func (m *MatchNot) UnmarshalJSON(data []byte) error {
- return json.Unmarshal(data, &m.MatcherSetsRaw)
-}
-
-// MarshalJSON satisfies json.Marshaler by marshaling
-// m's raw matcher sets.
-func (m MatchNot) MarshalJSON() ([]byte, error) {
- return json.Marshal(m.MatcherSetsRaw)
-}
-
-// Provision loads the matcher modules to be negated.
-func (m *MatchNot) Provision(ctx caddy.Context) error {
- matcherSets, err := ctx.LoadModule(m, "MatcherSetsRaw")
- if err != nil {
- return fmt.Errorf("loading matcher sets: %v", err)
- }
- for _, modMap := range matcherSets.([]map[string]interface{}) {
- var ms MatcherSet
- for _, modIface := range modMap {
- ms = append(ms, modIface.(RequestMatcher))
- }
- m.MatcherSets = append(m.MatcherSets, ms)
- }
- return nil
-}
-
-// Match returns true if r matches m. Since this matcher negates
-// the embedded matchers, false is returned if any of its matcher
-// sets return true.
-func (m MatchNot) Match(r *http.Request) bool {
- for _, ms := range m.MatcherSets {
- if ms.Match(r) {
- return false
- }
- }
- return true
-}
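
The negation semantics above are easy to get backwards: matchers inside one set are AND-ed, the sets are OR-ed together, and `not` returns true only when every set fails. A reduced sketch of that logic with stub types (the names here are illustrative, not Caddy's):

    package main

    import "fmt"

    // matchFunc stands in for a provisioned RequestMatcher; it takes
    // no request so the sketch stays small.
    type matchFunc func() bool

    type matcherSet []matchFunc

    // match ANDs the matchers in one set, like MatcherSet.Match.
    func (ms matcherSet) match() bool {
        for _, m := range ms {
            if !m() {
                return false
            }
        }
        return true
    }

    // matchNot returns true only if no set matches, like MatchNot.Match.
    func matchNot(sets []matcherSet) bool {
        for _, ms := range sets {
            if ms.match() {
                return false
            }
        }
        return true
    }

    func main() {
        yes := matchFunc(func() bool { return true })
        no := matchFunc(func() bool { return false })
        fmt.Println(matchNot([]matcherSet{{yes, no}}))   // true: the only set fails
        fmt.Println(matchNot([]matcherSet{{no}, {yes}})) // false: the second set matches
    }
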
-
-// CaddyModule returns the Caddy module information.
-func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.remote_ip",
- New: func() caddy.Module { return new(MatchRemoteIP) },
- }
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- for d.NextArg() {
- if d.Val() == "forwarded" {
- if len(m.Ranges) > 0 {
- return d.Err("if used, 'forwarded' must be first argument")
- }
- m.Forwarded = true
- continue
- }
- m.Ranges = append(m.Ranges, d.Val())
- }
- if d.NextBlock(0) {
- return d.Err("malformed remote_ip matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-// Provision parses m's IP ranges, either from IP or CIDR expressions.
-func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
- m.logger = ctx.Logger(m)
- for _, str := range m.Ranges {
- if strings.Contains(str, "/") {
- _, ipNet, err := net.ParseCIDR(str)
- if err != nil {
- return fmt.Errorf("parsing CIDR expression: %v", err)
- }
- m.cidrs = append(m.cidrs, ipNet)
- } else {
- ip := net.ParseIP(str)
- if ip == nil {
- return fmt.Errorf("invalid IP address: %s", str)
- }
- mask := len(ip) * 8
- m.cidrs = append(m.cidrs, &net.IPNet{
- IP: ip,
- Mask: net.CIDRMask(mask, mask),
- })
- }
- }
- return nil
-}
-
-func (m MatchRemoteIP) getClientIP(r *http.Request) (net.IP, error) {
- remote := r.RemoteAddr
- if m.Forwarded {
- if fwdFor := r.Header.Get("X-Forwarded-For"); fwdFor != "" {
- remote = strings.TrimSpace(strings.Split(fwdFor, ",")[0])
- }
- }
- ipStr, _, err := net.SplitHostPort(remote)
- if err != nil {
- ipStr = remote // OK; probably didn't have a port
- }
- ip := net.ParseIP(ipStr)
- if ip == nil {
- return nil, fmt.Errorf("invalid client IP address: %s", ipStr)
- }
- return ip, nil
-}
-
-// Match returns true if r matches m.
-func (m MatchRemoteIP) Match(r *http.Request) bool {
- clientIP, err := m.getClientIP(r)
- if err != nil {
- m.logger.Error("getting client IP", zap.Error(err))
- return false
- }
- for _, ipRange := range m.cidrs {
- if ipRange.Contains(clientIP) {
- return true
- }
- }
- return false
-}
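
One detail in Provision worth calling out: a bare IP is normalized into a full-length CIDR so that Match only ever deals in *net.IPNet values. Since net.ParseIP returns a 16-byte slice even for IPv4, the computed mask is 128 bits over that form, and IPNet.Contains still behaves as expected. A self-contained sketch of the same normalization (parseRange is a hypothetical helper):

    package main

    import (
        "fmt"
        "net"
    )

    // parseRange turns either a CIDR expression or a single IP into a
    // *net.IPNet, like MatchRemoteIP.Provision does.
    func parseRange(s string) (*net.IPNet, error) {
        if _, ipNet, err := net.ParseCIDR(s); err == nil {
            return ipNet, nil
        }
        ip := net.ParseIP(s)
        if ip == nil {
            return nil, fmt.Errorf("invalid IP address: %s", s)
        }
        bits := len(ip) * 8 // full-length mask: the "range" is this one IP
        return &net.IPNet{IP: ip, Mask: net.CIDRMask(bits, bits)}, nil
    }

    func main() {
        for _, s := range []string{"10.0.0.0/8", "192.168.1.7"} {
            ipNet, err := parseRange(s)
            if err != nil {
                panic(err)
            }
            fmt.Println(s, "contains 192.168.1.7:", ipNet.Contains(net.ParseIP("192.168.1.7")))
        }
    }
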
-
-// MatchRegexp is an embeddable type for matching
-// using regular expressions. It adds placeholders
-// to the request's replacer.
-type MatchRegexp struct {
- // A unique name for this regular expression. Optional,
- // but useful to prevent overwriting captures from other
- // regexp matchers.
- Name string `json:"name,omitempty"`
-
- // The regular expression to evaluate, in RE2 syntax,
- // which is the same general syntax used by Go, Perl,
- // and Python. For details, see
- // [Go's regexp package](https://golang.org/pkg/regexp/).
- // Captures are accessible via placeholders. Unnamed
- // capture groups are exposed as their numeric, 1-based
- // index, while named capture groups are available by
- // the capture group name.
- Pattern string `json:"pattern"`
-
- compiled *regexp.Regexp
- phPrefix string
-}
-
-// Provision compiles the regular expression.
-func (mre *MatchRegexp) Provision(caddy.Context) error {
- re, err := regexp.Compile(mre.Pattern)
- if err != nil {
- return fmt.Errorf("compiling matcher regexp %s: %v", mre.Pattern, err)
- }
- mre.compiled = re
- mre.phPrefix = regexpPlaceholderPrefix
- if mre.Name != "" {
- mre.phPrefix += "." + mre.Name
- }
- return nil
-}
-
-// Validate ensures mre is set up correctly.
-func (mre *MatchRegexp) Validate() error {
- if mre.Name != "" && !wordRE.MatchString(mre.Name) {
- return fmt.Errorf("invalid regexp name (must contain only word characters): %s", mre.Name)
- }
- return nil
-}
-
-// Match returns true if input matches the compiled regular
-// expression in mre. It sets values on the replacer repl
-// associated with capture groups, using the given scope
-// (namespace).
-func (mre *MatchRegexp) Match(input string, repl *caddy.Replacer) bool {
- matches := mre.compiled.FindStringSubmatch(input)
- if matches == nil {
- return false
- }
-
- // save all capture groups, first by index
- for i, match := range matches {
- key := mre.phPrefix + "." + strconv.Itoa(i)
- repl.Set(key, match)
- }
-
- // then by name
- for i, name := range mre.compiled.SubexpNames() {
- if i != 0 && name != "" {
- key := mre.phPrefix + "." + name
- repl.Set(key, matches[i])
- }
- }
-
- return true
-}
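
To make the placeholder behavior concrete: a successful match stores every capture group twice, once under its 1-based index and once under its name (when named). With the `http.regexp` prefix defined below, a matcher named `file` would set keys like `http.regexp.file.1` and `http.regexp.file.dir`. A sketch of the same bookkeeping, with a plain map standing in for *caddy.Replacer:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        re := regexp.MustCompile(`^/(?P<dir>\w+)/(?P<name>.+)$`)
        prefix := "http.regexp.file" // regexpPlaceholderPrefix + "." + Name

        repl := map[string]string{} // stand-in for the request's replacer
        matches := re.FindStringSubmatch("/d9/foo.txt")

        // by index (index 0 is the whole match)
        for i, m := range matches {
            repl[fmt.Sprintf("%s.%d", prefix, i)] = m
        }
        // then by capture-group name
        for i, name := range re.SubexpNames() {
            if i != 0 && name != "" {
                repl[prefix+"."+name] = matches[i]
            }
        }
        fmt.Println(repl["http.regexp.file.1"])    // d9
        fmt.Println(repl["http.regexp.file.dir"])  // d9
        fmt.Println(repl["http.regexp.file.name"]) // foo.txt
    }
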
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- args := d.RemainingArgs()
- switch len(args) {
- case 1:
- mre.Pattern = args[0]
- case 2:
- mre.Name = args[0]
- mre.Pattern = args[1]
- default:
- return d.ArgErr()
- }
- if d.NextBlock(0) {
- return d.Err("malformed path_regexp matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-var wordRE = regexp.MustCompile(`\w+`)
-
-const regexpPlaceholderPrefix = "http.regexp"
-
-// Interface guards
-var (
- _ RequestMatcher = (*MatchHost)(nil)
- _ caddy.Provisioner = (*MatchHost)(nil)
- _ RequestMatcher = (*MatchPath)(nil)
- _ RequestMatcher = (*MatchPathRE)(nil)
- _ caddy.Provisioner = (*MatchPathRE)(nil)
- _ RequestMatcher = (*MatchMethod)(nil)
- _ RequestMatcher = (*MatchQuery)(nil)
- _ RequestMatcher = (*MatchHeader)(nil)
- _ RequestMatcher = (*MatchHeaderRE)(nil)
- _ caddy.Provisioner = (*MatchHeaderRE)(nil)
- _ RequestMatcher = (*MatchProtocol)(nil)
- _ RequestMatcher = (*MatchRemoteIP)(nil)
- _ caddy.Provisioner = (*MatchRemoteIP)(nil)
- _ RequestMatcher = (*MatchNot)(nil)
- _ caddy.Provisioner = (*MatchNot)(nil)
- _ caddy.Provisioner = (*MatchRegexp)(nil)
-
- _ caddyfile.Unmarshaler = (*MatchHost)(nil)
- _ caddyfile.Unmarshaler = (*MatchPath)(nil)
- _ caddyfile.Unmarshaler = (*MatchPathRE)(nil)
- _ caddyfile.Unmarshaler = (*MatchMethod)(nil)
- _ caddyfile.Unmarshaler = (*MatchQuery)(nil)
- _ caddyfile.Unmarshaler = (*MatchHeader)(nil)
- _ caddyfile.Unmarshaler = (*MatchHeaderRE)(nil)
- _ caddyfile.Unmarshaler = (*MatchProtocol)(nil)
- _ caddyfile.Unmarshaler = (*MatchRemoteIP)(nil)
- _ caddyfile.Unmarshaler = (*VarsMatcher)(nil)
- _ caddyfile.Unmarshaler = (*MatchVarsRE)(nil)
-
- _ json.Marshaler = (*MatchNot)(nil)
- _ json.Unmarshaler = (*MatchNot)(nil)
-)
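
The var block above is the standard Go "interface guard" idiom: each `_ Iface = (*Type)(nil)` line costs nothing at runtime but turns a missing or mistyped method into a compile error. A minimal reproduction of the pattern:

    package main

    import "fmt"

    type RequestMatcher interface{ Match() bool }

    type MatchAlways struct{}

    func (MatchAlways) Match() bool { return true }

    // Compile-time guard: if MatchAlways ever loses Match(), this
    // assignment stops compiling, long before any test runs.
    var _ RequestMatcher = (*MatchAlways)(nil)

    func main() { fmt.Println(MatchAlways{}.Match()) }
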
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/metrics.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/metrics.go
deleted file mode 100644
index 3e5d6396..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/metrics.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package caddyhttp
-
-import (
- "context"
- "net/http"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
-)
-
-var httpMetrics = struct {
- init sync.Once
- requestInFlight *prometheus.GaugeVec
- requestCount *prometheus.CounterVec
- requestErrors *prometheus.CounterVec
- requestDuration *prometheus.HistogramVec
- requestSize *prometheus.HistogramVec
- responseSize *prometheus.HistogramVec
- responseDuration *prometheus.HistogramVec
-}{
- init: sync.Once{},
-}
-
-func initHTTPMetrics() {
- const ns, sub = "caddy", "http"
-
- basicLabels := []string{"server", "handler"}
- httpMetrics.requestInFlight = promauto.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: ns,
- Subsystem: sub,
- Name: "requests_in_flight",
- Help: "Number of requests currently handled by this server.",
- }, basicLabels)
- httpMetrics.requestErrors = promauto.NewCounterVec(prometheus.CounterOpts{
- Namespace: ns,
- Subsystem: sub,
- Name: "request_errors_total",
- Help: "Number of requests resulting in middleware errors.",
- }, basicLabels)
- httpMetrics.requestCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Namespace: ns,
- Subsystem: sub,
- Name: "requests_total",
- Help: "Counter of HTTP(S) requests made.",
- }, basicLabels)
-
- // TODO: allow these to be customized in the config
- durationBuckets := prometheus.DefBuckets
- sizeBuckets := prometheus.ExponentialBuckets(256, 4, 8)
-
- httpLabels := []string{"server", "handler", "code", "method"}
- httpMetrics.requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: ns,
- Subsystem: sub,
- Name: "request_duration_seconds",
- Help: "Histogram of round-trip request durations.",
- Buckets: durationBuckets,
- }, httpLabels)
- httpMetrics.requestSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: ns,
- Subsystem: sub,
- Name: "request_size_bytes",
- Help: "Total size of the request. Includes body",
- Buckets: sizeBuckets,
- }, httpLabels)
- httpMetrics.responseSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: ns,
- Subsystem: sub,
- Name: "response_size_bytes",
- Help: "Size of the returned response.",
- Buckets: sizeBuckets,
- }, httpLabels)
- httpMetrics.responseDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: ns,
- Subsystem: sub,
- Name: "response_duration_seconds",
- Help: "Histogram of times to first byte in response bodies.",
- Buckets: durationBuckets,
- }, httpLabels)
-}
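
The promauto constructors used above register each collector with the default registry at creation time, which is why initialization is guarded by sync.Once further up: a second promauto.New* call with the same fully-qualified name panics with a duplicate-registration error. A small self-contained example of the same pattern (the metric names here are hypothetical):

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promauto"
    )

    // Registered once at package init; promauto panics on duplicates.
    var requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
        Namespace: "demo",
        Subsystem: "http",
        Name:      "request_duration_seconds",
        Help:      "Histogram of request durations.",
        Buckets:   prometheus.DefBuckets, // 5ms .. 10s by default
    }, []string{"code", "method"})

    func main() {
        // One observation lands in the buckets for this label pair.
        requestDuration.With(prometheus.Labels{"code": "200", "method": "GET"}).Observe(0.042)
        fmt.Println("observed one request")
    }
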
-
-// serverNameFromContext extracts the current server name from the context.
-// Returns "UNKNOWN" if none is available (should probably never happen).
-func serverNameFromContext(ctx context.Context) string {
- srv, ok := ctx.Value(ServerCtxKey).(*Server)
- if !ok || srv == nil || srv.name == "" {
- return "UNKNOWN"
- }
- return srv.name
-}
-
-type metricsInstrumentedHandler struct {
- handler string
- mh MiddlewareHandler
-}
-
-func newMetricsInstrumentedHandler(handler string, mh MiddlewareHandler) *metricsInstrumentedHandler {
- httpMetrics.init.Do(func() {
- initHTTPMetrics()
- })
-
- return &metricsInstrumentedHandler{handler, mh}
-}
-
-func (h *metricsInstrumentedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
- server := serverNameFromContext(r.Context())
- labels := prometheus.Labels{"server": server, "handler": h.handler}
- method := strings.ToUpper(r.Method)
- // the "code" value is set later, but initialized here to eliminate the possibility
- // of a panic
- statusLabels := prometheus.Labels{"server": server, "handler": h.handler, "method": method, "code": ""}
-
- inFlight := httpMetrics.requestInFlight.With(labels)
- inFlight.Inc()
- defer inFlight.Dec()
-
- start := time.Now()
-
- // This is a _bit_ of a hack - it depends on the ShouldBufferFunc always
- // being called when the headers are written.
- // Effectively the same behaviour as promhttp.InstrumentHandlerTimeToWriteHeader.
- writeHeaderRecorder := ShouldBufferFunc(func(status int, header http.Header) bool {
- statusLabels["code"] = sanitizeCode(status)
- ttfb := time.Since(start).Seconds()
- httpMetrics.responseDuration.With(statusLabels).Observe(ttfb)
- return false
- })
- wrec := NewResponseRecorder(w, nil, writeHeaderRecorder)
- err := h.mh.ServeHTTP(wrec, r, next)
- dur := time.Since(start).Seconds()
- httpMetrics.requestCount.With(labels).Inc()
- if err != nil {
- httpMetrics.requestErrors.With(labels).Inc()
- return err
- }
-
- // If the code hasn't been set yet, and we didn't encounter an error, we're
- // probably falling through with an empty handler.
- if statusLabels["code"] == "" {
- // we still sanitize it, even though it's likely to be 0. A 200 is
- // returned on fallthrough so we want to reflect that.
- statusLabels["code"] = sanitizeCode(wrec.Status())
- }
-
- httpMetrics.requestDuration.With(statusLabels).Observe(dur)
- httpMetrics.requestSize.With(statusLabels).Observe(float64(computeApproximateRequestSize(r)))
- httpMetrics.responseSize.With(statusLabels).Observe(float64(wrec.Size()))
-
- return nil
-}
-
-func sanitizeCode(code int) string {
- if code == 0 {
- return "200"
- }
- return strconv.Itoa(code)
-}
-
-// taken from https://github.com/prometheus/client_golang/blob/6007b2b5cae01203111de55f753e76d8dac1f529/prometheus/promhttp/instrument_server.go#L298
-func computeApproximateRequestSize(r *http.Request) int {
- s := 0
- if r.URL != nil {
- s += len(r.URL.String())
- }
-
- s += len(r.Method)
- s += len(r.Proto)
- for name, values := range r.Header {
- s += len(name)
- for _, value := range values {
- s += len(value)
- }
- }
- s += len(r.Host)
-
- // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
-
- if r.ContentLength != -1 {
- s += int(r.ContentLength)
- }
- return s
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/replacer.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/replacer.go
deleted file mode 100644
index d0767f09..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/replacer.go
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "bytes"
- "context"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rsa"
- "crypto/sha256"
- "crypto/tls"
- "crypto/x509"
- "encoding/asn1"
- "encoding/pem"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/textproto"
- "net/url"
- "path"
- "strconv"
- "strings"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/modules/caddytls"
-)
-
-// NewTestReplacer creates a replacer for an http.Request
-// for use in tests that are not in this package
-func NewTestReplacer(req *http.Request) *caddy.Replacer {
- repl := caddy.NewReplacer()
- ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
- *req = *req.WithContext(ctx)
- addHTTPVarsToReplacer(repl, req, nil)
- return repl
-}
-
-func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.ResponseWriter) {
- SetVar(req.Context(), "start_time", time.Now())
-
- httpVars := func(key string) (interface{}, bool) {
- if req != nil {
- // query string parameters
- if strings.HasPrefix(key, reqURIQueryReplPrefix) {
- vals := req.URL.Query()[key[len(reqURIQueryReplPrefix):]]
- // always return true, since the query param might
- // be present only in some requests
- return strings.Join(vals, ","), true
- }
-
- // request header fields
- if strings.HasPrefix(key, reqHeaderReplPrefix) {
- field := key[len(reqHeaderReplPrefix):]
- vals := req.Header[textproto.CanonicalMIMEHeaderKey(field)]
- // always return true, since the header field might
- // be present only in some requests
- return strings.Join(vals, ","), true
- }
-
- // cookies
- if strings.HasPrefix(key, reqCookieReplPrefix) {
- name := key[len(reqCookieReplPrefix):]
- for _, cookie := range req.Cookies() {
- if strings.EqualFold(name, cookie.Name) {
- // always return true, since the cookie might
- // be present only in some requests
- return cookie.Value, true
- }
- }
- }
-
- // http.request.tls.*
- if strings.HasPrefix(key, reqTLSReplPrefix) {
- return getReqTLSReplacement(req, key)
- }
-
- switch key {
- case "http.request.method":
- return req.Method, true
- case "http.request.scheme":
- if req.TLS != nil {
- return "https", true
- }
- return "http", true
- case "http.request.proto":
- return req.Proto, true
- case "http.request.host":
- host, _, err := net.SplitHostPort(req.Host)
- if err != nil {
- return req.Host, true // OK; there probably was no port
- }
- return host, true
- case "http.request.port":
- _, port, _ := net.SplitHostPort(req.Host)
- if portNum, err := strconv.Atoi(port); err == nil {
- return portNum, true
- }
- return port, true
- case "http.request.hostport":
- return req.Host, true
- case "http.request.remote":
- return req.RemoteAddr, true
- case "http.request.remote.host":
- host, _, err := net.SplitHostPort(req.RemoteAddr)
- if err != nil {
- return req.RemoteAddr, true
- }
- return host, true
- case "http.request.remote.port":
- _, port, _ := net.SplitHostPort(req.RemoteAddr)
- if portNum, err := strconv.Atoi(port); err == nil {
- return portNum, true
- }
- return port, true
-
- // current URI, including any internal rewrites
- case "http.request.uri":
- return req.URL.RequestURI(), true
- case "http.request.uri.path":
- return req.URL.Path, true
- case "http.request.uri.path.file":
- _, file := path.Split(req.URL.Path)
- return file, true
- case "http.request.uri.path.dir":
- dir, _ := path.Split(req.URL.Path)
- return dir, true
- case "http.request.uri.query":
- return req.URL.RawQuery, true
- case "http.request.duration":
- start := GetVar(req.Context(), "start_time").(time.Time)
- return time.Since(start), true
- case "http.request.body":
- if req.Body == nil {
- return "", true
- }
- // normally net/http will close the body for us, but since we
- // are replacing it with a fake one, we have to ensure we close
- // the real body ourselves when we're done
- defer req.Body.Close()
- // read the request body into a buffer (can't pool because we
- // don't know its lifetime and would have to make a copy anyway)
- buf := new(bytes.Buffer)
- _, err := io.Copy(buf, req.Body)
- if err != nil {
- return "", true
- }
- // replace real body with buffered data
- req.Body = ioutil.NopCloser(buf)
- return buf.String(), true
-
- // original request, before any internal changes
- case "http.request.orig_method":
- or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
- return or.Method, true
- case "http.request.orig_uri":
- or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
- return or.RequestURI, true
- case "http.request.orig_uri.path":
- or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
- return or.URL.Path, true
- case "http.request.orig_uri.path.file":
- or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
- _, file := path.Split(or.URL.Path)
- return file, true
- case "http.request.orig_uri.path.dir":
- or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
- dir, _ := path.Split(or.URL.Path)
- return dir, true
- case "http.request.orig_uri.query":
- or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
- return or.URL.RawQuery, true
- }
-
- // hostname labels
- if strings.HasPrefix(key, reqHostLabelsReplPrefix) {
- idxStr := key[len(reqHostLabelsReplPrefix):]
- idx, err := strconv.Atoi(idxStr)
- if err != nil || idx < 0 {
- return "", false
- }
- reqHost, _, err := net.SplitHostPort(req.Host)
- if err != nil {
- reqHost = req.Host // OK; assume there was no port
- }
- hostLabels := strings.Split(reqHost, ".")
- if idx >= len(hostLabels) {
- return "", true
- }
- return hostLabels[len(hostLabels)-idx-1], true
- }
-
- // path parts
- if strings.HasPrefix(key, reqURIPathReplPrefix) {
- idxStr := key[len(reqURIPathReplPrefix):]
- idx, err := strconv.Atoi(idxStr)
- if err != nil {
- return "", false
- }
- pathParts := strings.Split(req.URL.Path, "/")
- if len(pathParts) > 0 && pathParts[0] == "" {
- pathParts = pathParts[1:]
- }
- if idx < 0 {
- return "", false
- }
- if idx >= len(pathParts) {
- return "", true
- }
- return pathParts[idx], true
- }
-
- // middleware variables
- if strings.HasPrefix(key, varsReplPrefix) {
- varName := key[len(varsReplPrefix):]
- tbl := req.Context().Value(VarsCtxKey).(map[string]interface{})
- raw := tbl[varName]
- // variables can be dynamic, so always return true
- // even when it may not be set; treat as empty then
- return raw, true
- }
- }
-
- if w != nil {
- // response header fields
- if strings.HasPrefix(key, respHeaderReplPrefix) {
- field := key[len(respHeaderReplPrefix):]
- vals := w.Header()[textproto.CanonicalMIMEHeaderKey(field)]
- // always return true, since the header field might
- // be present only in some responses
- return strings.Join(vals, ","), true
- }
- }
-
- return nil, false
- }
-
- repl.Map(httpVars)
-}
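
Every prefixed lookup in httpVars follows the same pattern: test the key with strings.HasPrefix, slice off the prefix, and resolve the remainder (a header field, cookie name, host label index, and so on). A stripped-down version of that dispatch for the header family (lookup is a hypothetical helper):

    package main

    import (
        "fmt"
        "net/http"
        "net/textproto"
        "strings"
    )

    const reqHeaderReplPrefix = "http.request.header."

    // lookup resolves one placeholder key against a header map, the same
    // prefix-slice-resolve shape httpVars uses for every key family.
    func lookup(key string, header http.Header) (string, bool) {
        if strings.HasPrefix(key, reqHeaderReplPrefix) {
            field := key[len(reqHeaderReplPrefix):]
            vals := header[textproto.CanonicalMIMEHeaderKey(field)]
            // report found even when empty: the field may exist only on some requests
            return strings.Join(vals, ","), true
        }
        return "", false
    }

    func main() {
        h := http.Header{"Accept": []string{"text/html", "application/json"}}
        v, ok := lookup("http.request.header.accept", h)
        fmt.Println(v, ok) // text/html,application/json true
    }
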
-
-func getReqTLSReplacement(req *http.Request, key string) (interface{}, bool) {
- if req == nil || req.TLS == nil {
- return nil, false
- }
-
- if len(key) < len(reqTLSReplPrefix) {
- return nil, false
- }
-
- field := strings.ToLower(key[len(reqTLSReplPrefix):])
-
- if strings.HasPrefix(field, "client.") {
- cert := getTLSPeerCert(req.TLS)
- if cert == nil {
- return nil, false
- }
-
- // subject alternate names (SANs)
- if strings.HasPrefix(field, "client.san.") {
- field = field[len("client.san."):]
- var fieldName string
- var fieldValue interface{}
- switch {
- case strings.HasPrefix(field, "dns_names"):
- fieldName = "dns_names"
- fieldValue = cert.DNSNames
- case strings.HasPrefix(field, "emails"):
- fieldName = "emails"
- fieldValue = cert.EmailAddresses
- case strings.HasPrefix(field, "ips"):
- fieldName = "ips"
- fieldValue = cert.IPAddresses
- case strings.HasPrefix(field, "uris"):
- fieldName = "uris"
- fieldValue = cert.URIs
- default:
- return nil, false
- }
- field = field[len(fieldName):]
-
- // if no index was specified, return the whole list
- if field == "" {
- return fieldValue, true
- }
- if len(field) < 2 || field[0] != '.' {
- return nil, false
- }
- field = field[1:] // trim '.' between field name and index
-
- // get the numeric index
- idx, err := strconv.Atoi(field)
- if err != nil || idx < 0 {
- return nil, false
- }
-
- // access the indexed element and return it
- switch v := fieldValue.(type) {
- case []string:
- if idx >= len(v) {
- return nil, true
- }
- return v[idx], true
- case []net.IP:
- if idx >= len(v) {
- return nil, true
- }
- return v[idx], true
- case []*url.URL:
- if idx >= len(v) {
- return nil, true
- }
- return v[idx], true
- }
- }
-
- switch field {
- case "client.fingerprint":
- return fmt.Sprintf("%x", sha256.Sum256(cert.Raw)), true
- case "client.public_key", "client.public_key_sha256":
- if cert.PublicKey == nil {
- return nil, true
- }
- pubKeyBytes, err := marshalPublicKey(cert.PublicKey)
- if err != nil {
- return nil, true
- }
- if strings.HasSuffix(field, "_sha256") {
- return fmt.Sprintf("%x", sha256.Sum256(pubKeyBytes)), true
- }
- return fmt.Sprintf("%x", pubKeyBytes), true
- case "client.issuer":
- return cert.Issuer, true
- case "client.serial":
- return cert.SerialNumber, true
- case "client.subject":
- return cert.Subject, true
- case "client.certificate_pem":
- block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
- return pem.EncodeToMemory(&block), true
- default:
- return nil, false
- }
- }
-
- switch field {
- case "version":
- return caddytls.ProtocolName(req.TLS.Version), true
- case "cipher_suite":
- return tls.CipherSuiteName(req.TLS.CipherSuite), true
- case "resumed":
- return req.TLS.DidResume, true
- case "proto":
- return req.TLS.NegotiatedProtocol, true
- case "proto_mutual":
- // req.TLS.NegotiatedProtocolIsMutual is deprecated - it's always true.
- return true, true
- case "server_name":
- return req.TLS.ServerName, true
- }
- return nil, false
-}
-
-// marshalPublicKey returns the byte encoding of pubKey.
-func marshalPublicKey(pubKey interface{}) ([]byte, error) {
- switch key := pubKey.(type) {
- case *rsa.PublicKey:
- return asn1.Marshal(key)
- case *ecdsa.PublicKey:
- return elliptic.Marshal(key.Curve, key.X, key.Y), nil
- case ed25519.PublicKey:
- return key, nil
- }
- return nil, fmt.Errorf("unrecognized public key type: %T", pubKey)
-}
-
-// getTLSPeerCert retrieves the first peer certificate from a TLS session.
-// Returns nil if no peer cert is in use.
-func getTLSPeerCert(cs *tls.ConnectionState) *x509.Certificate {
- if len(cs.PeerCertificates) == 0 {
- return nil
- }
- return cs.PeerCertificates[0]
-}
-
-const (
- reqCookieReplPrefix = "http.request.cookie."
- reqHeaderReplPrefix = "http.request.header."
- reqHostLabelsReplPrefix = "http.request.host.labels."
- reqTLSReplPrefix = "http.request.tls."
- reqURIPathReplPrefix = "http.request.uri.path."
- reqURIQueryReplPrefix = "http.request.uri.query."
- respHeaderReplPrefix = "http.response.header."
- varsReplPrefix = "http.vars."
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsematchers.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsematchers.go
deleted file mode 100644
index d9ad8480..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsematchers.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "net/http"
- "strconv"
- "strings"
-
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
-)
-
-// ResponseMatcher is a type which can determine if an
-// HTTP response matches some criteria.
-type ResponseMatcher struct {
- // If set, one of these status codes would be required.
- // A one-digit status can be used to represent all codes
- // in that class (e.g. 3 for all 3xx codes).
- StatusCode []int `json:"status_code,omitempty"`
-
- // If set, each header specified must be one of the
- // specified values, with the same logic used by the
- // request header matcher.
- Headers http.Header `json:"headers,omitempty"`
-}
-
-// Match returns true if the given statusCode and hdr match rm.
-func (rm ResponseMatcher) Match(statusCode int, hdr http.Header) bool {
- if !rm.matchStatusCode(statusCode) {
- return false
- }
- return matchHeaders(hdr, rm.Headers, "", nil)
-}
-
-func (rm ResponseMatcher) matchStatusCode(statusCode int) bool {
- if rm.StatusCode == nil {
- return true
- }
- for _, code := range rm.StatusCode {
- if StatusCodeMatches(statusCode, code) {
- return true
- }
- }
- return false
-}
-
-// ParseNamedResponseMatcher parses the tokens of a named response matcher.
-//
-//     @name {
-//         header <field> [<value>]
-//         status <code...>
-//     }
-//
-// Or, single line syntax:
-//
-//     @name [header <field> [<value>]] | [status <code...>]
-//
-func ParseNamedResponseMatcher(d *caddyfile.Dispenser, matchers map[string]ResponseMatcher) error {
- for d.Next() {
- definitionName := d.Val()
-
- if _, ok := matchers[definitionName]; ok {
- return d.Errf("matcher is defined more than once: %s", definitionName)
- }
-
- matcher := ResponseMatcher{}
- for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
- switch d.Val() {
- case "header":
- if matcher.Headers == nil {
- matcher.Headers = http.Header{}
- }
-
- // reuse the header request matcher's unmarshaler
- headerMatcher := MatchHeader(matcher.Headers)
- err := headerMatcher.UnmarshalCaddyfile(d.NewFromNextSegment())
- if err != nil {
- return err
- }
-
- matcher.Headers = http.Header(headerMatcher)
- case "status":
- if matcher.StatusCode == nil {
- matcher.StatusCode = []int{}
- }
-
- args := d.RemainingArgs()
- if len(args) == 0 {
- return d.ArgErr()
- }
-
- for _, arg := range args {
- if len(arg) == 3 && strings.HasSuffix(arg, "xx") {
- arg = arg[:1]
- }
- statusNum, err := strconv.Atoi(arg)
- if err != nil {
- return d.Errf("bad status value '%s': %v", arg, err)
- }
- matcher.StatusCode = append(matcher.StatusCode, statusNum)
- }
- default:
- return d.Errf("unrecognized response matcher %s", d.Val())
- }
- }
-
- matchers[definitionName] = matcher
- }
- return nil
-}
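
One detail above: a status argument like `3xx` is truncated to its first digit before Atoi, so the matcher stores `3`, and the class expansion happens at match time via StatusCodeMatches. A sketch of that comparison (the helper name and logic are reconstructed here for illustration, not copied from Caddy):

    package main

    import "fmt"

    // statusMatches reports whether actual matches configured, where a
    // one-digit configured value means "any status in that class".
    func statusMatches(actual, configured int) bool {
        if configured < 10 {
            return actual/100 == configured
        }
        return actual == configured
    }

    func main() {
        fmt.Println(statusMatches(301, 3))   // true: "3xx" was stored as 3
        fmt.Println(statusMatches(404, 3))   // false
        fmt.Println(statusMatches(404, 404)) // true: exact code
    }
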
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsewriter.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsewriter.go
deleted file mode 100644
index 0ffb9320..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsewriter.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "net"
- "net/http"
-)
-
-// ResponseWriterWrapper wraps an underlying ResponseWriter and
-// promotes its Pusher/Flusher/Hijacker methods as well. To use
-// this type, embed a pointer to it within your own struct type
-// that implements the http.ResponseWriter interface, then call
-// methods on the embedded value. You can make sure your type
-// wraps correctly by asserting that it implements the
-// HTTPInterfaces interface.
-type ResponseWriterWrapper struct {
- http.ResponseWriter
-}
-
-// Hijack implements http.Hijacker. It simply calls the underlying
-// ResponseWriter's Hijack method if there is one, or returns
-// ErrNotImplemented otherwise.
-func (rww *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- if hj, ok := rww.ResponseWriter.(http.Hijacker); ok {
- return hj.Hijack()
- }
- return nil, nil, ErrNotImplemented
-}
-
-// Flush implements http.Flusher. It simply calls the underlying
-// ResponseWriter's Flush method if there is one.
-func (rww *ResponseWriterWrapper) Flush() {
- if f, ok := rww.ResponseWriter.(http.Flusher); ok {
- f.Flush()
- }
-}
-
-// Push implements http.Pusher. It simply calls the underlying
-// ResponseWriter's Push method if there is one, or returns
-// ErrNotImplemented otherwise.
-func (rww *ResponseWriterWrapper) Push(target string, opts *http.PushOptions) error {
- if pusher, ok := rww.ResponseWriter.(http.Pusher); ok {
- return pusher.Push(target, opts)
- }
- return ErrNotImplemented
-}
-
-// HTTPInterfaces mix all the interfaces that middleware ResponseWriters need to support.
-type HTTPInterfaces interface {
- http.ResponseWriter
- http.Pusher
- http.Flusher
- http.Hijacker
-}
-
-// ErrNotImplemented is returned when an underlying
-// ResponseWriter does not implement the required method.
-var ErrNotImplemented = fmt.Errorf("method not implemented")
-
-type responseRecorder struct {
- *ResponseWriterWrapper
- statusCode int
- buf *bytes.Buffer
- shouldBuffer ShouldBufferFunc
- size int
- wroteHeader bool
- stream bool
-}
-
-// NewResponseRecorder returns a new ResponseRecorder that can be
-// used instead of a standard http.ResponseWriter. The recorder is
-// useful for middlewares which need to buffer a response and
-// potentially process its entire body before actually writing the
-// response to the underlying writer. Of course, buffering the entire
-// body has a memory overhead, but sometimes there is no way to avoid
-// buffering the whole response, hence the existence of this type.
-// Still, if at all practical, handlers should strive to stream
-// responses by wrapping Write and WriteHeader methods instead of
-// buffering whole response bodies.
-//
-// Buffering is actually optional. The shouldBuffer function will
-// be called just before the headers are written. If it returns
-// true, the headers and body will be buffered by this recorder
-// and not written to the underlying writer; if false, the headers
-// will be written immediately and the body will be streamed out
-// directly to the underlying writer. If shouldBuffer is nil,
-// the response will never be buffered and will always be streamed
-// directly to the writer.
-//
-// You can know if shouldBuffer returned true by calling Buffered().
-//
-// The provided buffer buf should be obtained from a pool for best
-// performance (see the sync.Pool type).
-//
-// Proper usage of a recorder looks like this:
-//
-// rec := caddyhttp.NewResponseRecorder(w, buf, shouldBuffer)
-// err := next.ServeHTTP(rec, req)
-// if err != nil {
-// return err
-// }
-// if !rec.Buffered() {
-// return nil
-// }
-// // process the buffered response here
-//
-// The header map is not buffered; i.e. the ResponseRecorder's Header()
-// method returns the same header map of the underlying ResponseWriter.
-// This is a crucial design decision to allow HTTP trailers to be
-// flushed properly (https://github.com/caddyserver/caddy/issues/3236).
-//
-// Once you are ready to write the response, there are two ways you can
-// do it. The easier way is to have the recorder do it:
-//
-// rec.WriteResponse()
-//
-// This writes the recorded response headers as well as the buffered body.
-// Or, you may wish to do it yourself, especially if you manipulated the
-// buffered body. First you will need to write the headers with the
-// recorded status code, then write the body (this example writes the
-// recorder's body buffer, but you might have your own body to write
-// instead):
-//
-// w.WriteHeader(rec.Status())
-// io.Copy(w, rec.Buffer())
-//
-func NewResponseRecorder(w http.ResponseWriter, buf *bytes.Buffer, shouldBuffer ShouldBufferFunc) ResponseRecorder {
- return &responseRecorder{
- ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: w},
- buf: buf,
- shouldBuffer: shouldBuffer,
- }
-}
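
A compact, runnable version of the usage pattern described in the doc comment above, with httptest supplying the underlying writer and an always-buffer policy for the demo (assuming the caddyhttp package as vendored here is importable):

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
        "net/http/httptest"

        "github.com/caddyserver/caddy/v2/modules/caddyhttp"
    )

    func main() {
        w := httptest.NewRecorder()
        buf := new(bytes.Buffer) // in real handlers, obtain this from a sync.Pool

        // Buffer everything; returning false would stream straight to w.
        rec := caddyhttp.NewResponseRecorder(w, buf, func(status int, _ http.Header) bool {
            return true
        })

        rec.WriteHeader(http.StatusTeapot)
        fmt.Fprint(rec, "short and stout")

        fmt.Println(rec.Buffered(), rec.Status(), rec.Size()) // true 418 15
        // Nothing has reached w yet; replay the recorded response now.
        if err := rec.WriteResponse(); err != nil {
            panic(err)
        }
        fmt.Println(w.Code, w.Body.String()) // 418 short and stout
    }
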
-
-func (rr *responseRecorder) WriteHeader(statusCode int) {
- if rr.wroteHeader {
- return
- }
- rr.statusCode = statusCode
- rr.wroteHeader = true
-
- // decide whether we should buffer the response
- if rr.shouldBuffer == nil {
- rr.stream = true
- } else {
- rr.stream = !rr.shouldBuffer(rr.statusCode, rr.ResponseWriterWrapper.Header())
- }
-
- // if not buffered, immediately write header
- if rr.stream {
- rr.ResponseWriterWrapper.WriteHeader(rr.statusCode)
- }
-}
-
-func (rr *responseRecorder) Write(data []byte) (int, error) {
- rr.WriteHeader(http.StatusOK)
- var n int
- var err error
- if rr.stream {
- n, err = rr.ResponseWriterWrapper.Write(data)
- } else {
- n, err = rr.buf.Write(data)
- }
- if err == nil {
- rr.size += n
- }
- return n, err
-}
-
-// Status returns the status code that was written, if any.
-func (rr *responseRecorder) Status() int {
- return rr.statusCode
-}
-
-// Size returns the number of bytes written,
-// not including the response headers.
-func (rr *responseRecorder) Size() int {
- return rr.size
-}
-
-// Buffer returns the body buffer that rr was created with.
-// You should still have your original pointer, though.
-func (rr *responseRecorder) Buffer() *bytes.Buffer {
- return rr.buf
-}
-
-// Buffered returns whether rr has decided to buffer the response.
-func (rr *responseRecorder) Buffered() bool {
- return !rr.stream
-}
-
-func (rr *responseRecorder) WriteResponse() error {
- if rr.stream {
- return nil
- }
- if rr.statusCode == 0 {
- // could happen if no handlers actually wrote anything,
- // and this prevents a panic; status must be > 0
- rr.statusCode = http.StatusOK
- }
- rr.ResponseWriterWrapper.WriteHeader(rr.statusCode)
- _, err := io.Copy(rr.ResponseWriterWrapper, rr.buf)
- return err
-}
-
-// ResponseRecorder is a http.ResponseWriter that records
-// responses instead of writing them to the client. See
-// docs for NewResponseRecorder for proper usage.
-type ResponseRecorder interface {
- HTTPInterfaces
- Status() int
- Buffer() *bytes.Buffer
- Buffered() bool
- Size() int
- WriteResponse() error
-}
-
-// ShouldBufferFunc is a function that returns true if the
-// response should be buffered, given the pending HTTP status
-// code and response headers.
-type ShouldBufferFunc func(status int, header http.Header) bool
-
-// Interface guards
-var (
- _ HTTPInterfaces = (*ResponseWriterWrapper)(nil)
- _ ResponseRecorder = (*responseRecorder)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/admin.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/admin.go
deleted file mode 100644
index 25685a3a..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/admin.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reverseproxy
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-func init() {
- caddy.RegisterModule(adminUpstreams{})
-}
-
-// adminUpstreams is a module that provides the
-// /reverse_proxy/upstreams endpoint for the Caddy admin
-// API. This allows for checking the health of configured
-// reverse proxy upstreams in the pool.
-type adminUpstreams struct{}
-
-// upstreamResults holds the status of a particular upstream
-type upstreamStatus struct {
- Address string `json:"address"`
- Healthy bool `json:"healthy"`
- NumRequests int `json:"num_requests"`
- Fails int `json:"fails"`
-}
-
-// CaddyModule returns the Caddy module information.
-func (adminUpstreams) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "admin.api.reverse_proxy",
- New: func() caddy.Module { return new(adminUpstreams) },
- }
-}
-
-// Routes returns a route for the /reverse_proxy/upstreams endpoint.
-func (al adminUpstreams) Routes() []caddy.AdminRoute {
- return []caddy.AdminRoute{
- {
- Pattern: "/reverse_proxy/upstreams",
- Handler: caddy.AdminHandlerFunc(al.handleUpstreams),
- },
- }
-}
-
-// handleUpstreams reports the status of the reverse proxy
-// upstream pool.
-func (adminUpstreams) handleUpstreams(w http.ResponseWriter, r *http.Request) error {
- if r.Method != http.MethodGet {
- return caddy.APIError{
- HTTPStatus: http.StatusMethodNotAllowed,
- Err: fmt.Errorf("method not allowed"),
- }
- }
-
- // Prep for a JSON response
- w.Header().Set("Content-Type", "application/json")
- enc := json.NewEncoder(w)
-
- // Collect the results to respond with
- results := []upstreamStatus{}
-
- // Iterate over the upstream pool (needs to be fast)
- var rangeErr error
- hosts.Range(func(key, val interface{}) bool {
- address, ok := key.(string)
- if !ok {
- rangeErr = caddy.APIError{
- HTTPStatus: http.StatusInternalServerError,
- Err: fmt.Errorf("could not type assert upstream address"),
- }
- return false
- }
-
- upstream, ok := val.(*upstreamHost)
- if !ok {
- rangeErr = caddy.APIError{
- HTTPStatus: http.StatusInternalServerError,
- Err: fmt.Errorf("could not type assert upstream struct"),
- }
- return false
- }
-
- results = append(results, upstreamStatus{
- Address: address,
- Healthy: !upstream.Unhealthy(),
- NumRequests: upstream.NumRequests(),
- Fails: upstream.Fails(),
- })
- return true
- })
-
- // If an error happened during the range, return it
- if rangeErr != nil {
- return rangeErr
- }
-
- err := enc.Encode(results)
- if err != nil {
- return caddy.APIError{
- HTTPStatus: http.StatusInternalServerError,
- Err: err,
- }
- }
-
- return nil
-}
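
Given a running instance, the endpoint can be queried directly. Assuming the admin API listens on its default address, localhost:2019 (this is configurable, so treat the address as an assumption), a client sketch:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    type upstreamStatus struct {
        Address     string `json:"address"`
        Healthy     bool   `json:"healthy"`
        NumRequests int    `json:"num_requests"`
        Fails       int    `json:"fails"`
    }

    func main() {
        resp, err := http.Get("http://localhost:2019/reverse_proxy/upstreams")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var statuses []upstreamStatus
        if err := json.NewDecoder(resp.Body).Decode(&statuses); err != nil {
            panic(err)
        }
        for _, s := range statuses {
            fmt.Printf("%s healthy=%t requests=%d fails=%d\n",
                s.Address, s.Healthy, s.NumRequests, s.Fails)
        }
    }
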
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/caddyfile.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/caddyfile.go
deleted file mode 100644
index c7f555f8..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/caddyfile.go
+++ /dev/null
@@ -1,1033 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reverseproxy
-
-import (
- "log"
- "net"
- "net/http"
- "net/url"
- "reflect"
- "strconv"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
- "github.com/dustin/go-humanize"
-)
-
-func init() {
- httpcaddyfile.RegisterHandlerDirective("reverse_proxy", parseCaddyfile)
-}
-
-func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
- rp := new(Handler)
- err := rp.UnmarshalCaddyfile(h.Dispenser)
- if err != nil {
- return nil, err
- }
- err = rp.FinalizeUnmarshalCaddyfile(h)
- if err != nil {
- return nil, err
- }
- return rp, nil
-}
-
-// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
-//
-//     reverse_proxy [<matcher>] [<upstreams...>] {
-//         # upstreams
-//         to <upstreams...>
-//
-//         # load balancing
-//         lb_policy <name> [<options...>]
-//         lb_try_duration <duration>
-//         lb_try_interval <interval>
-//
-//         # active health checking
-//         health_uri <uri>
-//         health_port <port>
-//         health_interval <interval>
-//         health_timeout <duration>
-//         health_status <status>
-//         health_body <regexp>
-//         health_headers {
-//             <field> [<values...>]
-//         }
-//
-//         # passive health checking
-//         max_fails <num>
-//         fail_duration <duration>
-//         max_conns <num>
-//         unhealthy_status <status>
-//         unhealthy_latency <duration>
-//
-//         # streaming
-//         flush_interval <duration>
-//         buffer_requests
-//
-//         # header manipulation
-//         header_up [+|-]<field> [<value|regexp> [<replacement>]]
-//         header_down [+|-]<field> [<value|regexp> [<replacement>]]
-//
-//         # round trip
-//         transport <name> {
-//             ...
-//         }
-//
-//         # handle responses
-//         @name {
-//             status <code...>
-//             header <field> [<value>]
-//         }
-//         handle_response [<matcher>] [status_code] {
-//             <directives...>
-//         }
-//     }
-//
-// Proxy upstream addresses should be network dial addresses such
-// as `host:port`, or a URL such as `scheme://host:port`. Scheme
-// and port may be inferred from other parts of the address/URL; if
-// either are missing, defaults to HTTP.
-//
-// The FinalizeUnmarshalCaddyfile method should be called after this
-// to finalize parsing of "handle_response" blocks, if possible.
-func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- // currently, all backends must use the same scheme/protocol (the
- // underlying JSON does not yet support per-backend transports)
- var commonScheme string
-
- // we'll wait until the very end of parsing before
- // validating and encoding the transport
- var transport http.RoundTripper
- var transportModuleName string
-
- // collect the response matchers defined as subdirectives
- // prefixed with "@" for use with "handle_response" blocks
- h.responseMatchers = make(map[string]caddyhttp.ResponseMatcher)
-
- // TODO: the logic in this function is kind of sensitive, we need
- // to write tests before making any more changes to it
- upstreamDialAddress := func(upstreamAddr string) (string, error) {
- var network, scheme, host, port string
-
- if strings.Contains(upstreamAddr, "://") {
- // we get a parsing error if a placeholder is specified,
- // so we return a more user-friendly error message that
- // explains what to do instead
- if strings.Contains(upstreamAddr, "{") {
- return "", d.Err("due to parsing difficulties, placeholders are not allowed when an upstream address contains a scheme")
- }
-
- toURL, err := url.Parse(upstreamAddr)
- if err != nil {
- return "", d.Errf("parsing upstream URL: %v", err)
- }
-
- // there is currently no way to perform a URL rewrite between choosing
- // a backend and proxying to it, so we cannot allow extra components
- // in backend URLs
- if toURL.Path != "" || toURL.RawQuery != "" || toURL.Fragment != "" {
- return "", d.Err("for now, URLs for proxy upstreams only support scheme, host, and port components")
- }
-
- // ensure the port and scheme aren't in conflict
- urlPort := toURL.Port()
- if toURL.Scheme == "http" && urlPort == "443" {
- return "", d.Err("upstream address has conflicting scheme (http://) and port (:443, the HTTPS port)")
- }
- if toURL.Scheme == "https" && urlPort == "80" {
- return "", d.Err("upstream address has conflicting scheme (https://) and port (:80, the HTTP port)")
- }
- if toURL.Scheme == "h2c" && urlPort == "443" {
- return "", d.Err("upstream address has conflicting scheme (h2c://) and port (:443, the HTTPS port)")
- }
-
- // if port is missing, attempt to infer from scheme
- if toURL.Port() == "" {
- var toPort string
- switch toURL.Scheme {
- case "", "http", "h2c":
- toPort = "80"
- case "https":
- toPort = "443"
- }
- toURL.Host = net.JoinHostPort(toURL.Hostname(), toPort)
- }
-
- scheme, host, port = toURL.Scheme, toURL.Hostname(), toURL.Port()
- } else {
- // extract network manually, since caddy.ParseNetworkAddress() will always add one
- if idx := strings.Index(upstreamAddr, "/"); idx >= 0 {
- network = strings.ToLower(strings.TrimSpace(upstreamAddr[:idx]))
- upstreamAddr = upstreamAddr[idx+1:]
- }
- var err error
- host, port, err = net.SplitHostPort(upstreamAddr)
- if err != nil {
- host = upstreamAddr
- }
- // we can assume a port if only a hostname is specified, but use of a
- // placeholder without a port likely means a port will be filled in
- if port == "" && !strings.Contains(host, "{") {
- port = "80"
- }
- }
-
- // the underlying JSON does not yet support different
- // transports (protocols or schemes) to each backend,
- // so we remember the last one we see and compare them
- if commonScheme != "" && scheme != commonScheme {
- return "", d.Errf("for now, all proxy upstreams must use the same scheme (transport protocol); expecting '%s://' but got '%s://'",
- commonScheme, scheme)
- }
- commonScheme = scheme
-
- // for simplest possible config, we only need to include
- // the network portion if the user specified one
- if network != "" {
- return caddy.JoinNetworkAddress(network, host, port), nil
- }
-
- // if the host is a placeholder, then we don't want to join with an empty port,
- // because that would just append an extra ':' at the end of the address.
- if port == "" && strings.Contains(host, "{") {
- return host, nil
- }
-
- return net.JoinHostPort(host, port), nil
- }
-
- // appendUpstream creates an upstream for address and adds
- // it to the list. If the address starts with "srv+" it is
- // treated as a SRV-based upstream, and any port will be
- // dropped.
- appendUpstream := func(address string) error {
- isSRV := strings.HasPrefix(address, "srv+")
- if isSRV {
- address = strings.TrimPrefix(address, "srv+")
- }
- dialAddr, err := upstreamDialAddress(address)
- if err != nil {
- return err
- }
- if isSRV {
- if host, _, err := net.SplitHostPort(dialAddr); err == nil {
- dialAddr = host
- }
- h.Upstreams = append(h.Upstreams, &Upstream{LookupSRV: dialAddr})
- } else {
- h.Upstreams = append(h.Upstreams, &Upstream{Dial: dialAddr})
- }
- return nil
- }
-
- for d.Next() {
- for _, up := range d.RemainingArgs() {
- err := appendUpstream(up)
- if err != nil {
- return err
- }
- }
-
- for d.NextBlock(0) {
- // if the subdirective has an "@" prefix then we
- // parse it as a response matcher for use with "handle_response"
- if strings.HasPrefix(d.Val(), matcherPrefix) {
- err := caddyhttp.ParseNamedResponseMatcher(d.NewFromNextSegment(), h.responseMatchers)
- if err != nil {
- return err
- }
- continue
- }
-
- switch d.Val() {
- case "to":
- args := d.RemainingArgs()
- if len(args) == 0 {
- return d.ArgErr()
- }
- for _, up := range args {
- err := appendUpstream(up)
- if err != nil {
- return err
- }
- }
-
- case "lb_policy":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.LoadBalancing != nil && h.LoadBalancing.SelectionPolicyRaw != nil {
- return d.Err("load balancing selection policy already specified")
- }
- name := d.Val()
- modID := "http.reverse_proxy.selection_policies." + name
- unm, err := caddyfile.UnmarshalModule(d, modID)
- if err != nil {
- return err
- }
- sel, ok := unm.(Selector)
- if !ok {
- return d.Errf("module %s (%T) is not a reverseproxy.Selector", modID, unm)
- }
- if h.LoadBalancing == nil {
- h.LoadBalancing = new(LoadBalancing)
- }
- h.LoadBalancing.SelectionPolicyRaw = caddyconfig.JSONModuleObject(sel, "policy", name, nil)
-
- case "lb_try_duration":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.LoadBalancing == nil {
- h.LoadBalancing = new(LoadBalancing)
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad duration value %s: %v", d.Val(), err)
- }
- h.LoadBalancing.TryDuration = caddy.Duration(dur)
-
- case "lb_try_interval":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.LoadBalancing == nil {
- h.LoadBalancing = new(LoadBalancing)
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad interval value '%s': %v", d.Val(), err)
- }
- h.LoadBalancing.TryInterval = caddy.Duration(dur)
-
- case "health_uri":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Active == nil {
- h.HealthChecks.Active = new(ActiveHealthChecks)
- }
- h.HealthChecks.Active.URI = d.Val()
-
- case "health_path":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Active == nil {
- h.HealthChecks.Active = new(ActiveHealthChecks)
- }
- h.HealthChecks.Active.Path = d.Val()
- caddy.Log().Named("config.adapter.caddyfile").Warn("the 'health_path' subdirective is deprecated, please use 'health_uri' instead!")
-
- case "health_port":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Active == nil {
- h.HealthChecks.Active = new(ActiveHealthChecks)
- }
- portNum, err := strconv.Atoi(d.Val())
- if err != nil {
- return d.Errf("bad port number '%s': %v", d.Val(), err)
- }
- h.HealthChecks.Active.Port = portNum
-
- case "health_headers":
- healthHeaders := make(http.Header)
- for d.Next() {
- for d.NextBlock(0) {
- key := d.Val()
- values := d.RemainingArgs()
- if len(values) == 0 {
- values = append(values, "")
- }
- healthHeaders[key] = values
- }
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Active == nil {
- h.HealthChecks.Active = new(ActiveHealthChecks)
- }
- h.HealthChecks.Active.Headers = healthHeaders
-
- case "health_interval":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Active == nil {
- h.HealthChecks.Active = new(ActiveHealthChecks)
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad interval value %s: %v", d.Val(), err)
- }
- h.HealthChecks.Active.Interval = caddy.Duration(dur)
-
- case "health_timeout":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Active == nil {
- h.HealthChecks.Active = new(ActiveHealthChecks)
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad timeout value %s: %v", d.Val(), err)
- }
- h.HealthChecks.Active.Timeout = caddy.Duration(dur)
-
- case "health_status":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Active == nil {
- h.HealthChecks.Active = new(ActiveHealthChecks)
- }
- val := d.Val()
- if len(val) == 3 && strings.HasSuffix(val, "xx") {
- val = val[:1]
- }
- statusNum, err := strconv.Atoi(val)
- if err != nil {
- return d.Errf("bad status value '%s': %v", d.Val(), err)
- }
- h.HealthChecks.Active.ExpectStatus = statusNum
-
- case "health_body":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Active == nil {
- h.HealthChecks.Active = new(ActiveHealthChecks)
- }
- h.HealthChecks.Active.ExpectBody = d.Val()
-
- case "max_fails":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Passive == nil {
- h.HealthChecks.Passive = new(PassiveHealthChecks)
- }
- maxFails, err := strconv.Atoi(d.Val())
- if err != nil {
- return d.Errf("invalid maximum fail count '%s': %v", d.Val(), err)
- }
- h.HealthChecks.Passive.MaxFails = maxFails
-
- case "fail_duration":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Passive == nil {
- h.HealthChecks.Passive = new(PassiveHealthChecks)
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad duration value '%s': %v", d.Val(), err)
- }
- h.HealthChecks.Passive.FailDuration = caddy.Duration(dur)
-
- case "unhealthy_request_count":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Passive == nil {
- h.HealthChecks.Passive = new(PassiveHealthChecks)
- }
- maxConns, err := strconv.Atoi(d.Val())
- if err != nil {
- return d.Errf("invalid maximum connection count '%s': %v", d.Val(), err)
- }
- h.HealthChecks.Passive.UnhealthyRequestCount = maxConns
-
- case "unhealthy_status":
- args := d.RemainingArgs()
- if len(args) == 0 {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Passive == nil {
- h.HealthChecks.Passive = new(PassiveHealthChecks)
- }
- for _, arg := range args {
- if len(arg) == 3 && strings.HasSuffix(arg, "xx") {
- arg = arg[:1]
- }
- statusNum, err := strconv.Atoi(arg)
- if err != nil {
- return d.Errf("bad status value '%s': %v", d.Val(), err)
- }
- h.HealthChecks.Passive.UnhealthyStatus = append(h.HealthChecks.Passive.UnhealthyStatus, statusNum)
- }
-
- case "unhealthy_latency":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.HealthChecks == nil {
- h.HealthChecks = new(HealthChecks)
- }
- if h.HealthChecks.Passive == nil {
- h.HealthChecks.Passive = new(PassiveHealthChecks)
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad duration value '%s': %v", d.Val(), err)
- }
- h.HealthChecks.Passive.UnhealthyLatency = caddy.Duration(dur)
-
- case "flush_interval":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if fi, err := strconv.Atoi(d.Val()); err == nil {
- h.FlushInterval = caddy.Duration(fi)
- } else {
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad duration value '%s': %v", d.Val(), err)
- }
- h.FlushInterval = caddy.Duration(dur)
- }
-
- case "buffer_requests":
- if d.NextArg() {
- return d.ArgErr()
- }
- h.BufferRequests = true
-
- case "buffer_responses":
- if d.NextArg() {
- return d.ArgErr()
- }
- h.BufferResponses = true
-
- case "max_buffer_size":
- if !d.NextArg() {
- return d.ArgErr()
- }
- size, err := strconv.Atoi(d.Val())
- if err != nil {
- return d.Errf("invalid size (bytes): %s", d.Val())
- }
- if d.NextArg() {
- return d.ArgErr()
- }
- h.MaxBufferSize = int64(size)
-
- case "header_up":
- var err error
-
- if h.Headers == nil {
- h.Headers = new(headers.Handler)
- }
- if h.Headers.Request == nil {
- h.Headers.Request = new(headers.HeaderOps)
- }
- args := d.RemainingArgs()
-
- switch len(args) {
- case 1:
- err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], "", "")
- case 2:
- // warn when header_up merely re-sets values the proxy already passes through by default
- if strings.EqualFold(args[0], "host") && (args[1] == "{hostport}" || args[1] == "{http.request.hostport}") {
- log.Printf("[WARNING] Unnecessary header_up ('Host' field): the reverse proxy's default behavior is to pass headers to the upstream")
- }
- if strings.EqualFold(args[0], "x-forwarded-proto") && (args[1] == "{scheme}" || args[1] == "{http.request.scheme}") {
- log.Printf("[WARNING] Unnecessary header_up ('X-Forwarded-Proto' field): the reverse proxy's default behavior is to pass headers to the upstream")
- }
- err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], args[1], "")
- case 3:
- err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], args[1], args[2])
- default:
- return d.ArgErr()
- }
-
- if err != nil {
- return d.Err(err.Error())
- }
-
- case "header_down":
- var err error
-
- if h.Headers == nil {
- h.Headers = new(headers.Handler)
- }
- if h.Headers.Response == nil {
- h.Headers.Response = &headers.RespHeaderOps{
- HeaderOps: new(headers.HeaderOps),
- }
- }
- args := d.RemainingArgs()
- switch len(args) {
- case 1:
- err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], "", "")
- case 2:
- err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], args[1], "")
- case 3:
- err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], args[1], args[2])
- default:
- return d.ArgErr()
- }
-
- if err != nil {
- return d.Err(err.Error())
- }
-
- case "transport":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.TransportRaw != nil {
- return d.Err("transport already specified")
- }
- transportModuleName = d.Val()
- modID := "http.reverse_proxy.transport." + transportModuleName
- unm, err := caddyfile.UnmarshalModule(d, modID)
- if err != nil {
- return err
- }
- rt, ok := unm.(http.RoundTripper)
- if !ok {
- return d.Errf("module %s (%T) is not a RoundTripper", modID, unm)
- }
- transport = rt
-
- case "handle_response":
- // delegate the parsing of handle_response to the caller,
- // since we need the httpcaddyfile.Helper to parse subroutes.
- // See h.FinalizeUnmarshalCaddyfile
- h.handleResponseSegments = append(h.handleResponseSegments, d.NewFromNextSegment())
-
- default:
- return d.Errf("unrecognized subdirective %s", d.Val())
- }
- }
- }
-
- // if the scheme inferred from the backends' addresses is
- // HTTPS, we will need a non-nil transport to enable TLS,
- // or if H2C, to set the transport versions.
- if (commonScheme == "https" || commonScheme == "h2c") && transport == nil {
- transport = new(HTTPTransport)
- transportModuleName = "http"
- }
-
- // verify transport configuration, and finally encode it
- if transport != nil {
- if te, ok := transport.(TLSTransport); ok {
- if commonScheme == "https" && !te.TLSEnabled() {
- err := te.EnableTLS(new(TLSConfig))
- if err != nil {
- return err
- }
- }
- if commonScheme == "http" && te.TLSEnabled() {
- return d.Errf("upstream address scheme is HTTP but transport is configured for HTTP+TLS (HTTPS)")
- }
- if te, ok := transport.(*HTTPTransport); ok && commonScheme == "h2c" {
- te.Versions = []string{"h2c", "2"}
- }
- } else if commonScheme == "https" {
- return d.Errf("upstreams are configured for HTTPS but transport module does not support TLS: %T", transport)
- }
-
- // no need to encode empty default transport
- if !reflect.DeepEqual(transport, new(HTTPTransport)) {
- h.TransportRaw = caddyconfig.JSONModuleObject(transport, "protocol", transportModuleName, nil)
- }
- }
-
- return nil
-}
-
-// FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which
-// requires having an httpcaddyfile.Helper to function, to parse subroutes.
-func (h *Handler) FinalizeUnmarshalCaddyfile(helper httpcaddyfile.Helper) error {
- for _, d := range h.handleResponseSegments {
- // consume the "handle_response" token
- d.Next()
-
- var matcher *caddyhttp.ResponseMatcher
- args := d.RemainingArgs()
-
- // the first arg should be a matcher (optional)
- // the second arg should be a status code (optional)
- // any more than that isn't currently supported
- if len(args) > 2 {
- return d.Errf("too many arguments for 'handle_response': %s", args)
- }
-
- // the first arg should always be a matcher.
- // it doesn't really make sense to support status code without a matcher.
- if len(args) > 0 {
- if !strings.HasPrefix(args[0], matcherPrefix) {
- return d.Errf("must use a named response matcher, starting with '@'")
- }
-
- foundMatcher, ok := h.responseMatchers[args[0]]
- if !ok {
- return d.Errf("no named response matcher defined with name '%s'", args[0][1:])
- }
- matcher = &foundMatcher
- }
-
- // a second arg should be a status code, in which case
- // we skip parsing the block for routes
- if len(args) == 2 {
- _, err := strconv.Atoi(args[1])
- if err != nil {
- return d.Errf("bad integer value '%s': %v", args[1], err)
- }
-
- // make sure there's no block, cause it doesn't make sense
- if d.NextBlock(1) {
- return d.Errf("cannot define routes for 'handle_response' when changing the status code")
- }
-
- h.HandleResponse = append(
- h.HandleResponse,
- caddyhttp.ResponseHandler{
- Match: matcher,
- StatusCode: caddyhttp.WeakString(args[1]),
- },
- )
- continue
- }
-
- // parse the block as routes
- handler, err := httpcaddyfile.ParseSegmentAsSubroute(helper.WithDispenser(d.NewFromNextSegment()))
- if err != nil {
- return err
- }
- subroute, ok := handler.(*caddyhttp.Subroute)
- if !ok {
- return helper.Errf("segment was not parsed as a subroute")
- }
- h.HandleResponse = append(
- h.HandleResponse,
- caddyhttp.ResponseHandler{
- Match: matcher,
- Routes: subroute.Routes,
- },
- )
- }
-
- // move the handle_response entries without a matcher to the end.
- // we can't use sort.SliceStable because it will reorder the rest of the
- // entries which may be undesirable because we don't have a good
- // heuristic to use for sorting.
- withoutMatchers := []caddyhttp.ResponseHandler{}
- withMatchers := []caddyhttp.ResponseHandler{}
- for _, hr := range h.HandleResponse {
- if hr.Match == nil {
- withoutMatchers = append(withoutMatchers, hr)
- } else {
- withMatchers = append(withMatchers, hr)
- }
- }
- h.HandleResponse = append(withMatchers, withoutMatchers...)
-
- // clean up the bits we only needed for adapting
- h.handleResponseSegments = nil
- h.responseMatchers = nil
-
- return nil
-}
-
-// UnmarshalCaddyfile deserializes Caddyfile tokens into h.
-//
-// transport http {
-// read_buffer <size>
-// write_buffer <size>
-// max_response_header <size>
-// dial_timeout <duration>
-// dial_fallback_delay <duration>
-// response_header_timeout <duration>
-// expect_continue_timeout <duration>
-// tls
-// tls_client_auth <automate_name> | <cert_file> <key_file>
-// tls_insecure_skip_verify
-// tls_timeout <duration>
-// tls_trusted_ca_certs <cert_files...>
-// tls_server_name <sni>
-// keepalive [off|<duration>]
-// keepalive_idle_conns <max_count>
-// versions <versions...>
-// compression off
-// max_conns_per_host <count>
-// max_idle_conns_per_host <count>
-// }
-//
-func (h *HTTPTransport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- for d.NextBlock(0) {
- switch d.Val() {
- case "read_buffer":
- if !d.NextArg() {
- return d.ArgErr()
- }
- size, err := humanize.ParseBytes(d.Val())
- if err != nil {
- return d.Errf("invalid read buffer size '%s': %v", d.Val(), err)
- }
- h.ReadBufferSize = int(size)
-
- case "write_buffer":
- if !d.NextArg() {
- return d.ArgErr()
- }
- size, err := humanize.ParseBytes(d.Val())
- if err != nil {
- return d.Errf("invalid write buffer size '%s': %v", d.Val(), err)
- }
- h.WriteBufferSize = int(size)
-
- case "max_response_header":
- if !d.NextArg() {
- return d.ArgErr()
- }
- size, err := humanize.ParseBytes(d.Val())
- if err != nil {
- return d.Errf("invalid max response header size '%s': %v", d.Val(), err)
- }
- h.MaxResponseHeaderSize = int64(size)
-
- case "dial_timeout":
- if !d.NextArg() {
- return d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad timeout value '%s': %v", d.Val(), err)
- }
- h.DialTimeout = caddy.Duration(dur)
-
- case "dial_fallback_delay":
- if !d.NextArg() {
- return d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad fallback delay value '%s': %v", d.Val(), err)
- }
- h.FallbackDelay = caddy.Duration(dur)
-
- case "response_header_timeout":
- if !d.NextArg() {
- return d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad timeout value '%s': %v", d.Val(), err)
- }
- h.ResponseHeaderTimeout = caddy.Duration(dur)
-
- case "expect_continue_timeout":
- if !d.NextArg() {
- return d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad timeout value '%s': %v", d.Val(), err)
- }
- h.ExpectContinueTimeout = caddy.Duration(dur)
-
- case "tls_client_auth":
- if h.TLS == nil {
- h.TLS = new(TLSConfig)
- }
- args := d.RemainingArgs()
- switch len(args) {
- case 1:
- h.TLS.ClientCertificateAutomate = args[0]
- case 2:
- h.TLS.ClientCertificateFile = args[0]
- h.TLS.ClientCertificateKeyFile = args[1]
- default:
- return d.ArgErr()
- }
-
- case "tls":
- if h.TLS == nil {
- h.TLS = new(TLSConfig)
- }
-
- case "tls_insecure_skip_verify":
- if d.NextArg() {
- return d.ArgErr()
- }
- if h.TLS == nil {
- h.TLS = new(TLSConfig)
- }
- h.TLS.InsecureSkipVerify = true
-
- case "tls_timeout":
- if !d.NextArg() {
- return d.ArgErr()
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad timeout value '%s': %v", d.Val(), err)
- }
- if h.TLS == nil {
- h.TLS = new(TLSConfig)
- }
- h.TLS.HandshakeTimeout = caddy.Duration(dur)
-
- case "tls_trusted_ca_certs":
- args := d.RemainingArgs()
- if len(args) == 0 {
- return d.ArgErr()
- }
- if h.TLS == nil {
- h.TLS = new(TLSConfig)
- }
- h.TLS.RootCAPEMFiles = args
-
- case "tls_server_name":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.TLS == nil {
- h.TLS = new(TLSConfig)
- }
- h.TLS.ServerName = d.Val()
-
- case "keepalive":
- if !d.NextArg() {
- return d.ArgErr()
- }
- if h.KeepAlive == nil {
- h.KeepAlive = new(KeepAlive)
- }
- if d.Val() == "off" {
- var disable bool
- h.KeepAlive.Enabled = &disable
- break
- }
- dur, err := caddy.ParseDuration(d.Val())
- if err != nil {
- return d.Errf("bad duration value '%s': %v", d.Val(), err)
- }
- h.KeepAlive.IdleConnTimeout = caddy.Duration(dur)
-
- case "keepalive_idle_conns":
- if !d.NextArg() {
- return d.ArgErr()
- }
- num, err := strconv.Atoi(d.Val())
- if err != nil {
- return d.Errf("bad integer value '%s': %v", d.Val(), err)
- }
- if h.KeepAlive == nil {
- h.KeepAlive = new(KeepAlive)
- }
- h.KeepAlive.MaxIdleConns = num
-
- case "keepalive_idle_conns_per_host":
- if !d.NextArg() {
- return d.ArgErr()
- }
- num, err := strconv.Atoi(d.Val())
- if err != nil {
- return d.Errf("bad integer value '%s': %v", d.Val(), err)
- }
- if h.KeepAlive == nil {
- h.KeepAlive = new(KeepAlive)
- }
- h.KeepAlive.MaxIdleConnsPerHost = num
-
- case "versions":
- h.Versions = d.RemainingArgs()
- if len(h.Versions) == 0 {
- return d.ArgErr()
- }
-
- case "compression":
- if d.NextArg() {
- if d.Val() == "off" {
- var disable bool
- h.Compression = &disable
- }
- }
-
- case "max_conns_per_host":
- if !d.NextArg() {
- return d.ArgErr()
- }
- num, err := strconv.Atoi(d.Val())
- if err != nil {
- return d.Errf("bad integer value '%s': %v", d.Val(), err)
- }
- h.MaxConnsPerHost = num
-
- default:
- return d.Errf("unrecognized subdirective %s", d.Val())
- }
- }
- }
- return nil
-}
-
-const matcherPrefix = "@"
-
-// Interface guards
-var (
- _ caddyfile.Unmarshaler = (*Handler)(nil)
- _ caddyfile.Unmarshaler = (*HTTPTransport)(nil)
-)
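
Aside for reviewers: every `UnmarshalCaddyfile` removed above is the same dispenser-driven token walk. A minimal, self-contained sketch of that pattern, using the `caddyfile` package's real `NewTestDispenser` helper (the `mydirective` input is made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func main() {
	// feed raw Caddyfile input to a dispenser, as this package's tests do
	d := caddyfile.NewTestDispenser(`mydirective {
	dial_timeout 5s
	tls
}`)

	for d.Next() { // consume the directive name itself
		for d.NextBlock(0) { // walk the subdirectives inside the braces
			switch d.Val() {
			case "dial_timeout":
				if !d.NextArg() { // every value read is guarded like this above
					fmt.Println("dial_timeout requires an argument")
					return
				}
				fmt.Println("dial_timeout =", d.Val())
			case "tls":
				fmt.Println("tls enabled")
			default:
				fmt.Println("unrecognized subdirective:", d.Val())
			}
		}
	}
}
```
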
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/command.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/command.go
deleted file mode 100644
index 4a6739e5..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/command.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reverseproxy
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "net"
- "net/http"
- "strconv"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
- caddycmd "github.com/caddyserver/caddy/v2/cmd"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
-)
-
-func init() {
- caddycmd.RegisterCommand(caddycmd.Command{
- Name: "reverse-proxy",
- Func: cmdReverseProxy,
- Usage: "[--from ] [--to ] [--change-host-header]",
- Short: "A quick and production-ready reverse proxy",
- Long: `
-A simple but production-ready reverse proxy. Useful for quick deployments,
-demos, and development.
-
-Simply shuttles HTTP(S) traffic from the --from address to the --to address.
-
-Unless otherwise specified in the addresses, the --from address will be
-assumed to be HTTPS if a hostname is given, and the --to address will be
-assumed to be HTTP.
-
-If the --from address has a host or IP, Caddy will attempt to serve the
-proxy over HTTPS with a certificate (unless overridden by the HTTP scheme
-or port).
-
-If --change-host-header is set, the Host header on the request will be modified
-from its original incoming value to the address of the upstream. (Otherwise, by
-default, all incoming headers are passed through unmodified.)
-`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("reverse-proxy", flag.ExitOnError)
- fs.String("from", "localhost", "Address on which to receive traffic")
- fs.String("to", "", "Upstream address to which to to proxy traffic")
- fs.Bool("change-host-header", false, "Set upstream Host header to address of upstream")
- fs.Bool("insecure", false, "Disable TLS verification (WARNING: DISABLES SECURITY, WHY ARE YOU EVEN USING TLS?)")
- return fs
- }(),
- })
-}
-
-func cmdReverseProxy(fs caddycmd.Flags) (int, error) {
- caddy.TrapSignals()
-
- from := fs.String("from")
- to := fs.String("to")
- changeHost := fs.Bool("change-host-header")
- insecure := fs.Bool("insecure")
-
- httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
- httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
-
- if to == "" {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("--to is required")
- }
-
- // set up the downstream address; assume missing information from given parts
- fromAddr, err := httpcaddyfile.ParseAddress(from)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid downstream address %s: %v", from, err)
- }
- if fromAddr.Path != "" {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("paths are not allowed: %s", from)
- }
- if fromAddr.Scheme == "" {
- if fromAddr.Port == httpPort || fromAddr.Host == "" {
- fromAddr.Scheme = "http"
- } else {
- fromAddr.Scheme = "https"
- }
- }
- if fromAddr.Port == "" {
- if fromAddr.Scheme == "http" {
- fromAddr.Port = httpPort
- } else if fromAddr.Scheme == "https" {
- fromAddr.Port = httpsPort
- }
- }
-
- // set up the upstream address; assume missing information from given parts
- toAddr, err := httpcaddyfile.ParseAddress(to)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid upstream address %s: %v", to, err)
- }
- if toAddr.Path != "" {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("paths are not allowed: %s", to)
- }
- if toAddr.Scheme == "" {
- if toAddr.Port == httpsPort {
- toAddr.Scheme = "https"
- } else {
- toAddr.Scheme = "http"
- }
- }
- if toAddr.Port == "" {
- if toAddr.Scheme == "http" {
- toAddr.Port = httpPort
- } else if toAddr.Scheme == "https" {
- toAddr.Port = httpsPort
- }
- }
-
- // proceed to build the handler and server
-
- ht := HTTPTransport{}
- if toAddr.Scheme == "https" {
- ht.TLS = new(TLSConfig)
- if insecure {
- ht.TLS.InsecureSkipVerify = true
- }
- }
-
- handler := Handler{
- TransportRaw: caddyconfig.JSONModuleObject(ht, "protocol", "http", nil),
- Upstreams: UpstreamPool{{Dial: net.JoinHostPort(toAddr.Host, toAddr.Port)}},
- }
-
- if changeHost {
- handler.Headers = &headers.Handler{
- Request: &headers.HeaderOps{
- Set: http.Header{
- "Host": []string{"{http.reverse_proxy.upstream.hostport}"},
- },
- },
- }
- }
-
- route := caddyhttp.Route{
- HandlersRaw: []json.RawMessage{
- caddyconfig.JSONModuleObject(handler, "handler", "reverse_proxy", nil),
- },
- }
- if fromAddr.Host != "" {
- route.MatcherSetsRaw = []caddy.ModuleMap{
- {
- "host": caddyconfig.JSON(caddyhttp.MatchHost{fromAddr.Host}, nil),
- },
- }
- }
-
- server := &caddyhttp.Server{
- Routes: caddyhttp.RouteList{route},
- Listen: []string{":" + fromAddr.Port},
- }
-
- httpApp := caddyhttp.App{
- Servers: map[string]*caddyhttp.Server{"proxy": server},
- }
-
- cfg := &caddy.Config{
- Admin: &caddy.AdminConfig{Disabled: true},
- AppsRaw: caddy.ModuleMap{
- "http": caddyconfig.JSON(httpApp, nil),
- },
- }
-
- err = caddy.Run(cfg)
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
-
- fmt.Printf("Caddy proxying %s -> %s\n", fromAddr.String(), toAddr.String())
-
- select {}
-}
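
Most of `cmdReverseProxy` above is address normalization: whichever of scheme and port is missing gets inferred from the other. A hedged sketch of just that rule (`inferSchemePort` is my name, not Caddy's):

```go
package main

import "fmt"

// inferSchemePort mirrors the --from defaulting logic deleted above:
// a bare hostname implies HTTPS, an empty host or port 80 implies HTTP,
// and a missing port falls out of the chosen scheme.
func inferSchemePort(scheme, host, port string) (string, string) {
	const httpPort, httpsPort = "80", "443"
	if scheme == "" {
		if port == httpPort || host == "" {
			scheme = "http"
		} else {
			scheme = "https"
		}
	}
	if port == "" {
		if scheme == "http" {
			port = httpPort
		} else {
			port = httpsPort
		}
	}
	return scheme, port
}

func main() {
	s, p := inferSchemePort("", "example.com", "")
	fmt.Println(s, p) // https 443: a named host defaults to HTTPS

	s, p = inferSchemePort("", "", "")
	fmt.Println(s, p) // http 80: no host means plain HTTP
}
```
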
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/healthchecks.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/healthchecks.go
deleted file mode 100644
index 8d5bd77e..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/healthchecks.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reverseproxy
-
-import (
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/url"
- "regexp"
- "runtime/debug"
- "strconv"
- "strings"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
- "go.uber.org/zap"
-)
-
-// HealthChecks configures active and passive health checks.
-type HealthChecks struct {
- // Active health checks run in the background on a timer. To
- // minimally enable active health checks, set either path or
- // port (or both).
- Active *ActiveHealthChecks `json:"active,omitempty"`
-
- // Passive health checks monitor proxied requests for errors or timeouts.
- // To minimally enable passive health checks, specify at least an empty
- // config object.
- Passive *PassiveHealthChecks `json:"passive,omitempty"`
-}
-
-// ActiveHealthChecks holds configuration related to active
-// health checks (that is, health checks which occur in a
-// background goroutine independently).
-type ActiveHealthChecks struct {
- // The path to use for health checks.
- // DEPRECATED: Use 'uri' instead.
- Path string `json:"path,omitempty"`
-
- // The URI (path and query) to use for health checks
- URI string `json:"uri,omitempty"`
-
- // The port to use (if different from the upstream's dial
- // address) for health checks.
- Port int `json:"port,omitempty"`
-
- // HTTP headers to set on health check requests.
- Headers http.Header `json:"headers,omitempty"`
-
- // How frequently to perform active health checks (default 30s).
- Interval caddy.Duration `json:"interval,omitempty"`
-
- // How long to wait for a response from a backend before
- // considering it unhealthy (default 5s).
- Timeout caddy.Duration `json:"timeout,omitempty"`
-
- // The maximum response body to download from the backend
- // during a health check.
- MaxSize int64 `json:"max_size,omitempty"`
-
- // The HTTP status code to expect from a healthy backend.
- ExpectStatus int `json:"expect_status,omitempty"`
-
- // A regular expression against which to match the response
- // body of a healthy backend.
- ExpectBody string `json:"expect_body,omitempty"`
-
- uri *url.URL
- httpClient *http.Client
- bodyRegexp *regexp.Regexp
- logger *zap.Logger
-}
-
-// PassiveHealthChecks holds configuration related to passive
-// health checks (that is, health checks which occur during
-// the normal flow of request proxying).
-type PassiveHealthChecks struct {
- // How long to remember a failed request to a backend. A duration > 0
- // enables passive health checking. Default is 0.
- FailDuration caddy.Duration `json:"fail_duration,omitempty"`
-
- // The number of failed requests within the FailDuration window to
- // consider a backend as "down". Must be >= 1; default is 1. Requires
- // that FailDuration be > 0.
- MaxFails int `json:"max_fails,omitempty"`
-
- // Limits the number of simultaneous requests to a backend by
- // marking the backend as "down" if it has this many concurrent
- // requests or more.
- UnhealthyRequestCount int `json:"unhealthy_request_count,omitempty"`
-
- // Count the request as failed if the response comes back with
- // one of these status codes.
- UnhealthyStatus []int `json:"unhealthy_status,omitempty"`
-
- // Count the request as failed if the response takes at least this
- // long to receive.
- UnhealthyLatency caddy.Duration `json:"unhealthy_latency,omitempty"`
-
- logger *zap.Logger
-}
-
-// CircuitBreaker is a type that can act as an early-warning
-// system for the health checker when backends are getting
-// overloaded. This interface is still experimental and is
-// subject to change.
-type CircuitBreaker interface {
- OK() bool
- RecordMetric(statusCode int, latency time.Duration)
-}
-
-// activeHealthChecker runs active health checks on a
-// regular basis and blocks until
-// h.HealthChecks.Active.stopChan is closed.
-func (h *Handler) activeHealthChecker() {
- defer func() {
- if err := recover(); err != nil {
- log.Printf("[PANIC] active health checks: %v\n%s", err, debug.Stack())
- }
- }()
- ticker := time.NewTicker(time.Duration(h.HealthChecks.Active.Interval))
- h.doActiveHealthCheckForAllHosts()
- for {
- select {
- case <-ticker.C:
- h.doActiveHealthCheckForAllHosts()
- case <-h.ctx.Done():
- ticker.Stop()
- return
- }
- }
-}
-
-// doActiveHealthCheckForAllHosts immediately performs
-// health checks for all upstream hosts configured by h.
-func (h *Handler) doActiveHealthCheckForAllHosts() {
- for _, upstream := range h.Upstreams {
- go func(upstream *Upstream) {
- defer func() {
- if err := recover(); err != nil {
- log.Printf("[PANIC] active health check: %v\n%s", err, debug.Stack())
- }
- }()
-
- networkAddr, err := caddy.NewReplacer().ReplaceOrErr(upstream.Dial, true, true)
- if err != nil {
- h.HealthChecks.Active.logger.Error("invalid use of placeholders in dial address for active health checks",
- zap.String("address", networkAddr),
- zap.Error(err),
- )
- return
- }
- addr, err := caddy.ParseNetworkAddress(networkAddr)
- if err != nil {
- h.HealthChecks.Active.logger.Error("bad network address",
- zap.String("address", networkAddr),
- zap.Error(err),
- )
- return
- }
- if hcp := uint(upstream.activeHealthCheckPort); hcp != 0 {
- if addr.IsUnixNetwork() {
- addr.Network = "tcp" // I guess we just assume TCP since we are using a port??
- }
- addr.StartPort, addr.EndPort = hcp, hcp
- }
- if upstream.LookupSRV == "" && addr.PortRangeSize() != 1 {
- h.HealthChecks.Active.logger.Error("multiple addresses (upstream must map to only one address)",
- zap.String("address", networkAddr),
- )
- return
- }
- hostAddr := addr.JoinHostPort(0)
- dialAddr := hostAddr
- if addr.IsUnixNetwork() {
- // this will be used as the Host portion of a http.Request URL, and
- // paths to socket files would produce an error when creating URL,
- // so use a fake Host value instead; unix sockets are usually local
- hostAddr = "localhost"
- }
- err = h.doActiveHealthCheck(DialInfo{Network: addr.Network, Address: dialAddr}, hostAddr, upstream.Host)
- if err != nil {
- h.HealthChecks.Active.logger.Error("active health check failed",
- zap.String("address", hostAddr),
- zap.Error(err),
- )
- }
- }(upstream)
- }
-}
-
-// doActiveHealthCheck performs a health check to host which
-// can be reached at address hostAddr. The actual address for
-// the request will be built according to active health checker
-// config. The health status of the host will be updated
-// according to whether it passes the health check. An error is
-// returned only if the health check fails to occur or if marking
-// the host's health status fails.
-func (h *Handler) doActiveHealthCheck(dialInfo DialInfo, hostAddr string, host Host) error {
- // create the URL for the request that acts as a health check
- scheme := "http"
- if ht, ok := h.Transport.(TLSTransport); ok && ht.TLSEnabled() {
- // this is kind of a hacky way to know if we should use HTTPS, but whatever
- scheme = "https"
- }
- u := &url.URL{
- Scheme: scheme,
- Host: hostAddr,
- }
-
- // if we have a provisioned uri, use that, otherwise use
- // the deprecated Path option
- if h.HealthChecks.Active.uri != nil {
- u.Path = h.HealthChecks.Active.uri.Path
- u.RawQuery = h.HealthChecks.Active.uri.RawQuery
- } else {
- u.Path = h.HealthChecks.Active.Path
- }
-
- // adjust the port, if configured to be different
- if h.HealthChecks.Active.Port != 0 {
- portStr := strconv.Itoa(h.HealthChecks.Active.Port)
- host, _, err := net.SplitHostPort(hostAddr)
- if err != nil {
- host = hostAddr
- }
- u.Host = net.JoinHostPort(host, portStr)
- }
-
- // attach dialing information to this request
- ctx := h.ctx.Context
- ctx = context.WithValue(ctx, caddy.ReplacerCtxKey, caddy.NewReplacer())
- ctx = context.WithValue(ctx, caddyhttp.VarsCtxKey, map[string]interface{}{
- dialInfoVarKey: dialInfo,
- })
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
- if err != nil {
- return fmt.Errorf("making request: %v", err)
- }
- for key, hdrs := range h.HealthChecks.Active.Headers {
- if strings.ToLower(key) == "host" {
- req.Host = h.HealthChecks.Active.Headers.Get(key)
- } else {
- req.Header[key] = hdrs
- }
- }
-
- // do the request, being careful to tame the response body
- resp, err := h.HealthChecks.Active.httpClient.Do(req)
- if err != nil {
- h.HealthChecks.Active.logger.Info("HTTP request failed",
- zap.String("host", hostAddr),
- zap.Error(err),
- )
- _, err2 := host.SetHealthy(false)
- if err2 != nil {
- return fmt.Errorf("marking unhealthy: %v", err2)
- }
- return nil
- }
- var body io.Reader = resp.Body
- if h.HealthChecks.Active.MaxSize > 0 {
- body = io.LimitReader(body, h.HealthChecks.Active.MaxSize)
- }
- defer func() {
- // drain any remaining body so connection could be re-used
- _, _ = io.Copy(ioutil.Discard, body)
- resp.Body.Close()
- }()
-
- // if status code is outside criteria, mark down
- if h.HealthChecks.Active.ExpectStatus > 0 {
- if !caddyhttp.StatusCodeMatches(resp.StatusCode, h.HealthChecks.Active.ExpectStatus) {
- h.HealthChecks.Active.logger.Info("unexpected status code",
- zap.Int("status_code", resp.StatusCode),
- zap.String("host", hostAddr),
- )
- _, err := host.SetHealthy(false)
- if err != nil {
- return fmt.Errorf("marking unhealthy: %v", err)
- }
- return nil
- }
- } else if resp.StatusCode < 200 || resp.StatusCode >= 400 {
- h.HealthChecks.Active.logger.Info("status code out of tolerances",
- zap.Int("status_code", resp.StatusCode),
- zap.String("host", hostAddr),
- )
- _, err := host.SetHealthy(false)
- if err != nil {
- return fmt.Errorf("marking unhealthy: %v", err)
- }
- return nil
- }
-
- // if body does not match regex, mark down
- if h.HealthChecks.Active.bodyRegexp != nil {
- bodyBytes, err := ioutil.ReadAll(body)
- if err != nil {
- h.HealthChecks.Active.logger.Info("failed to read response body",
- zap.String("host", hostAddr),
- zap.Error(err),
- )
- _, err := host.SetHealthy(false)
- if err != nil {
- return fmt.Errorf("marking unhealthy: %v", err)
- }
- return nil
- }
- if !h.HealthChecks.Active.bodyRegexp.Match(bodyBytes) {
- h.HealthChecks.Active.logger.Info("response body failed expectations",
- zap.String("host", hostAddr),
- )
- _, err := host.SetHealthy(false)
- if err != nil {
- return fmt.Errorf("marking unhealthy: %v", err)
- }
- return nil
- }
- }
-
- // passed health check parameters, so mark as healthy
- swapped, err := host.SetHealthy(true)
- if swapped {
- h.HealthChecks.Active.logger.Info("host is up",
- zap.String("host", hostAddr),
- )
- }
- if err != nil {
- return fmt.Errorf("marking healthy: %v", err)
- }
-
- return nil
-}
-
-// countFailure is used with passive health checks. It
-// remembers 1 failure for upstream for the configured
-// duration. If passive health checks are disabled or
-// failure expiry is 0, this is a no-op.
-func (h *Handler) countFailure(upstream *Upstream) {
- // only count failures if passive health checking is enabled
- // and if failures are configured to have a non-zero expiry
- if h.HealthChecks == nil || h.HealthChecks.Passive == nil {
- return
- }
- failDuration := time.Duration(h.HealthChecks.Passive.FailDuration)
- if failDuration == 0 {
- return
- }
-
- // count failure immediately
- err := upstream.Host.CountFail(1)
- if err != nil {
- h.HealthChecks.Passive.logger.Error("could not count failure",
- zap.String("host", upstream.Dial),
- zap.Error(err))
- return
- }
-
- // forget it later
- go func(host Host, failDuration time.Duration) {
- defer func() {
- if err := recover(); err != nil {
- log.Printf("[PANIC] health check failure forgetter: %v\n%s", err, debug.Stack())
- }
- }()
- time.Sleep(failDuration)
- err := host.CountFail(-1)
- if err != nil {
- h.HealthChecks.Passive.logger.Error("could not forget failure",
- zap.String("host", upstream.Dial),
- zap.Error(err))
- }
- }(upstream.Host, failDuration)
-}
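
The passive-check bookkeeping in `countFailure` above boils down to a count-now, forget-later pattern. Reduced to a bare atomic counter (the real code goes through `Host.CountFail` and recovers from panics):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var fails int64
	failDuration := 500 * time.Millisecond

	countFailure := func() {
		atomic.AddInt64(&fails, 1) // count the failure immediately
		go func() {                // ...and forget it once the window expires
			time.Sleep(failDuration)
			atomic.AddInt64(&fails, -1)
		}()
	}

	countFailure()
	fmt.Println("fails now:", atomic.LoadInt64(&fails)) // 1
	time.Sleep(failDuration + 100*time.Millisecond)
	fmt.Println("fails after window:", atomic.LoadInt64(&fails)) // 0
}
```
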
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/hosts.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/hosts.go
deleted file mode 100644
index b9817d23..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/hosts.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reverseproxy
-
-import (
- "context"
- "fmt"
- "net"
- "net/http"
- "strconv"
- "sync/atomic"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
-)
-
-// Host represents a remote host which can be proxied to.
-// Its methods must be safe for concurrent use.
-type Host interface {
- // NumRequests returns the number of requests
- // currently in process with the host.
- NumRequests() int
-
- // Fails returns the count of recent failures.
- Fails() int
-
- // Unhealthy returns true if the backend is unhealthy.
- Unhealthy() bool
-
- // CountRequest atomically counts the given number of
- // requests as currently in process with the host. The
- // count should not go below 0.
- CountRequest(int) error
-
- // CountFail atomically counts the given number of
- // failures with the host. The count should not go
- // below 0.
- CountFail(int) error
-
- // SetHealthy atomically marks the host as either
- // healthy (true) or unhealthy (false). If the given
- // status is the same, this should be a no-op and
- // return false. It returns true if the status was
- // changed; i.e. if it is now different from before.
- SetHealthy(bool) (bool, error)
-}
-
-// UpstreamPool is a collection of upstreams.
-type UpstreamPool []*Upstream
-
-// Upstream bridges this proxy's configuration to the
-// state of the backend host it is correlated with.
-type Upstream struct {
- Host `json:"-"`
-
- // The [network address](/docs/conventions#network-addresses)
- // to dial to connect to the upstream. Must represent precisely
- // one socket (i.e. no port ranges). A valid network address
- // either has a host and port or is a unix socket address.
- //
- // Placeholders may be used to make the upstream dynamic, but be
- // aware of the health check implications of this: a single
- // upstream that represents numerous (perhaps arbitrary) backends
- // can be considered down if one or enough of the arbitrary
- // backends is down. Also be aware of open proxy vulnerabilities.
- Dial string `json:"dial,omitempty"`
-
- // If DNS SRV records are used for service discovery with this
- // upstream, specify the DNS name for which to look up SRV
- // records here, instead of specifying a dial address.
- LookupSRV string `json:"lookup_srv,omitempty"`
-
- // The maximum number of simultaneous requests to allow to
- // this upstream. If set, overrides the global passive health
- // check UnhealthyRequestCount value.
- MaxRequests int `json:"max_requests,omitempty"`
-
- // TODO: This could be really useful, to bind requests
- // with certain properties to specific backends
- // HeaderAffinity string
- // IPAffinity string
-
- activeHealthCheckPort int
- healthCheckPolicy *PassiveHealthChecks
- cb CircuitBreaker
-}
-
-func (u Upstream) String() string {
- if u.LookupSRV != "" {
- return u.LookupSRV
- }
- return u.Dial
-}
-
-// Available returns true if the remote host
-// is available to receive requests. This is
-// the method that should be used by selection
-// policies, etc. to determine if a backend
-// should be able to be sent a request.
-func (u *Upstream) Available() bool {
- return u.Healthy() && !u.Full()
-}
-
-// Healthy returns true if the remote host
-// is currently known to be healthy or "up".
-// It consults the circuit breaker, if any.
-func (u *Upstream) Healthy() bool {
- healthy := !u.Host.Unhealthy()
- if healthy && u.healthCheckPolicy != nil {
- healthy = u.Host.Fails() < u.healthCheckPolicy.MaxFails
- }
- if healthy && u.cb != nil {
- healthy = u.cb.OK()
- }
- return healthy
-}
-
-// Full returns true if the remote host
-// cannot receive more requests at this time.
-func (u *Upstream) Full() bool {
- return u.MaxRequests > 0 && u.Host.NumRequests() >= u.MaxRequests
-}
-
-// fillDialInfo returns a filled DialInfo for upstream u, using the request
-// context. If the upstream has a SRV lookup configured, that is done and a
-// returned address is chosen; otherwise, the upstream's regular dial address
-// field is used. Note that the returned value is not a pointer.
-func (u *Upstream) fillDialInfo(r *http.Request) (DialInfo, error) {
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- var addr caddy.NetworkAddress
-
- if u.LookupSRV != "" {
- // perform DNS lookup for SRV records and choose one
- srvName := repl.ReplaceAll(u.LookupSRV, "")
- _, records, err := net.DefaultResolver.LookupSRV(r.Context(), "", "", srvName)
- if err != nil {
- return DialInfo{}, err
- }
- addr.Network = "tcp"
- addr.Host = records[0].Target
- addr.StartPort, addr.EndPort = uint(records[0].Port), uint(records[0].Port)
- } else {
- // use provided dial address
- var err error
- dial := repl.ReplaceAll(u.Dial, "")
- addr, err = caddy.ParseNetworkAddress(dial)
- if err != nil {
- return DialInfo{}, fmt.Errorf("upstream %s: invalid dial address %s: %v", u.Dial, dial, err)
- }
- if numPorts := addr.PortRangeSize(); numPorts != 1 {
- return DialInfo{}, fmt.Errorf("upstream %s: dial address must represent precisely one socket: %s represents %d",
- u.Dial, dial, numPorts)
- }
- }
-
- return DialInfo{
- Upstream: u,
- Network: addr.Network,
- Address: addr.JoinHostPort(0),
- Host: addr.Host,
- Port: strconv.Itoa(int(addr.StartPort)),
- }, nil
-}
-
-// upstreamHost is the basic, in-memory representation
-// of the state of a remote host. It implements the
-// Host interface.
-type upstreamHost struct {
- numRequests int64 // must be 64-bit aligned on 32-bit systems (see https://golang.org/pkg/sync/atomic/#pkg-note-BUG)
- fails int64
- unhealthy int32
-}
-
-// NumRequests returns the number of active requests to the upstream.
-func (uh *upstreamHost) NumRequests() int {
- return int(atomic.LoadInt64(&uh.numRequests))
-}
-
-// Fails returns the number of recent failures with the upstream.
-func (uh *upstreamHost) Fails() int {
- return int(atomic.LoadInt64(&uh.fails))
-}
-
-// Unhealthy returns whether the upstream is unhealthy.
-func (uh *upstreamHost) Unhealthy() bool {
- return atomic.LoadInt32(&uh.unhealthy) == 1
-}
-
-// CountRequest mutates the active request count by
-// delta. It returns an error if the adjustment fails.
-func (uh *upstreamHost) CountRequest(delta int) error {
- result := atomic.AddInt64(&uh.numRequests, int64(delta))
- if result < 0 {
- return fmt.Errorf("count below 0: %d", result)
- }
- return nil
-}
-
-// CountFail mutates the recent failures count by
-// delta. It returns an error if the adjustment fails.
-func (uh *upstreamHost) CountFail(delta int) error {
- result := atomic.AddInt64(&uh.fails, int64(delta))
- if result < 0 {
- return fmt.Errorf("count below 0: %d", result)
- }
- return nil
-}
-
-// SetHealthy sets the upstream as healthy or unhealthy
-// and returns true if the new value is different.
-func (uh *upstreamHost) SetHealthy(healthy bool) (bool, error) {
- var unhealthy, compare int32 = 1, 0
- if healthy {
- unhealthy, compare = 0, 1
- }
- swapped := atomic.CompareAndSwapInt32(&uh.unhealthy, compare, unhealthy)
- return swapped, nil
-}
-
-// DialInfo contains information needed to dial a
-// connection to an upstream host. This information
-// may be different than that which is represented
-// in a URL (for example, unix sockets don't have
-// a host that can be represented in a URL, but
-// they certainly have a network name and address).
-type DialInfo struct {
- // Upstream is the Upstream associated with
- // this DialInfo. It may be nil.
- Upstream *Upstream
-
- // The network to use. This should be one of
- // the values that is accepted by net.Dial:
- // https://golang.org/pkg/net/#Dial
- Network string
-
- // The address to dial. Follows the same
- // semantics and rules as net.Dial.
- Address string
-
- // Host and Port are components of Address.
- Host, Port string
-}
-
-// String returns the Caddy network address form
-// by joining the network and address with a
-// forward slash.
-func (di DialInfo) String() string {
- return caddy.JoinNetworkAddress(di.Network, di.Host, di.Port)
-}
-
-// GetDialInfo gets the upstream dialing info out of the context,
-// and returns true if there was a valid value; false otherwise.
-func GetDialInfo(ctx context.Context) (DialInfo, bool) {
- dialInfo, ok := caddyhttp.GetVar(ctx, dialInfoVarKey).(DialInfo)
- return dialInfo, ok
-}
-
-// hosts is the global repository for hosts that are
-// currently in use by active configuration(s). This
-// allows the state of remote hosts to be preserved
-// through config reloads.
-var hosts = caddy.NewUsagePool()
-
-// dialInfoVarKey is the key used for the variable that holds
-// the dial info for the upstream connection.
-const dialInfoVarKey = "reverse_proxy.dial_info"
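
One detail worth noting before this file disappears: `SetHealthy` reports *whether the status changed*, which is what lets the health checker log "host is up" exactly once per transition. The trick is a single compare-and-swap; a standalone sketch:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var unhealthy int32 // 0 = healthy, 1 = unhealthy, as in upstreamHost

	setHealthy := func(healthy bool) bool {
		var newVal, oldVal int32 = 1, 0
		if healthy {
			newVal, oldVal = 0, 1
		}
		// the swap succeeds only when the flag actually flips
		return atomic.CompareAndSwapInt32(&unhealthy, oldVal, newVal)
	}

	fmt.Println(setHealthy(false)) // true: healthy -> unhealthy
	fmt.Println(setHealthy(false)) // false: already unhealthy, no-op
	fmt.Println(setHealthy(true))  // true: unhealthy -> healthy
}
```
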
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/httptransport.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/httptransport.go
deleted file mode 100644
index 19328513..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/httptransport.go
+++ /dev/null
@@ -1,449 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reverseproxy
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "encoding/base64"
- "fmt"
- "io/ioutil"
- weakrand "math/rand"
- "net"
- "net/http"
- "reflect"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/modules/caddytls"
- "golang.org/x/net/http2"
-)
-
-func init() {
- caddy.RegisterModule(HTTPTransport{})
-}
-
-// HTTPTransport is essentially a configuration wrapper for http.Transport.
-// It defines a JSON structure useful when configuring the HTTP transport
-// for Caddy's reverse proxy. It builds its http.Transport at Provision.
-type HTTPTransport struct {
- // TODO: It's possible that other transports (like fastcgi) might be
- // able to borrow/use at least some of these config fields; if so,
- // maybe move them into a type called CommonTransport and embed it?
-
- // Configures the DNS resolver used to resolve the IP address of upstream hostnames.
- Resolver *UpstreamResolver `json:"resolver,omitempty"`
-
- // Configures TLS to the upstream. Setting this to an empty struct
- // is sufficient to enable TLS with reasonable defaults.
- TLS *TLSConfig `json:"tls,omitempty"`
-
- // Configures HTTP Keep-Alive (enabled by default). Should only be
- // necessary if rigorous testing has shown that tuning this helps
- // improve performance.
- KeepAlive *KeepAlive `json:"keep_alive,omitempty"`
-
- // Whether to enable compression to upstream. Default: true
- Compression *bool `json:"compression,omitempty"`
-
- // Maximum number of connections per host. Default: 0 (no limit)
- MaxConnsPerHost int `json:"max_conns_per_host,omitempty"`
-
- // How long to wait before timing out trying to connect to
- // an upstream.
- DialTimeout caddy.Duration `json:"dial_timeout,omitempty"`
-
- // How long to wait before spawning an RFC 6555 Fast Fallback
- // connection. A negative value disables this.
- FallbackDelay caddy.Duration `json:"dial_fallback_delay,omitempty"`
-
- // How long to wait for reading response headers from server.
- ResponseHeaderTimeout caddy.Duration `json:"response_header_timeout,omitempty"`
-
- // The length of time to wait for a server's first response
- // headers after fully writing the request headers if the
- // request has a header "Expect: 100-continue".
- ExpectContinueTimeout caddy.Duration `json:"expect_continue_timeout,omitempty"`
-
- // The maximum bytes to read from response headers.
- MaxResponseHeaderSize int64 `json:"max_response_header_size,omitempty"`
-
- // The size of the write buffer in bytes.
- WriteBufferSize int `json:"write_buffer_size,omitempty"`
-
- // The size of the read buffer in bytes.
- ReadBufferSize int `json:"read_buffer_size,omitempty"`
-
- // The versions of HTTP to support. As a special case, "h2c"
- // can be specified to use H2C (HTTP/2 over Cleartext) to the
- // upstream (this feature is experimental and subject to
- // change or removal). Default: ["1.1", "2"]
- Versions []string `json:"versions,omitempty"`
-
- // The pre-configured underlying HTTP transport.
- Transport *http.Transport `json:"-"`
-
- h2cTransport *http2.Transport
-}
-
-// CaddyModule returns the Caddy module information.
-func (HTTPTransport) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.transport.http",
- New: func() caddy.Module { return new(HTTPTransport) },
- }
-}
-
-// Provision sets up h.Transport with a *http.Transport
-// that is ready to use.
-func (h *HTTPTransport) Provision(ctx caddy.Context) error {
- if len(h.Versions) == 0 {
- h.Versions = []string{"1.1", "2"}
- }
-
- rt, err := h.NewTransport(ctx)
- if err != nil {
- return err
- }
- h.Transport = rt
-
- // if h2c is enabled, configure its transport (std lib http.Transport
- // does not "HTTP/2 over cleartext TCP")
- if sliceContains(h.Versions, "h2c") {
- // crafting our own http2.Transport doesn't allow us to utilize
- // most of the customizations/preferences on the http.Transport,
- // because, for some reason, only http2.ConfigureTransport()
- // is allowed to set the unexported field that refers to a base
- // http.Transport config; oh well
- h2t := &http2.Transport{
- // kind of a hack, but for plaintext/H2C requests, pretend to dial TLS
- DialTLS: func(network, addr string, _ *tls.Config) (net.Conn, error) {
- // TODO: no context, thus potentially wrong dial info
- return net.Dial(network, addr)
- },
- AllowHTTP: true,
- }
- if h.Compression != nil {
- h2t.DisableCompression = !*h.Compression
- }
- h.h2cTransport = h2t
- }
-
- return nil
-}
-
-// NewTransport builds a standard-lib-compatible http.Transport value from h.
-func (h *HTTPTransport) NewTransport(ctx caddy.Context) (*http.Transport, error) {
- dialer := &net.Dialer{
- Timeout: time.Duration(h.DialTimeout),
- FallbackDelay: time.Duration(h.FallbackDelay),
- }
-
- if h.Resolver != nil {
- for _, v := range h.Resolver.Addresses {
- addr, err := caddy.ParseNetworkAddress(v)
- if err != nil {
- return nil, err
- }
- if addr.PortRangeSize() != 1 {
- return nil, fmt.Errorf("resolver address must have exactly one address; cannot call %v", addr)
- }
- h.Resolver.netAddrs = append(h.Resolver.netAddrs, addr)
- }
- d := &net.Dialer{
- Timeout: time.Duration(h.DialTimeout),
- FallbackDelay: time.Duration(h.FallbackDelay),
- }
- dialer.Resolver = &net.Resolver{
- PreferGo: true,
- Dial: func(ctx context.Context, _, _ string) (net.Conn, error) {
- //nolint:gosec
- addr := h.Resolver.netAddrs[weakrand.Intn(len(h.Resolver.netAddrs))]
- return d.DialContext(ctx, addr.Network, addr.JoinHostPort(0))
- },
- }
- }
-
- rt := &http.Transport{
- DialContext: func(ctx context.Context, network, address string) (net.Conn, error) {
- // the proper dialing information should be embedded into the request's context
- if dialInfo, ok := GetDialInfo(ctx); ok {
- network = dialInfo.Network
- address = dialInfo.Address
- }
- conn, err := dialer.DialContext(ctx, network, address)
- if err != nil {
- // identify this error as one that occurred during
- // dialing, which can be important when trying to
- // decide whether to retry a request
- return nil, DialError{err}
- }
- return conn, nil
- },
- MaxConnsPerHost: h.MaxConnsPerHost,
- ResponseHeaderTimeout: time.Duration(h.ResponseHeaderTimeout),
- ExpectContinueTimeout: time.Duration(h.ExpectContinueTimeout),
- MaxResponseHeaderBytes: h.MaxResponseHeaderSize,
- WriteBufferSize: h.WriteBufferSize,
- ReadBufferSize: h.ReadBufferSize,
- }
-
- if h.TLS != nil {
- rt.TLSHandshakeTimeout = time.Duration(h.TLS.HandshakeTimeout)
- var err error
- rt.TLSClientConfig, err = h.TLS.MakeTLSClientConfig(ctx)
- if err != nil {
- return nil, fmt.Errorf("making TLS client config: %v", err)
- }
- }
-
- if h.KeepAlive != nil {
- dialer.KeepAlive = time.Duration(h.KeepAlive.ProbeInterval)
- if h.KeepAlive.Enabled != nil {
- rt.DisableKeepAlives = !*h.KeepAlive.Enabled
- }
- rt.MaxIdleConns = h.KeepAlive.MaxIdleConns
- rt.MaxIdleConnsPerHost = h.KeepAlive.MaxIdleConnsPerHost
- rt.IdleConnTimeout = time.Duration(h.KeepAlive.IdleConnTimeout)
- }
-
- if h.Compression != nil {
- rt.DisableCompression = !*h.Compression
- }
-
- if sliceContains(h.Versions, "2") {
- if err := http2.ConfigureTransport(rt); err != nil {
- return nil, err
- }
- }
-
- return rt, nil
-}
-
-// RoundTrip implements http.RoundTripper.
-func (h *HTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- h.SetScheme(req)
-
- // if H2C ("HTTP/2 over cleartext") is enabled and the upstream request is
- // HTTP/2 without TLS, use the alternate H2C-capable transport instead
- if req.ProtoMajor == 2 && req.URL.Scheme == "http" && h.h2cTransport != nil {
- return h.h2cTransport.RoundTrip(req)
- }
-
- return h.Transport.RoundTrip(req)
-}
-
-// SetScheme ensures that the outbound request req
-// has the scheme set in its URL; the underlying
-// http.Transport requires a scheme to be set.
-func (h *HTTPTransport) SetScheme(req *http.Request) {
- if req.URL.Scheme == "" {
- req.URL.Scheme = "http"
- if h.TLS != nil {
- req.URL.Scheme = "https"
- }
- }
-}
-
-// TLSEnabled returns true if TLS is enabled.
-func (h HTTPTransport) TLSEnabled() bool {
- return h.TLS != nil
-}
-
-// EnableTLS enables TLS on the transport.
-func (h *HTTPTransport) EnableTLS(base *TLSConfig) error {
- h.TLS = base
- return nil
-}
-
-// Cleanup implements caddy.CleanerUpper and closes any idle connections.
-func (h HTTPTransport) Cleanup() error {
- if h.Transport == nil {
- return nil
- }
- h.Transport.CloseIdleConnections()
- return nil
-}
-
-// TLSConfig holds configuration related to the TLS configuration for the
-// transport/client.
-type TLSConfig struct {
- // Optional list of base64-encoded DER-encoded CA certificates to trust.
- RootCAPool []string `json:"root_ca_pool,omitempty"`
-
- // List of PEM-encoded CA certificate files to add to the same trust
- // store as RootCAPool (or root_ca_pool in the JSON).
- RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"`
-
- // PEM-encoded client certificate filename to present to servers.
- ClientCertificateFile string `json:"client_certificate_file,omitempty"`
-
- // PEM-encoded key to use with the client certificate.
- ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"`
-
- // If specified, Caddy will use and automate a client certificate
- // with this subject name.
- ClientCertificateAutomate string `json:"client_certificate_automate,omitempty"`
-
- // If true, TLS verification of server certificates will be disabled.
- // This is insecure and may be removed in the future. Do not use this
- // option except in testing or local development environments.
- InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"`
-
- // The duration to allow a TLS handshake to a server.
- HandshakeTimeout caddy.Duration `json:"handshake_timeout,omitempty"`
-
- // The server name (SNI) to use in TLS handshakes.
- ServerName string `json:"server_name,omitempty"`
-}
-
-// MakeTLSClientConfig returns a tls.Config usable by a client to a backend.
-// If there is no custom TLS configuration, a nil config may be returned.
-func (t TLSConfig) MakeTLSClientConfig(ctx caddy.Context) (*tls.Config, error) {
- cfg := new(tls.Config)
-
- // client auth
- if t.ClientCertificateFile != "" && t.ClientCertificateKeyFile == "" {
- return nil, fmt.Errorf("client_certificate_file specified without client_certificate_key_file")
- }
- if t.ClientCertificateFile == "" && t.ClientCertificateKeyFile != "" {
- return nil, fmt.Errorf("client_certificate_key_file specified without client_certificate_file")
- }
- if t.ClientCertificateFile != "" && t.ClientCertificateKeyFile != "" {
- cert, err := tls.LoadX509KeyPair(t.ClientCertificateFile, t.ClientCertificateKeyFile)
- if err != nil {
- return nil, fmt.Errorf("loading client certificate key pair: %v", err)
- }
- cfg.Certificates = []tls.Certificate{cert}
- }
- if t.ClientCertificateAutomate != "" {
- // TODO: use or enable ctx.IdentityCredentials() ...
- tlsAppIface, err := ctx.App("tls")
- if err != nil {
- return nil, fmt.Errorf("getting tls app: %v", err)
- }
- tlsApp := tlsAppIface.(*caddytls.TLS)
- err = tlsApp.Manage([]string{t.ClientCertificateAutomate})
- if err != nil {
- return nil, fmt.Errorf("managing client certificate: %v", err)
- }
- cfg.GetClientCertificate = func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- certs := tlsApp.AllMatchingCertificates(t.ClientCertificateAutomate)
- var err error
- for _, cert := range certs {
- err = cri.SupportsCertificate(&cert.Certificate)
- if err == nil {
- return &cert.Certificate, nil
- }
- }
- return nil, err
- }
- }
-
- // trusted root CAs
- if len(t.RootCAPool) > 0 || len(t.RootCAPEMFiles) > 0 {
- rootPool := x509.NewCertPool()
- for _, encodedCACert := range t.RootCAPool {
- caCert, err := decodeBase64DERCert(encodedCACert)
- if err != nil {
- return nil, fmt.Errorf("parsing CA certificate: %v", err)
- }
- rootPool.AddCert(caCert)
- }
- for _, pemFile := range t.RootCAPEMFiles {
- pemData, err := ioutil.ReadFile(pemFile)
- if err != nil {
- return nil, fmt.Errorf("failed reading ca cert: %v", err)
- }
- rootPool.AppendCertsFromPEM(pemData)
-
- }
- cfg.RootCAs = rootPool
- }
-
- // custom SNI
- cfg.ServerName = t.ServerName
-
- // throw all security out the window
- cfg.InsecureSkipVerify = t.InsecureSkipVerify
-
- // only return a config if it's not empty
- if reflect.DeepEqual(cfg, new(tls.Config)) {
- return nil, nil
- }
-
- return cfg, nil
-}
-
-// UpstreamResolver holds the set of addresses of DNS resolvers of
-// upstream addresses
-type UpstreamResolver struct {
- // The addresses of DNS resolvers to use when looking up the addresses of proxy upstreams.
- // It accepts [network addresses](/docs/conventions#network-addresses)
- // with a port range of exactly 1. If the host is an IP address, it will be dialed directly to resolve the upstream server.
- // If the host is not an IP address, the addresses are resolved using the [name resolution convention](https://golang.org/pkg/net/#hdr-Name_Resolution) of the Go standard library.
- // If the array contains more than 1 resolver address, one is chosen at random.
- Addresses []string `json:"addresses,omitempty"`
- netAddrs []caddy.NetworkAddress
-}
-
-// KeepAlive holds configuration pertaining to HTTP Keep-Alive.
-type KeepAlive struct {
- // Whether HTTP Keep-Alive is enabled. Default: true
- Enabled *bool `json:"enabled,omitempty"`
-
- // How often to probe for liveness.
- ProbeInterval caddy.Duration `json:"probe_interval,omitempty"`
-
- // Maximum number of idle connections. Default: 0, which means no limit.
- MaxIdleConns int `json:"max_idle_conns,omitempty"`
-
- // Maximum number of idle connections per host. Default: 32.
- MaxIdleConnsPerHost int `json:"max_idle_conns_per_host,omitempty"`
-
- // How long connections should be kept alive when idle. Default: 0, which means no timeout.
- IdleConnTimeout caddy.Duration `json:"idle_timeout,omitempty"`
-}
-
-// decodeBase64DERCert base64-decodes, then DER-decodes, certStr.
-func decodeBase64DERCert(certStr string) (*x509.Certificate, error) {
- // decode base64
- derBytes, err := base64.StdEncoding.DecodeString(certStr)
- if err != nil {
- return nil, err
- }
-
- // parse the DER-encoded certificate
- return x509.ParseCertificate(derBytes)
-}
-
-// sliceContains returns true if needle is in haystack.
-func sliceContains(haystack []string, needle string) bool {
- for _, s := range haystack {
- if s == needle {
- return true
- }
- }
- return false
-}
-
-// Interface guards
-var (
- _ caddy.Provisioner = (*HTTPTransport)(nil)
- _ http.RoundTripper = (*HTTPTransport)(nil)
- _ caddy.CleanerUpper = (*HTTPTransport)(nil)
- _ TLSTransport = (*HTTPTransport)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/reverseproxy.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/reverseproxy.go
deleted file mode 100644
index 671ea044..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/reverseproxy.go
+++ /dev/null
@@ -1,964 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reverseproxy
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "net/textproto"
- "net/url"
- "regexp"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
- "go.uber.org/zap"
- "golang.org/x/net/http/httpguts"
-)
-
-func init() {
- caddy.RegisterModule(Handler{})
-}
-
-// Handler implements a highly configurable and production-ready reverse proxy.
-//
-// Upon proxying, this module sets the following placeholders (which can be used
-// both within and after this handler; for example, in response headers):
-//
-// Placeholder | Description
-// ------------|-------------
-// `{http.reverse_proxy.upstream.address}` | The full address to the upstream as given in the config
-// `{http.reverse_proxy.upstream.hostport}` | The host:port of the upstream
-// `{http.reverse_proxy.upstream.host}` | The host of the upstream
-// `{http.reverse_proxy.upstream.port}` | The port of the upstream
-// `{http.reverse_proxy.upstream.requests}` | The approximate current number of requests to the upstream
-// `{http.reverse_proxy.upstream.max_requests}` | The maximum approximate number of requests allowed to the upstream
-// `{http.reverse_proxy.upstream.fails}` | The number of recent failed requests to the upstream
-// `{http.reverse_proxy.upstream.latency}` | How long it took the proxy upstream to write the response header.
-// `{http.reverse_proxy.upstream.duration}` | Time spent proxying to the upstream, including writing response body to client.
-// `{http.reverse_proxy.duration}` | Total time spent proxying, including selecting an upstream, retries, and writing response.
-type Handler struct {
- // Configures the method of transport for the proxy. A transport
- // is what performs the actual "round trip" to the backend.
- // The default transport is plaintext HTTP.
- TransportRaw json.RawMessage `json:"transport,omitempty" caddy:"namespace=http.reverse_proxy.transport inline_key=protocol"`
-
- // A circuit breaker may be used to relieve pressure on a backend
- // that is beginning to exhibit symptoms of stress or latency.
- // By default, there is no circuit breaker.
- CBRaw json.RawMessage `json:"circuit_breaker,omitempty" caddy:"namespace=http.reverse_proxy.circuit_breakers inline_key=type"`
-
- // Load balancing distributes load/requests between backends.
- LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"`
-
- // Health checks update the status of backends, whether they are
- // up or down. Down backends will not be proxied to.
- HealthChecks *HealthChecks `json:"health_checks,omitempty"`
-
- // Upstreams is the list of backends to proxy to.
- Upstreams UpstreamPool `json:"upstreams,omitempty"`
-
- // Adjusts how often to flush the response buffer. By default,
- // no periodic flushing is done. A negative value disables
- // response buffering, and flushes immediately after each
- // write to the client. This option is ignored when the upstream's
- // response is recognized as a streaming response, or if its
- // content length is -1; for such responses, writes are flushed
- // to the client immediately.
- FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
-
- // Headers manipulates headers between Caddy and the backend.
- // By default, all headers are passed-thru without changes,
- // with the exceptions of special hop-by-hop headers.
- //
- // X-Forwarded-For and X-Forwarded-Proto are also set
- // implicitly, but this may change in the future if the official
- // standardized Forwarded header field gains more adoption.
- Headers *headers.Handler `json:"headers,omitempty"`
-
- // If true, the entire request body will be read and buffered
- // in memory before being proxied to the backend. This should
- // be avoided if at all possible for performance reasons, but
- // could be useful if the backend is intolerant of read latency.
- BufferRequests bool `json:"buffer_requests,omitempty"`
-
- // If true, the entire response body will be read and buffered
- // in memory before being proxied to the client. This should
- // be avoided if at all possible for performance reasons, but
- // could be useful if the backend has tighter memory constraints.
- BufferResponses bool `json:"buffer_responses,omitempty"`
-
- // If body buffering is enabled, the maximum size of the buffers
- // used for the requests and responses (in bytes).
- MaxBufferSize int64 `json:"max_buffer_size,omitempty"`
-
- // List of handlers and their associated matchers to evaluate
- // after successful roundtrips. The first handler that matches
- // the response from a backend will be invoked. The response
- // body from the backend will not be written to the client;
- // it is up to the handler to finish handling the response.
- // If passive health checks are enabled, any errors from the
- // handler chain will not affect the health status of the
- // backend.
- //
- // Three new placeholders are available in this handler chain:
- // - `{http.reverse_proxy.status_code}` The status code from the response
- // - `{http.reverse_proxy.status_text}` The status text from the response
- // - `{http.reverse_proxy.header.*}` The headers from the response
- HandleResponse []caddyhttp.ResponseHandler `json:"handle_response,omitempty"`
-
- Transport http.RoundTripper `json:"-"`
- CB CircuitBreaker `json:"-"`
-
- // Holds the named response matchers from the Caddyfile while adapting
- responseMatchers map[string]caddyhttp.ResponseMatcher
-
- // Holds the handle_response Caddyfile tokens while adapting
- handleResponseSegments []*caddyfile.Dispenser
-
- ctx caddy.Context
- logger *zap.Logger
-}
-
-// CaddyModule returns the Caddy module information.
-func (Handler) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.handlers.reverse_proxy",
- New: func() caddy.Module { return new(Handler) },
- }
-}
-
-// Provision ensures that h is set up properly before use.
-func (h *Handler) Provision(ctx caddy.Context) error {
- h.ctx = ctx
- h.logger = ctx.Logger(h)
-
- // verify SRV compatibility
- for i, v := range h.Upstreams {
- if v.LookupSRV == "" {
- continue
- }
- if h.HealthChecks != nil && h.HealthChecks.Active != nil {
- return fmt.Errorf(`upstream: lookup_srv is incompatible with active health checks: %d: {"dial": %q, "lookup_srv": %q}`, i, v.Dial, v.LookupSRV)
- }
- if v.Dial != "" {
- return fmt.Errorf(`upstream: specifying dial address is incompatible with lookup_srv: %d: {"dial": %q, "lookup_srv": %q}`, i, v.Dial, v.LookupSRV)
- }
- }
-
- // start by loading modules
- if h.TransportRaw != nil {
- mod, err := ctx.LoadModule(h, "TransportRaw")
- if err != nil {
- return fmt.Errorf("loading transport: %v", err)
- }
- h.Transport = mod.(http.RoundTripper)
- }
- if h.LoadBalancing != nil && h.LoadBalancing.SelectionPolicyRaw != nil {
- mod, err := ctx.LoadModule(h.LoadBalancing, "SelectionPolicyRaw")
- if err != nil {
- return fmt.Errorf("loading load balancing selection policy: %s", err)
- }
- h.LoadBalancing.SelectionPolicy = mod.(Selector)
- }
- if h.CBRaw != nil {
- mod, err := ctx.LoadModule(h, "CBRaw")
- if err != nil {
- return fmt.Errorf("loading circuit breaker: %s", err)
- }
- h.CB = mod.(CircuitBreaker)
- }
-
- // ensure any embedded headers handler module gets provisioned
- // (see https://caddy.community/t/set-cookie-manipulation-in-reverse-proxy/7666?u=matt
- // for what happens if we forget to provision it)
- if h.Headers != nil {
- err := h.Headers.Provision(ctx)
- if err != nil {
- return fmt.Errorf("provisioning embedded headers handler: %v", err)
- }
- }
-
- // set up transport
- if h.Transport == nil {
- t := &HTTPTransport{
- KeepAlive: &KeepAlive{
- ProbeInterval: caddy.Duration(30 * time.Second),
- IdleConnTimeout: caddy.Duration(2 * time.Minute),
- MaxIdleConnsPerHost: 32, // seems about optimal, see #2805
- },
- DialTimeout: caddy.Duration(10 * time.Second),
- }
- err := t.Provision(ctx)
- if err != nil {
- return fmt.Errorf("provisioning default transport: %v", err)
- }
- h.Transport = t
- }
-
- // set up load balancing
- if h.LoadBalancing == nil {
- h.LoadBalancing = new(LoadBalancing)
- }
- if h.LoadBalancing.SelectionPolicy == nil {
- h.LoadBalancing.SelectionPolicy = RandomSelection{}
- }
- if h.LoadBalancing.TryDuration > 0 && h.LoadBalancing.TryInterval == 0 {
- // a non-zero try_duration with a zero try_interval
- // will always spin the CPU for try_duration if the
- // upstream is local or low-latency; avoid that by
- // defaulting to a sane wait period between attempts
- h.LoadBalancing.TryInterval = caddy.Duration(250 * time.Millisecond)
- }
- lbMatcherSets, err := ctx.LoadModule(h.LoadBalancing, "RetryMatchRaw")
- if err != nil {
- return err
- }
- err = h.LoadBalancing.RetryMatch.FromInterface(lbMatcherSets)
- if err != nil {
- return err
- }
-
- // set up upstreams
- for _, upstream := range h.Upstreams {
- // create or get the host representation for this upstream
- var host Host = new(upstreamHost)
- existingHost, loaded := hosts.LoadOrStore(upstream.String(), host)
- if loaded {
- host = existingHost.(Host)
- }
- upstream.Host = host
-
- // give it the circuit breaker, if any
- upstream.cb = h.CB
-
- // if the passive health checker has a non-zero UnhealthyRequestCount
- // but the upstream has no MaxRequests set (they are the same thing,
- // but the passive health checker is a default value for upstreams
- // without MaxRequests), copy the value into this upstream, since the
- // value in the upstream (MaxRequests) is what is used during
- // availability checks
- if h.HealthChecks != nil && h.HealthChecks.Passive != nil {
- h.HealthChecks.Passive.logger = h.logger.Named("health_checker.passive")
- if h.HealthChecks.Passive.UnhealthyRequestCount > 0 &&
- upstream.MaxRequests == 0 {
- upstream.MaxRequests = h.HealthChecks.Passive.UnhealthyRequestCount
- }
- }
-
- // upstreams need independent access to the passive
- // health check policy because passive health checks
- // run without access to h.
- if h.HealthChecks != nil {
- upstream.healthCheckPolicy = h.HealthChecks.Passive
- }
- }
-
- if h.HealthChecks != nil {
- // set defaults on passive health checks, if necessary
- if h.HealthChecks.Passive != nil {
- if h.HealthChecks.Passive.FailDuration > 0 && h.HealthChecks.Passive.MaxFails == 0 {
- h.HealthChecks.Passive.MaxFails = 1
- }
- }
-
- // if active health checks are enabled, configure them and start a worker
- if h.HealthChecks.Active != nil && (h.HealthChecks.Active.Path != "" ||
- h.HealthChecks.Active.URI != "" ||
- h.HealthChecks.Active.Port != 0) {
-
- h.HealthChecks.Active.logger = h.logger.Named("health_checker.active")
-
- timeout := time.Duration(h.HealthChecks.Active.Timeout)
- if timeout == 0 {
- timeout = 5 * time.Second
- }
-
- if h.HealthChecks.Active.Path != "" {
- h.HealthChecks.Active.logger.Warn("the 'path' option is deprecated, please use 'uri' instead!")
- }
-
- // parse the URI string (supports path and query)
- if h.HealthChecks.Active.URI != "" {
- parsedURI, err := url.Parse(h.HealthChecks.Active.URI)
- if err != nil {
- return err
- }
- h.HealthChecks.Active.uri = parsedURI
- }
-
- h.HealthChecks.Active.httpClient = &http.Client{
- Timeout: timeout,
- Transport: h.Transport,
- }
-
- for _, upstream := range h.Upstreams {
- // if there's an alternative port for health-check provided in the config,
- // then use it, otherwise use the port of upstream.
- if h.HealthChecks.Active.Port != 0 {
- upstream.activeHealthCheckPort = h.HealthChecks.Active.Port
- }
- }
-
- if h.HealthChecks.Active.Interval == 0 {
- h.HealthChecks.Active.Interval = caddy.Duration(30 * time.Second)
- }
-
- if h.HealthChecks.Active.ExpectBody != "" {
- var err error
- h.HealthChecks.Active.bodyRegexp, err = regexp.Compile(h.HealthChecks.Active.ExpectBody)
- if err != nil {
- return fmt.Errorf("expect_body: compiling regular expression: %v", err)
- }
- }
-
- go h.activeHealthChecker()
- }
- }
-
- // set up any response routes
- for i, rh := range h.HandleResponse {
- err := rh.Provision(ctx)
- if err != nil {
- return fmt.Errorf("provisioning response handler %d: %v", i, err)
- }
- }
-
- return nil
-}
-
-// Cleanup cleans up the resources made by h during provisioning.
-func (h *Handler) Cleanup() error {
- // TODO: Close keepalive connections on reload? https://github.com/caddyserver/caddy/pull/2507/files#diff-70219fd88fe3f36834f474ce6537ed26R762
-
- // remove hosts from our config from the pool
- for _, upstream := range h.Upstreams {
- _, _ = hosts.Delete(upstream.String())
- }
-
- return nil
-}
-
-func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
-
- // if enabled, buffer client request;
- // this should only be enabled if the
- // upstream requires it and does not
- // work with "slow clients" (gunicorn,
- // etc.) - this obviously has a perf
- // overhead and makes the proxy at
- // risk of exhausting memory and more
- // susceptible to slowloris attacks,
- // so it is strongly recommended to
- // only use this feature if absolutely
- // required, if read timeouts are set,
- // and if body size is limited
- if h.BufferRequests {
- r.Body = h.bufferedBody(r.Body)
- }
-
- // prepare the request for proxying; this is needed only once
- err := h.prepareRequest(r)
- if err != nil {
- return caddyhttp.Error(http.StatusInternalServerError,
- fmt.Errorf("preparing request for upstream round-trip: %v", err))
- }
-
- // we will need the original headers and Host value if
- // header operations are configured; and we should
- // restore them after we're done if they are changed
- // (for example, changing the outbound Host header
- // should not permanently change r.Host; issue #3509)
- reqHost := r.Host
- reqHeader := r.Header
- defer func() {
- r.Host = reqHost // TODO: data race, see #4038
- r.Header = reqHeader // TODO: data race, see #4038
- }()
-
- start := time.Now()
- defer func() {
- // total proxying duration, including time spent on LB and retries
- repl.Set("http.reverse_proxy.duration", time.Since(start))
- }()
-
- var proxyErr error
- for {
- // choose an available upstream
- upstream := h.LoadBalancing.SelectionPolicy.Select(h.Upstreams, r, w)
- if upstream == nil {
- if proxyErr == nil {
- proxyErr = fmt.Errorf("no upstreams available")
- }
- if !h.LoadBalancing.tryAgain(h.ctx, start, proxyErr, r) {
- break
- }
- continue
- }
-
- // the dial address may vary per-request if placeholders are
- // used, so perform those replacements here; the resulting
- // DialInfo struct should have valid network address syntax
- dialInfo, err := upstream.fillDialInfo(r)
- if err != nil {
- return statusError(fmt.Errorf("making dial info: %v", err))
- }
-
- // attach to the request information about how to dial the upstream;
- // this is necessary because the information cannot be sufficiently
- // or satisfactorily represented in a URL
- caddyhttp.SetVar(r.Context(), dialInfoVarKey, dialInfo)
-
- // set placeholders with information about this upstream
- repl.Set("http.reverse_proxy.upstream.address", dialInfo.String())
- repl.Set("http.reverse_proxy.upstream.hostport", dialInfo.Address)
- repl.Set("http.reverse_proxy.upstream.host", dialInfo.Host)
- repl.Set("http.reverse_proxy.upstream.port", dialInfo.Port)
- repl.Set("http.reverse_proxy.upstream.requests", upstream.Host.NumRequests())
- repl.Set("http.reverse_proxy.upstream.max_requests", upstream.MaxRequests)
- repl.Set("http.reverse_proxy.upstream.fails", upstream.Host.Fails())
-
- // mutate request headers according to this upstream;
- // because we're in a retry loop, we have to copy
- // headers (and the r.Host value) from the original
- // so that each retry is identical to the first
- if h.Headers != nil && h.Headers.Request != nil {
- r.Header = make(http.Header)
- copyHeader(r.Header, reqHeader)
- r.Host = reqHost
- h.Headers.Request.ApplyToRequest(r)
- }
-
- // proxy the request to that upstream
- proxyErr = h.reverseProxy(w, r, repl, dialInfo, next)
- if proxyErr == nil || proxyErr == context.Canceled {
- // context.Canceled happens when the downstream client
- // cancels the request, which is not our failure
- return nil
- }
-
- // if the roundtrip was successful, don't retry the request or
- // ding the health status of the upstream (an error can still
- // occur after the roundtrip if, for example, a response handler
- // after the roundtrip returns an error)
- if succ, ok := proxyErr.(roundtripSucceeded); ok {
- return succ.error
- }
-
- // remember this failure (if enabled)
- h.countFailure(upstream)
-
- // if we've tried long enough, break
- if !h.LoadBalancing.tryAgain(h.ctx, start, proxyErr, r) {
- break
- }
- }
-
- return statusError(proxyErr)
-}
-
-// prepareRequest modifies req so that it is ready to be proxied,
-// except for directing to a specific upstream. This method mutates
-// headers and other necessary properties of the request and should
-// be done just once (before proxying) regardless of proxy retries.
-// This assumes that no mutations of the request are performed
-// by h during or after proxying.
-func (h Handler) prepareRequest(req *http.Request) error {
- // most of this is borrowed from the Go std lib reverse proxy
-
- if req.ContentLength == 0 {
- req.Body = nil // Issue golang/go#16036: nil Body for http.Transport retries
- }
-
- req.Close = false
-
- // if User-Agent is not set by client, then explicitly
- // disable it so it's not set to default value by std lib
- if _, ok := req.Header["User-Agent"]; !ok {
- req.Header.Set("User-Agent", "")
- }
-
- reqUpType := upgradeType(req.Header)
- removeConnectionHeaders(req.Header)
-
- // Remove hop-by-hop headers to the backend. Especially
- // important is "Connection" because we want a persistent
- // connection, regardless of what the client sent to us.
- // Issue golang/go#46313: don't skip if field is empty.
- for _, h := range hopHeaders {
- // Issue golang/go#21096: tell backend applications that care about trailer support
- // that we support trailers. (We do, but we don't go out of our way to
- // advertise that unless the incoming client request thought it was worth
- // mentioning.)
- if h == "Te" && httpguts.HeaderValuesContainsToken(req.Header["Te"], "trailers") {
- req.Header.Set("Te", "trailers")
- continue
- }
- req.Header.Del(h)
- }
-
- // After stripping all the hop-by-hop connection headers above, add back any
- // necessary for protocol upgrades, such as for websockets.
- if reqUpType != "" {
- req.Header.Set("Connection", "Upgrade")
- req.Header.Set("Upgrade", reqUpType)
- }
-
- if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
- // If we aren't the first proxy retain prior
- // X-Forwarded-For information as a comma+space
- // separated list and fold multiple headers into one.
- prior, ok := req.Header["X-Forwarded-For"]
- omit := ok && prior == nil // Issue 38079: nil now means don't populate the header
- if len(prior) > 0 {
- clientIP = strings.Join(prior, ", ") + ", " + clientIP
- }
- if !omit {
- req.Header.Set("X-Forwarded-For", clientIP)
- }
- }
-
- prior, ok := req.Header["X-Forwarded-Proto"]
- omit := ok && prior == nil
- if len(prior) == 0 && !omit {
- // set X-Forwarded-Proto; many backend apps expect this too
- proto := "https"
- if req.TLS == nil {
- proto = "http"
- }
- req.Header.Set("X-Forwarded-Proto", proto)
- }
-
- return nil
-}
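To make the X-Forwarded-For folding above concrete: prior values are joined into one comma+space separated list with the immediate client IP appended, and an explicitly nil header is the opt-out. A compact, self-contained illustration (addresses are hypothetical):

package main

import (
	"fmt"
	"net"
	"net/http"
	"strings"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	req.RemoteAddr = "198.51.100.9:52114"
	req.Header.Set("X-Forwarded-For", "203.0.113.7") // set by an earlier proxy

	// Same folding logic as prepareRequest above.
	if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
		prior, ok := req.Header["X-Forwarded-For"]
		omit := ok && prior == nil // nil means "don't populate the header"
		if len(prior) > 0 {
			clientIP = strings.Join(prior, ", ") + ", " + clientIP
		}
		if !omit {
			req.Header.Set("X-Forwarded-For", clientIP)
		}
	}
	fmt.Println(req.Header.Get("X-Forwarded-For")) // 203.0.113.7, 198.51.100.9
}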
-
-// reverseProxy performs a round-trip to the given backend and relays the response to the client.
-// (This method is mostly the beginning of what was borrowed from the net/http/httputil package in the
-// Go standard library, which was used as the foundation.)
-func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, repl *caddy.Replacer, di DialInfo, next caddyhttp.Handler) error {
- _ = di.Upstream.Host.CountRequest(1)
- //nolint:errcheck
- defer di.Upstream.Host.CountRequest(-1)
-
- // point the request to this upstream
- h.directRequest(req, di)
-
- // do the round-trip; emit debug log with values we know are
- // safe, or if there is no error, emit fuller log entry
- start := time.Now()
- res, err := h.Transport.RoundTrip(req)
- duration := time.Since(start)
- logger := h.logger.With(
- zap.String("upstream", di.Upstream.String()),
- zap.Object("request", caddyhttp.LoggableHTTPRequest{Request: req}),
- )
- if err != nil {
- logger.Debug("upstream roundtrip",
- zap.Duration("duration", duration),
- zap.Error(err))
- return err
- }
- logger.Debug("upstream roundtrip",
- zap.Object("headers", caddyhttp.LoggableHTTPHeader(res.Header)),
- zap.Int("status", res.StatusCode))
-
- // duration until upstream wrote response headers (roundtrip duration)
- repl.Set("http.reverse_proxy.upstream.latency", duration)
-
- // update circuit breaker on current conditions
- if di.Upstream.cb != nil {
- di.Upstream.cb.RecordMetric(res.StatusCode, duration)
- }
-
- // perform passive health checks (if enabled)
- if h.HealthChecks != nil && h.HealthChecks.Passive != nil {
- // strike if the status code matches one that is "bad"
- for _, badStatus := range h.HealthChecks.Passive.UnhealthyStatus {
- if caddyhttp.StatusCodeMatches(res.StatusCode, badStatus) {
- h.countFailure(di.Upstream)
- }
- }
-
- // strike if the roundtrip took too long
- if h.HealthChecks.Passive.UnhealthyLatency > 0 &&
- duration >= time.Duration(h.HealthChecks.Passive.UnhealthyLatency) {
- h.countFailure(di.Upstream)
- }
- }
-
- // if enabled, buffer the response body
- if h.BufferResponses {
- res.Body = h.bufferedBody(res.Body)
- }
-
- // see if any response handler is configured for this response from the backend
- for i, rh := range h.HandleResponse {
- if rh.Match != nil && !rh.Match.Match(res.StatusCode, res.Header) {
- continue
- }
-
- // if configured to only change the status code, do that then continue regular proxy response
- if statusCodeStr := rh.StatusCode.String(); statusCodeStr != "" {
- statusCode, err := strconv.Atoi(repl.ReplaceAll(statusCodeStr, ""))
- if err != nil {
- return caddyhttp.Error(http.StatusInternalServerError, err)
- }
- if statusCode != 0 {
- res.StatusCode = statusCode
- }
- break
- }
-
- // otherwise, if there are any routes configured, execute those as the
- // actual response instead of what we got from the proxy backend
- if len(rh.Routes) == 0 {
- continue
- }
-
- res.Body.Close()
-
- // set up the replacer so that parts of the original response can be
- // used for routing decisions
- for field, value := range res.Header {
- repl.Set("http.reverse_proxy.header."+field, strings.Join(value, ","))
- }
- repl.Set("http.reverse_proxy.status_code", res.StatusCode)
- repl.Set("http.reverse_proxy.status_text", res.Status)
-
- h.logger.Debug("handling response", zap.Int("handler", i))
- if routeErr := rh.Routes.Compile(next).ServeHTTP(rw, req); routeErr != nil {
- // wrap error in roundtripSucceeded so caller knows that
- // the roundtrip was successful and to not retry
- return roundtripSucceeded{routeErr}
- }
- }
-
- // deal with 101 Switching Protocols responses (WebSocket, h2c, etc.)
- if res.StatusCode == http.StatusSwitchingProtocols {
- h.handleUpgradeResponse(logger, rw, req, res)
- return nil
- }
-
- removeConnectionHeaders(res.Header)
-
- for _, h := range hopHeaders {
- res.Header.Del(h)
- }
-
- // apply any response header operations
- if h.Headers != nil && h.Headers.Response != nil {
- if h.Headers.Response.Require == nil ||
- h.Headers.Response.Require.Match(res.StatusCode, res.Header) {
- h.Headers.Response.ApplyTo(res.Header, repl)
- }
- }
-
- copyHeader(rw.Header(), res.Header)
-
- // The "Trailer" header isn't included in the Transport's response,
- // at least for *http.Transport. Build it up from Trailer.
- announcedTrailers := len(res.Trailer)
- if announcedTrailers > 0 {
- trailerKeys := make([]string, 0, len(res.Trailer))
- for k := range res.Trailer {
- trailerKeys = append(trailerKeys, k)
- }
- rw.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
- }
-
- rw.WriteHeader(res.StatusCode)
- err = h.copyResponse(rw, res.Body, h.flushInterval(req, res))
- res.Body.Close() // close now, instead of defer, to populate res.Trailer
- if err != nil {
- // we're streaming the response and we've already written headers, so
- // there's nothing an error handler can do to recover at this point;
- // the standard lib's proxy panics at this point, but we'll just log
- // the error and abort the stream here
- h.logger.Error("aborting with incomplete response", zap.Error(err))
- return nil
- }
-
- if len(res.Trailer) > 0 {
- // Force chunking if we saw a response trailer.
- // This prevents net/http from calculating the length for short
- // bodies and adding a Content-Length.
- if fl, ok := rw.(http.Flusher); ok {
- fl.Flush()
- }
- }
-
- // total duration spent proxying, including writing response body
- repl.Set("http.reverse_proxy.upstream.duration", duration)
-
- if len(res.Trailer) == announcedTrailers {
- copyHeader(rw.Header(), res.Trailer)
- return nil
- }
-
- for k, vv := range res.Trailer {
- k = http.TrailerPrefix + k
- for _, v := range vv {
- rw.Header().Add(k, v)
- }
- }
-
- return nil
-}
-
-// tryAgain takes the time that the handler was initially invoked
-// as well as any error currently obtained, and the request being
-// tried, and returns true if another attempt should be made at
-// proxying the request. If true is returned, it has already blocked
-// long enough before the next retry (i.e. no more sleeping is
-// needed). If false is returned, the handler should stop trying to
-// proxy the request.
-func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, proxyErr error, req *http.Request) bool {
- // if we've tried long enough, break
- if time.Since(start) >= time.Duration(lb.TryDuration) {
- return false
- }
-
- // if the error occurred while dialing (i.e. a connection
- // could not even be established to the upstream), then it
- // should be safe to retry, since without a connection, no
- // HTTP request can be transmitted; but if the error is not
- // specifically a dialer error, we need to be careful
- if _, ok := proxyErr.(DialError); proxyErr != nil && !ok {
- // if the error occurred after a connection was established,
- // we have to assume the upstream received the request, and
- // retries need to be carefully decided, because some requests
- // are not idempotent
- if lb.RetryMatch == nil && req.Method != "GET" {
- // by default, don't retry requests if they aren't GET
- return false
- }
- if !lb.RetryMatch.AnyMatch(req) {
- return false
- }
- }
-
- // otherwise, wait and try the next available host
- select {
- case <-time.After(time.Duration(lb.TryInterval)):
- return true
- case <-ctx.Done():
- return false
- }
-}
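The retry policy above reduces to: give up once try_duration has elapsed; dial failures (no connection was ever established) are always safe to retry; anything after a connection was made is retried only for GET requests unless a retry matcher allows it. A condensed sketch of that decision (dialError and shouldRetry are hypothetical names here; the real code also consults retry_match and sleeps for try_interval between attempts):

package example

import (
	"net/http"
	"time"
)

type dialError struct{ error }

// shouldRetry mirrors tryAgain in miniature.
func shouldRetry(start time.Time, tryDuration time.Duration, err error, req *http.Request) bool {
	if time.Since(start) >= tryDuration {
		return false // tried long enough
	}
	if _, isDial := err.(dialError); err != nil && !isDial {
		// the upstream may have received the request;
		// only idempotent GETs are retried by default
		return req.Method == http.MethodGet
	}
	return true // dial failure: nothing was transmitted, retry freely
}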
-
-// directRequest modifies only req.URL so that it points to the upstream
-// in the given DialInfo. It must modify ONLY the request URL.
-func (h Handler) directRequest(req *http.Request, di DialInfo) {
- // we need a host, so set the upstream's host address
- reqHost := di.Address
-
- // if the port equates to the scheme, strip the port because
- // it's weird to make a request like http://example.com:80/.
- if (req.URL.Scheme == "http" && di.Port == "80") ||
- (req.URL.Scheme == "https" && di.Port == "443") {
- reqHost = di.Host
- }
-
- req.URL.Host = reqHost
-}
-
-// bufferedBody reads originalBody into a buffer, then returns a reader for the buffer.
-// Always close the return value when done with it, just as if it were the original body!
-func (h Handler) bufferedBody(originalBody io.ReadCloser) io.ReadCloser {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
- if h.MaxBufferSize > 0 {
- n, err := io.CopyN(buf, originalBody, h.MaxBufferSize)
- if err != nil || n == h.MaxBufferSize {
- return bodyReadCloser{
- Reader: io.MultiReader(buf, originalBody),
- buf: buf,
- body: originalBody,
- }
- }
- } else {
- _, _ = io.Copy(buf, originalBody)
- }
- originalBody.Close() // no point in keeping it open
- return bodyReadCloser{
- Reader: buf,
- buf: buf,
- }
-}
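The MaxBufferSize branch above is a classic spill-over pattern: copy at most N bytes into memory, and if the body may hold more, stitch the buffer and the unread remainder back together with io.MultiReader. A standalone sketch of the same idea (bufferUpTo is a hypothetical helper; the real method also recycles buffers through bufPool):

package example

import (
	"bytes"
	"io"
)

// bufferUpTo reads at most max bytes of r into memory. If r may hold
// more, the returned reader replays the buffered prefix and then
// continues from the original reader.
func bufferUpTo(r io.Reader, max int64) io.Reader {
	var buf bytes.Buffer
	n, err := io.CopyN(&buf, r, max)
	if err != nil || n == max {
		return io.MultiReader(&buf, r) // prefix in memory, rest streamed
	}
	return &buf // fully buffered
}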
-
-func copyHeader(dst, src http.Header) {
- for k, vv := range src {
- for _, v := range vv {
- dst.Add(k, v)
- }
- }
-}
-
-func upgradeType(h http.Header) string {
- if !httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") {
- return ""
- }
- return strings.ToLower(h.Get("Upgrade"))
-}
-
-// removeConnectionHeaders removes hop-by-hop headers listed in the "Connection" header of h.
-// See RFC 7230, section 6.1
-func removeConnectionHeaders(h http.Header) {
- for _, f := range h["Connection"] {
- for _, sf := range strings.Split(f, ",") {
- if sf = textproto.TrimString(sf); sf != "" {
- h.Del(sf)
- }
- }
- }
-}
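Per RFC 7230 §6.1, any field named inside the Connection header is itself hop-by-hop and must be dropped. A quick runnable illustration of the function above (the header names are hypothetical); note that the Connection header itself survives here and is stripped separately via hopHeaders:

package main

import (
	"fmt"
	"net/http"
	"net/textproto"
	"strings"
)

func main() {
	h := http.Header{
		"Connection":    {"close, X-Trace-Id"},
		"X-Trace-Id":    {"abc123"},
		"Cache-Control": {"no-store"},
	}
	// Same algorithm as removeConnectionHeaders above.
	for _, f := range h["Connection"] {
		for _, sf := range strings.Split(f, ",") {
			if sf = textproto.TrimString(sf); sf != "" {
				h.Del(sf)
			}
		}
	}
	fmt.Println(h) // X-Trace-Id is gone; Cache-Control is untouched
}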
-
-// statusError returns an error value that has a status code.
-func statusError(err error) error {
- // errors proxying usually mean there is a problem with the upstream(s)
- statusCode := http.StatusBadGateway
-
- // if the client canceled the request (usually this means they closed
- // the connection, so they won't see any response), we can report it
- // as a client error (4xx) and not a server error (5xx); unfortunately
- // the Go standard library, at least at time of writing in late 2020,
- // obnoxiously wraps the exported, standard context.Canceled error with
- // an unexported garbage value that we have to do a substring check for:
- // https://github.com/golang/go/blob/6965b01ea248cabb70c3749fd218b36089a21efb/src/net/net.go#L416-L430
- if errors.Is(err, context.Canceled) || strings.Contains(err.Error(), "operation was canceled") {
- // regrettably, there is no standard error code for "client closed connection", but
- // for historical reasons we can use a code that a lot of people are already using;
- // using 5xx is problematic for users; see #3748
- statusCode = 499
- }
- return caddyhttp.Error(statusCode, err)
-}
-
-// LoadBalancing has parameters related to load balancing.
-type LoadBalancing struct {
- // A selection policy is how to choose an available backend.
- // The default policy is random selection.
- SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
-
- // How long to try selecting available backends for each request
- // if the next available host is down. By default, this retry is
- // disabled. Clients will wait for up to this long while the load
- // balancer tries to find an available upstream host.
- TryDuration caddy.Duration `json:"try_duration,omitempty"`
-
- // How long to wait between selecting the next host from the pool. Default
- // is 250ms. Only relevant when a request to an upstream host fails. Be
- // aware that setting this to 0 with a non-zero try_duration can cause the
- // CPU to spin if all backends are down and latency is very low.
- TryInterval caddy.Duration `json:"try_interval,omitempty"`
-
- // A list of matcher sets that restricts with which requests retries are
- // allowed. A request must match any of the given matcher sets in order
- // to be retried if the connection to the upstream succeeded but the
- // subsequent round-trip failed. If the connection to the upstream failed,
- // a retry is always allowed. If unspecified, only GET requests will be
- // allowed to be retried. Note that a retry is done with the next available
- // host according to the load balancing policy.
- RetryMatchRaw caddyhttp.RawMatcherSets `json:"retry_match,omitempty" caddy:"namespace=http.matchers"`
-
- SelectionPolicy Selector `json:"-"`
- RetryMatch caddyhttp.MatcherSets `json:"-"`
-}
-
-// Selector selects an available upstream from the pool.
-type Selector interface {
- Select(UpstreamPool, *http.Request, http.ResponseWriter) *Upstream
-}
-
-// Hop-by-hop headers. These are removed when sent to the backend.
-// As of RFC 7230, hop-by-hop headers are required to appear in the
-// Connection header field. These are the headers defined by the
-// obsoleted RFC 2616 (section 13.5.1) and are used for backward
-// compatibility.
-var hopHeaders = []string{
- "Alt-Svc",
- "Connection",
- "Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
- "Keep-Alive",
- "Proxy-Authenticate",
- "Proxy-Authorization",
- "Te", // canonicalized version of "TE"
- "Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522
- "Transfer-Encoding",
- "Upgrade",
-}
-
-// DialError is an error that specifically occurs
-// in a call to Dial or DialContext.
-type DialError struct{ error }
-
-// TLSTransport is implemented by transports
-// that are capable of using TLS.
-type TLSTransport interface {
- // TLSEnabled returns true if the transport
- // has TLS enabled, false otherwise.
- TLSEnabled() bool
-
- // EnableTLS enables TLS within the transport
- // if it is not already, using the provided
- // value as a basis for the TLS config.
- EnableTLS(base *TLSConfig) error
-}
-
-// roundtripSucceeded is an error type that is returned if the
-// roundtrip succeeded, but an error occurred after-the-fact.
-type roundtripSucceeded struct{ error }
-
-// bodyReadCloser is a reader that, upon closing, will return
-// its buffer to the pool and close the underlying body reader.
-type bodyReadCloser struct {
- io.Reader
- buf *bytes.Buffer
- body io.ReadCloser
-}
-
-func (brc bodyReadCloser) Close() error {
- bufPool.Put(brc.buf)
- if brc.body != nil {
- return brc.body.Close()
- }
- return nil
-}
-
-// bufPool is used for buffering requests and responses.
-var bufPool = sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
-}
-
-// Interface guards
-var (
- _ caddy.Provisioner = (*Handler)(nil)
- _ caddy.CleanerUpper = (*Handler)(nil)
- _ caddyhttp.MiddlewareHandler = (*Handler)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/selectionpolicies.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/selectionpolicies.go
deleted file mode 100644
index 001f7f80..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/selectionpolicies.go
+++ /dev/null
@@ -1,557 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reverseproxy
-
-import (
- "crypto/hmac"
- "crypto/sha256"
- "encoding/hex"
- "fmt"
- "hash/fnv"
- weakrand "math/rand"
- "net"
- "net/http"
- "strconv"
- "sync/atomic"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
-)
-
-func init() {
- caddy.RegisterModule(RandomSelection{})
- caddy.RegisterModule(RandomChoiceSelection{})
- caddy.RegisterModule(LeastConnSelection{})
- caddy.RegisterModule(RoundRobinSelection{})
- caddy.RegisterModule(FirstSelection{})
- caddy.RegisterModule(IPHashSelection{})
- caddy.RegisterModule(URIHashSelection{})
- caddy.RegisterModule(HeaderHashSelection{})
- caddy.RegisterModule(CookieHashSelection{})
-
- weakrand.Seed(time.Now().UTC().UnixNano())
-}
-
-// RandomSelection is a policy that selects
-// an available host at random.
-type RandomSelection struct{}
-
-// CaddyModule returns the Caddy module information.
-func (RandomSelection) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.selection_policies.random",
- New: func() caddy.Module { return new(RandomSelection) },
- }
-}
-
-// Select returns an available host, if any.
-func (r RandomSelection) Select(pool UpstreamPool, request *http.Request, _ http.ResponseWriter) *Upstream {
- return selectRandomHost(pool)
-}
-
-// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
-func (r *RandomSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if d.NextArg() {
- return d.ArgErr()
- }
- }
- return nil
-}
-
-// RandomChoiceSelection is a policy that selects
-// two or more available hosts at random, then
-// chooses the one with the least load.
-type RandomChoiceSelection struct {
- // The size of the sub-pool created from the larger upstream pool. The default value
- // is 2 and the maximum at selection time is the size of the upstream pool.
- Choose int `json:"choose,omitempty"`
-}
-
-// CaddyModule returns the Caddy module information.
-func (RandomChoiceSelection) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.selection_policies.random_choose",
- New: func() caddy.Module { return new(RandomChoiceSelection) },
- }
-}
-
-// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
-func (r *RandomChoiceSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if !d.NextArg() {
- return d.ArgErr()
- }
- chooseStr := d.Val()
- choose, err := strconv.Atoi(chooseStr)
- if err != nil {
- return d.Errf("invalid choice value '%s': %v", chooseStr, err)
- }
- r.Choose = choose
- }
- return nil
-}
-
-// Provision sets up r.
-func (r *RandomChoiceSelection) Provision(ctx caddy.Context) error {
- if r.Choose == 0 {
- r.Choose = 2
- }
- return nil
-}
-
-// Validate ensures that r's configuration is valid.
-func (r RandomChoiceSelection) Validate() error {
- if r.Choose < 2 {
- return fmt.Errorf("choose must be at least 2")
- }
- return nil
-}
-
-// Select returns an available host, if any.
-func (r RandomChoiceSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream {
- k := r.Choose
- if k > len(pool) {
- k = len(pool)
- }
- choices := make([]*Upstream, k)
- for i, upstream := range pool {
- if !upstream.Available() {
- continue
- }
- j := weakrand.Intn(i + 1)
- if j < k {
- choices[j] = upstream
- }
- }
- return leastRequests(choices)
-}
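RandomChoiceSelection is a variant of the "power of d random choices" technique: sample a few hosts uniformly, then keep the least-loaded of the sample, avoiding both the herding of pure least-connections and the imbalance of pure random. A toy sketch of the d=2 case (choose2 is a hypothetical name and assumes at least two entries):

package main

import (
	"fmt"
	"math/rand"
)

// choose2 samples two distinct indices and keeps the lesser-loaded one.
func choose2(loads []int) int {
	i := rand.Intn(len(loads))
	j := rand.Intn(len(loads) - 1)
	if j >= i {
		j++ // skip i so the two samples are distinct
	}
	if loads[j] < loads[i] {
		return j
	}
	return i
}

func main() {
	fmt.Println(choose2([]int{5, 1, 7, 3})) // usually a low-load index
}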
-
-// LeastConnSelection is a policy that selects the
-// host with the least active requests. If multiple
-// hosts have the same fewest number, one is chosen
-// randomly. The term "conn" or "connection" is used
-// in this policy name due to its similar meaning in
-// other software, but our load balancer actually
-// counts active requests rather than connections,
-// since these days requests are multiplexed onto
-// shared connections.
-type LeastConnSelection struct{}
-
-// CaddyModule returns the Caddy module information.
-func (LeastConnSelection) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.selection_policies.least_conn",
- New: func() caddy.Module { return new(LeastConnSelection) },
- }
-}
-
-// Select selects the up host with the least number of connections in the
-// pool. If more than one host has the same least number of connections,
-// one of the hosts is chosen at random.
-func (LeastConnSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream {
- var bestHost *Upstream
- var count int
- leastReqs := -1
-
- for _, host := range pool {
- if !host.Available() {
- continue
- }
- numReqs := host.NumRequests()
- if leastReqs == -1 || numReqs < leastReqs {
- leastReqs = numReqs
- count = 0
- }
-
- // among hosts with same least connections, perform a reservoir
- // sample: https://en.wikipedia.org/wiki/Reservoir_sampling
- if numReqs == leastReqs {
- count++
- if (weakrand.Int() % count) == 0 {
- bestHost = host
- }
- }
- }
-
- return bestHost
-}
-
-// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
-func (r *LeastConnSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if d.NextArg() {
- return d.ArgErr()
- }
- }
- return nil
-}
-
-// RoundRobinSelection is a policy that selects
-// a host based on round-robin ordering.
-type RoundRobinSelection struct {
- robin uint32
-}
-
-// CaddyModule returns the Caddy module information.
-func (RoundRobinSelection) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.selection_policies.round_robin",
- New: func() caddy.Module { return new(RoundRobinSelection) },
- }
-}
-
-// Select returns an available host, if any.
-func (r *RoundRobinSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream {
- n := uint32(len(pool))
- if n == 0 {
- return nil
- }
- for i := uint32(0); i < n; i++ {
- robin := atomic.AddUint32(&r.robin, 1)
- host := pool[robin%n]
- if host.Available() {
- return host
- }
- }
- return nil
-}
-
-// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
-func (r *RoundRobinSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if d.NextArg() {
- return d.ArgErr()
- }
- }
- return nil
-}
-
-// FirstSelection is a policy that selects
-// the first available host.
-type FirstSelection struct{}
-
-// CaddyModule returns the Caddy module information.
-func (FirstSelection) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.selection_policies.first",
- New: func() caddy.Module { return new(FirstSelection) },
- }
-}
-
-// Select returns an available host, if any.
-func (FirstSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream {
- for _, host := range pool {
- if host.Available() {
- return host
- }
- }
- return nil
-}
-
-// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
-func (r *FirstSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if d.NextArg() {
- return d.ArgErr()
- }
- }
- return nil
-}
-
-// IPHashSelection is a policy that selects a host
-// based on hashing the remote IP of the request.
-type IPHashSelection struct{}
-
-// CaddyModule returns the Caddy module information.
-func (IPHashSelection) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.selection_policies.ip_hash",
- New: func() caddy.Module { return new(IPHashSelection) },
- }
-}
-
-// Select returns an available host, if any.
-func (IPHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream {
- clientIP, _, err := net.SplitHostPort(req.RemoteAddr)
- if err != nil {
- clientIP = req.RemoteAddr
- }
- return hostByHashing(pool, clientIP)
-}
-
-// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
-func (r *IPHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if d.NextArg() {
- return d.ArgErr()
- }
- }
- return nil
-}
-
-// URIHashSelection is a policy that selects a
-// host by hashing the request URI.
-type URIHashSelection struct{}
-
-// CaddyModule returns the Caddy module information.
-func (URIHashSelection) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.selection_policies.uri_hash",
- New: func() caddy.Module { return new(URIHashSelection) },
- }
-}
-
-// Select returns an available host, if any.
-func (URIHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream {
- return hostByHashing(pool, req.RequestURI)
-}
-
-// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
-func (r *URIHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if d.NextArg() {
- return d.ArgErr()
- }
- }
- return nil
-}
-
-// HeaderHashSelection is a policy that selects
-// a host based on a given request header.
-type HeaderHashSelection struct {
- // The HTTP header field whose value is to be hashed and used for upstream selection.
- Field string `json:"field,omitempty"`
-}
-
-// CaddyModule returns the Caddy module information.
-func (HeaderHashSelection) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.selection_policies.header",
- New: func() caddy.Module { return new(HeaderHashSelection) },
- }
-}
-
-// Select returns an available host, if any.
-func (s HeaderHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream {
- if s.Field == "" {
- return nil
- }
-
- // The Host header should be obtained from the req.Host field
- // since net/http removes it from the header map.
- if s.Field == "Host" && req.Host != "" {
- return hostByHashing(pool, req.Host)
- }
-
- val := req.Header.Get(s.Field)
- if val == "" {
- return RandomSelection{}.Select(pool, req, nil)
- }
- return hostByHashing(pool, val)
-}
-
-// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
-func (s *HeaderHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if !d.NextArg() {
- return d.ArgErr()
- }
- s.Field = d.Val()
- }
- return nil
-}
-
-// CookieHashSelection is a policy that selects
-// a host based on a given cookie name.
-type CookieHashSelection struct {
- // The HTTP cookie name whose value is to be hashed and used for upstream selection.
- Name string `json:"name,omitempty"`
- // Secret used to hash (HMAC-SHA256) the chosen upstream in the cookie
- Secret string `json:"secret,omitempty"`
-}
-
-// CaddyModule returns the Caddy module information.
-func (CookieHashSelection) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.selection_policies.cookie",
- New: func() caddy.Module { return new(CookieHashSelection) },
- }
-}
-
-// Select returns an available host, if any.
-func (s CookieHashSelection) Select(pool UpstreamPool, req *http.Request, w http.ResponseWriter) *Upstream {
- if s.Name == "" {
- s.Name = "lb"
- }
- cookie, err := req.Cookie(s.Name)
- // If there's no cookie, select new random host
- if err != nil || cookie == nil {
- return selectNewHostWithCookieHashSelection(pool, w, s.Secret, s.Name)
- }
- // If the cookie is present, loop over the available upstreams until we find a match
- cookieValue := cookie.Value
- for _, upstream := range pool {
- if !upstream.Available() {
- continue
- }
- sha, err := hashCookie(s.Secret, upstream.Dial)
- if err == nil && sha == cookieValue {
- return upstream
- }
- }
- // If there is no matching host, select a new random host
- return selectNewHostWithCookieHashSelection(pool, w, s.Secret, s.Name)
-}
-
-// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
-// lb_policy cookie [<name> [<secret>]]
-//
-// By default, the name is `lb`
-func (s *CookieHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- args := d.RemainingArgs()
- switch len(args) {
- case 1:
- case 2:
- s.Name = args[1]
- case 3:
- s.Name = args[1]
- s.Secret = args[2]
- default:
- return d.ArgErr()
- }
- return nil
-}
-
-// Select a new Host randomly and add a sticky session cookie
-func selectNewHostWithCookieHashSelection(pool []*Upstream, w http.ResponseWriter, cookieSecret string, cookieName string) *Upstream {
- randomHost := selectRandomHost(pool)
-
- if randomHost != nil {
- // Hash (HMAC with some key for privacy) the upstream.Dial string as the cookie value
- sha, err := hashCookie(cookieSecret, randomHost.Dial)
- if err == nil {
- // write the cookie.
- http.SetCookie(w, &http.Cookie{Name: cookieName, Value: sha, Path: "/", Secure: false})
- }
- }
- return randomHost
-}
-
-// hashCookie hashes (HMAC-SHA256) some data with the secret
-func hashCookie(secret string, data string) (string, error) {
- h := hmac.New(sha256.New, []byte(secret))
- _, err := h.Write([]byte(data))
- if err != nil {
- return "", err
- }
- return hex.EncodeToString(h.Sum(nil)), nil
-}
-
-// selectRandomHost returns a random available host
-func selectRandomHost(pool []*Upstream) *Upstream {
- // use reservoir sampling because the number of available
- // hosts isn't known: https://en.wikipedia.org/wiki/Reservoir_sampling
- var randomHost *Upstream
- var count int
- for _, upstream := range pool {
- if !upstream.Available() {
- continue
- }
- // (n % 1 == 0) holds for all n, therefore an
- // upstream will always be chosen if there is at
- // least one available
- count++
- if (weakrand.Int() % count) == 0 {
- randomHost = upstream
- }
- }
- return randomHost
-}
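The single-slot reservoir sample above selects uniformly without knowing in advance how many hosts are available: the i-th eligible host replaces the current pick with probability 1/i. A tiny self-contained demonstration of the same pattern (pickOne is a hypothetical name):

package main

import (
	"fmt"
	"math/rand"
)

// pickOne returns a uniformly random element using the same
// single-slot reservoir trick as selectRandomHost.
func pickOne(items []string) string {
	var chosen string
	count := 0
	for _, it := range items {
		count++
		if rand.Intn(count) == 0 { // true with probability 1/count
			chosen = it
		}
	}
	return chosen
}

func main() {
	fmt.Println(pickOne([]string{"a", "b", "c"}))
}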
-
-// leastRequests returns the host with the
-// least number of active requests to it.
-// If more than one host has the same
-// least number of active requests, then
-// one of those is chosen at random.
-func leastRequests(upstreams []*Upstream) *Upstream {
- if len(upstreams) == 0 {
- return nil
- }
- var best []*Upstream
- var bestReqs int = -1
- for _, upstream := range upstreams {
- if upstream == nil {
- continue
- }
- reqs := upstream.NumRequests()
- if reqs == 0 {
- return upstream
- }
- // If bestReqs was just initialized to -1,
- // we need to append this upstream as well
- if reqs <= bestReqs || bestReqs == -1 {
- bestReqs = reqs
- best = append(best, upstream)
- }
- }
- if len(best) == 0 {
- return nil
- }
- return best[weakrand.Intn(len(best))]
-}
-
-// hostByHashing returns an available host
-// from pool based on a hashable string s.
-func hostByHashing(pool []*Upstream, s string) *Upstream {
- poolLen := uint32(len(pool))
- if poolLen == 0 {
- return nil
- }
- index := hash(s) % poolLen
- for i := uint32(0); i < poolLen; i++ {
- upstream := pool[(index+i)%poolLen]
- if upstream.Available() {
- return upstream
- }
- }
- return nil
-}
-
-// hash calculates a fast hash based on s.
-func hash(s string) uint32 {
- h := fnv.New32a()
- _, _ = h.Write([]byte(s))
- return h.Sum32()
-}
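hostByHashing plus this FNV-1a hash gives a stable key-to-slot mapping: the same client IP, URI, or header value lands on the same pool index as long as the pool length is unchanged (plain modulo hashing, so selections reshuffle when the pool is resized). A sketch of just the mapping step (slotFor is a hypothetical name):

package main

import (
	"fmt"
	"hash/fnv"
)

// slotFor maps a key to a pool index the same way hostByHashing does
// before it walks forward past unavailable hosts.
func slotFor(key string, poolLen uint32) uint32 {
	h := fnv.New32a()
	h.Write([]byte(key))
	return h.Sum32() % poolLen
}

func main() {
	fmt.Println(slotFor("203.0.113.7", 3))
	fmt.Println(slotFor("203.0.113.7", 3)) // identical to the first line
}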
-
-// Interface guards
-var (
- _ Selector = (*RandomSelection)(nil)
- _ Selector = (*RandomChoiceSelection)(nil)
- _ Selector = (*LeastConnSelection)(nil)
- _ Selector = (*RoundRobinSelection)(nil)
- _ Selector = (*FirstSelection)(nil)
- _ Selector = (*IPHashSelection)(nil)
- _ Selector = (*URIHashSelection)(nil)
- _ Selector = (*HeaderHashSelection)(nil)
- _ Selector = (*CookieHashSelection)(nil)
-
- _ caddy.Validator = (*RandomChoiceSelection)(nil)
- _ caddy.Provisioner = (*RandomChoiceSelection)(nil)
-
- _ caddyfile.Unmarshaler = (*RandomChoiceSelection)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/streaming.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/streaming.go
deleted file mode 100644
index 1db352b7..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/streaming.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Most of the code in this file was initially borrowed from the Go
-// standard library and modified; it had this copyright notice:
-// Copyright 2011 The Go Authors
-
-package reverseproxy
-
-import (
- "context"
- "io"
- "mime"
- "net/http"
- "sync"
- "time"
-
- "go.uber.org/zap"
-)
-
-func (h Handler) handleUpgradeResponse(logger *zap.Logger, rw http.ResponseWriter, req *http.Request, res *http.Response) {
- reqUpType := upgradeType(req.Header)
- resUpType := upgradeType(res.Header)
- // TODO: Update to use "net/http/internal/ascii" once we've bumped
- // the minimum Go version to 1.17.
- // See https://github.com/golang/go/commit/5c489514bc5e61ad9b5b07bd7d8ec65d66a0512a
- if reqUpType != resUpType {
- h.logger.Debug("backend tried to switch to unexpected protocol via Upgrade header",
- zap.String("backend_upgrade", resUpType),
- zap.String("requested_upgrade", reqUpType))
- return
- }
-
- hj, ok := rw.(http.Hijacker)
- if !ok {
- h.logger.Sugar().Errorf("can't switch protocols using non-Hijacker ResponseWriter type %T", rw)
- return
- }
- backConn, ok := res.Body.(io.ReadWriteCloser)
- if !ok {
- h.logger.Error("internal error: 101 switching protocols response with non-writable body")
- return
- }
-
- // adopted from https://github.com/golang/go/commit/8bcf2834afdf6a1f7937390903a41518715ef6f5
- backConnCloseCh := make(chan struct{})
- go func() {
- // Ensure that the cancelation of a request closes the backend.
- // See issue https://golang.org/issue/35559.
- select {
- case <-req.Context().Done():
- case <-backConnCloseCh:
- }
- backConn.Close()
- }()
- defer close(backConnCloseCh)
-
- logger.Debug("upgrading connection")
- conn, brw, err := hj.Hijack()
- if err != nil {
- h.logger.Error("hijack failed on protocol switch", zap.Error(err))
- return
- }
- defer conn.Close()
-
- start := time.Now()
- defer func() {
- logger.Debug("connection closed", zap.Duration("duration", time.Since(start)))
- }()
-
- copyHeader(rw.Header(), res.Header)
-
- res.Header = rw.Header()
- res.Body = nil // so res.Write only writes the headers; we have res.Body in backConn above
- if err := res.Write(brw); err != nil {
- h.logger.Debug("response write", zap.Error(err))
- return
- }
- if err := brw.Flush(); err != nil {
- h.logger.Debug("response flush", zap.Error(err))
- return
- }
-
- errc := make(chan error, 1)
- spc := switchProtocolCopier{user: conn, backend: backConn}
- go spc.copyToBackend(errc)
- go spc.copyFromBackend(errc)
- <-errc
-}
-
-// flushInterval returns the p.FlushInterval value, conditionally
-// overriding its value for a specific request/response.
-func (h Handler) flushInterval(req *http.Request, res *http.Response) time.Duration {
- resCTHeader := res.Header.Get("Content-Type")
- resCT, _, err := mime.ParseMediaType(resCTHeader)
-
- // For Server-Sent Events responses, flush immediately.
- // The MIME type is defined in https://www.w3.org/TR/eventsource/#text-event-stream
- if err == nil && resCT == "text/event-stream" {
- return -1 // negative means immediately
- }
-
- // We might be streaming a response for which Content-Length is unset.
- if res.ContentLength == -1 {
- return -1
- }
-
- // for h2 and h2c upstream streaming data to client (issues #3556 and #3606)
- if h.isBidirectionalStream(req, res) {
- return -1
- }
-
- return time.Duration(h.FlushInterval)
-}
-
-// isBidirectionalStream returns whether we should work in bi-directional stream mode.
-//
-// See https://github.com/caddyserver/caddy/pull/3620 for discussion of nuances.
-func (h Handler) isBidirectionalStream(req *http.Request, res *http.Response) bool {
- // We have to check the encoding here; only flush headers with identity encoding.
- // A non-identity encoding might be combined with the "encode" directive; in that
- // case, if the body is larger than enc.MinLength, the upper-level encode handler
- // might still have a Content-Encoding header to write.
- // (see https://github.com/caddyserver/caddy/issues/3606 for use case)
- ae := req.Header.Get("Accept-Encoding")
-
- return req.ProtoMajor == 2 &&
- res.ProtoMajor == 2 &&
- res.ContentLength == -1 &&
- (ae == "identity" || ae == "")
-}
-
-func (h Handler) copyResponse(dst io.Writer, src io.Reader, flushInterval time.Duration) error {
- if flushInterval != 0 {
- if wf, ok := dst.(writeFlusher); ok {
- mlw := &maxLatencyWriter{
- dst: wf,
- latency: flushInterval,
- }
- defer mlw.stop()
-
- // set up initial timer so headers get flushed even if body writes are delayed
- mlw.flushPending = true
- mlw.t = time.AfterFunc(flushInterval, mlw.delayedFlush)
-
- dst = mlw
- }
- }
-
- buf := streamingBufPool.Get().(*[]byte)
- defer streamingBufPool.Put(buf)
- _, err := h.copyBuffer(dst, src, *buf)
- return err
-}
-
-// copyBuffer returns any write errors or non-EOF read errors, and the amount
-// of bytes written.
-func (h Handler) copyBuffer(dst io.Writer, src io.Reader, buf []byte) (int64, error) {
- if len(buf) == 0 {
- buf = make([]byte, defaultBufferSize)
- }
- var written int64
- for {
- nr, rerr := src.Read(buf)
- if rerr != nil && rerr != io.EOF && rerr != context.Canceled {
- // TODO: this could be useful to know (indeed, it revealed an error in our
- // fastcgi PoC earlier; but it's this single error report here that necessitates
- // a function separate from io.CopyBuffer, since io.CopyBuffer does not distinguish
- // between read or write errors; in a reverse proxy situation, write errors are not
- // something we need to report to the client, but read errors are a problem on our
- // end for sure. so we need to decide what we want.)
- // p.logf("copyBuffer: ReverseProxy read error during body copy: %v", rerr)
- h.logger.Error("reading from backend", zap.Error(rerr))
- }
- if nr > 0 {
- nw, werr := dst.Write(buf[:nr])
- if nw > 0 {
- written += int64(nw)
- }
- if werr != nil {
- return written, werr
- }
- if nr != nw {
- return written, io.ErrShortWrite
- }
- }
- if rerr != nil {
- if rerr == io.EOF {
- rerr = nil
- }
- return written, rerr
- }
- }
-}
-
-type writeFlusher interface {
- io.Writer
- http.Flusher
-}
-
-type maxLatencyWriter struct {
- dst writeFlusher
- latency time.Duration // non-zero; negative means to flush immediately
-
- mu sync.Mutex // protects t, flushPending, and dst.Flush
- t *time.Timer
- flushPending bool
-}
-
-func (m *maxLatencyWriter) Write(p []byte) (n int, err error) {
- m.mu.Lock()
- defer m.mu.Unlock()
- n, err = m.dst.Write(p)
- if m.latency < 0 {
- m.dst.Flush()
- return
- }
- if m.flushPending {
- return
- }
- if m.t == nil {
- m.t = time.AfterFunc(m.latency, m.delayedFlush)
- } else {
- m.t.Reset(m.latency)
- }
- m.flushPending = true
- return
-}
-
-func (m *maxLatencyWriter) delayedFlush() {
- m.mu.Lock()
- defer m.mu.Unlock()
- if !m.flushPending { // if stop was called but AfterFunc already started this goroutine
- return
- }
- m.dst.Flush()
- m.flushPending = false
-}
-
-func (m *maxLatencyWriter) stop() {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.flushPending = false
- if m.t != nil {
- m.t.Stop()
- }
-}
-
-// switchProtocolCopier exists so goroutines proxying data back and
-// forth have nice names in stacks.
-type switchProtocolCopier struct {
- user, backend io.ReadWriter
-}
-
-func (c switchProtocolCopier) copyFromBackend(errc chan<- error) {
- _, err := io.Copy(c.user, c.backend)
- errc <- err
-}
-
-func (c switchProtocolCopier) copyToBackend(errc chan<- error) {
- _, err := io.Copy(c.backend, c.user)
- errc <- err
-}
-
-var streamingBufPool = sync.Pool{
- New: func() interface{} {
- // The Pool's New function should generally only return pointer
- // types, since a pointer can be put into the return interface
- // value without an allocation
- // - (from the package docs)
- b := make([]byte, defaultBufferSize)
- return &b
- },
-}
-
-const defaultBufferSize = 32 * 1024
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/routes.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/routes.go
deleted file mode 100644
index ebd763c7..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/routes.go
+++ /dev/null
@@ -1,321 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-// Route consists of a set of rules for matching HTTP requests,
-// a list of handlers to execute, and optional flow control
-// parameters which customize the handling of HTTP requests
-// in a highly flexible and performant manner.
-type Route struct {
- // Group is an optional name for a group to which this
- // route belongs. Grouping a route makes it mutually
- // exclusive with others in its group; if a route belongs
- // to a group, only the first matching route in that group
- // will be executed.
- Group string `json:"group,omitempty"`
-
- // The matcher sets which will be used to qualify this
- // route for a request (essentially the "if" statement
- // of this route). Each matcher set is OR'ed, but matchers
- // within a set are AND'ed together.
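- //
- // For example, a hypothetical "match" value with two matcher sets
- // (shown only to illustrate the OR/AND semantics); a request
- // qualifies if it fully matches either set:
- //
- //     "match": [
- //         {"host": ["example.com"], "path": ["/api/*"]},
- //         {"host": ["alt.example.com"]}
- //     ]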
- MatcherSetsRaw RawMatcherSets `json:"match,omitempty" caddy:"namespace=http.matchers"`
-
- // The list of handlers for this route. Upon matching a request, they are chained
- // together in a middleware fashion: requests flow from the first handler to the last
- // (top of the list to the bottom), with the possibility that any handler could stop
- // the chain and/or return an error. Responses flow back through the chain (bottom of
- // the list to the top) as they are written out to the client.
- //
- // Not all handlers call the next handler in the chain. For example, the reverse_proxy
- // handler always sends a request upstream or returns an error. Thus, configuring
- // handlers after reverse_proxy in the same route is illogical, since they would never
- // be executed. You will want to put handlers which originate the response at the very
- // end of your route(s). The documentation for a module should state whether it invokes
- // the next handler, but sometimes it is common sense.
- //
- // Some handlers manipulate the response. Remember that requests flow down the list, and
- // responses flow up the list.
- //
- // For example, if you wanted to use both `templates` and `encode` handlers, you would
- // need to put `templates` after `encode` in your route, because responses flow up.
- // Thus, `templates` will be able to parse and execute the plain-text response as a
- // template, and then return it up to the `encode` handler which will then compress it
- // into a binary format.
- //
- // If `templates` came before `encode`, then `encode` would write a compressed,
- // binary-encoded response to `templates` which would not be able to parse the response
- // properly.
- //
- // The correct order, then, is this:
- //
- // [
- // {"handler": "encode"},
- // {"handler": "templates"},
- // {"handler": "file_server"}
- // ]
- //
- // The request flows ⬇️ DOWN (`encode` -> `templates` -> `file_server`).
- //
- // 1. First, `encode` will choose how to `encode` the response and wrap the response.
- // 2. Then, `templates` will wrap the response with a buffer.
- // 3. Finally, `file_server` will originate the content from a file.
- //
- // The response flows ⬆️ UP (`file_server` -> `templates` -> `encode`):
- //
- // 1. First, `file_server` will write the file to the response.
- // 2. That write will be buffered and then executed by `templates`.
- // 3. Lastly, the write from `templates` will flow into `encode` which will compress the stream.
- //
- // If you think of routes in this way, it will be easy and even fun to solve the puzzle of writing correct routes.
- HandlersRaw []json.RawMessage `json:"handle,omitempty" caddy:"namespace=http.handlers inline_key=handler"`
-
- // If true, no more routes will be executed after this one.
- Terminal bool `json:"terminal,omitempty"`
-
- // decoded values
- MatcherSets MatcherSets `json:"-"`
- Handlers []MiddlewareHandler `json:"-"`
-
- middleware []Middleware
-}
-
-// Empty returns true if the route has all zero/default values.
-func (r Route) Empty() bool {
- return len(r.MatcherSetsRaw) == 0 &&
- len(r.MatcherSets) == 0 &&
- len(r.HandlersRaw) == 0 &&
- len(r.Handlers) == 0 &&
- !r.Terminal &&
- r.Group == ""
-}
-
-// RouteList is a list of server routes that can
-// create a middleware chain.
-type RouteList []Route
-
-// Provision sets up both the matchers and handlers in the routes.
-func (routes RouteList) Provision(ctx caddy.Context) error {
- err := routes.ProvisionMatchers(ctx)
- if err != nil {
- return err
- }
- return routes.ProvisionHandlers(ctx)
-}
-
-// ProvisionMatchers sets up all the matchers by loading the
-// matcher modules. Only call this method directly if you need
-// to set up matchers and handlers separately without having
-// to provision a second time; otherwise use Provision instead.
-func (routes RouteList) ProvisionMatchers(ctx caddy.Context) error {
- for i := range routes {
- // matchers
- matchersIface, err := ctx.LoadModule(&routes[i], "MatcherSetsRaw")
- if err != nil {
- return fmt.Errorf("route %d: loading matcher modules: %v", i, err)
- }
- err = routes[i].MatcherSets.FromInterface(matchersIface)
- if err != nil {
- return fmt.Errorf("route %d: %v", i, err)
- }
- }
- return nil
-}
-
-// ProvisionHandlers sets up all the handlers by loading the
-// handler modules. Only call this method directly if you need
-// to set up matchers and handlers separately without having
-// to provision a second time; otherwise use Provision instead.
-func (routes RouteList) ProvisionHandlers(ctx caddy.Context) error {
- for i := range routes {
- handlersIface, err := ctx.LoadModule(&routes[i], "HandlersRaw")
- if err != nil {
- return fmt.Errorf("route %d: loading handler modules: %v", i, err)
- }
- for _, handler := range handlersIface.([]interface{}) {
- routes[i].Handlers = append(routes[i].Handlers, handler.(MiddlewareHandler))
- }
-
- // pre-compile the middleware handler chain
- for _, midhandler := range routes[i].Handlers {
- routes[i].middleware = append(routes[i].middleware, wrapMiddleware(ctx, midhandler))
- }
- }
- return nil
-}
-
-// Compile prepares a middleware chain from the route list.
-// This should only be done once: after all the routes have
-// been provisioned, and before serving requests.
-func (routes RouteList) Compile(next Handler) Handler {
- mid := make([]Middleware, 0, len(routes))
- for _, route := range routes {
- mid = append(mid, wrapRoute(route))
- }
- stack := next
- for i := len(mid) - 1; i >= 0; i-- {
- stack = mid[i](stack)
- }
- return stack
-}
-
-// wrapRoute wraps route with a middleware and handler so that it can
-// be chained in and defer evaluation of its matchers to request-time.
-// Like wrapMiddleware, it is vital that this wrapping takes place in
-// its own stack frame so as to not overwrite the reference to the
-// intended route by looping and changing the reference each time.
-func wrapRoute(route Route) Middleware {
- return func(next Handler) Handler {
- return HandlerFunc(func(rw http.ResponseWriter, req *http.Request) error {
- // TODO: Update this comment, it seems we've moved the copy into the handler?
- // copy the next handler (it's an interface, so it's just
- // a very lightweight copy of a pointer); this is important
- // because this is a closure to the func below, which
- // re-assigns the value as it compiles the middleware stack;
- // if we don't make this copy, we'd affect the underlying
- // pointer for all future request (yikes); we could
- // alternatively solve this by moving the func below out of
- // this closure and into a standalone package-level func,
- // but I just thought this made more sense
- nextCopy := next
-
- // route must match at least one of the matcher sets
- if !route.MatcherSets.AnyMatch(req) {
- return nextCopy.ServeHTTP(rw, req)
- }
-
- // if route is part of a group, ensure only the
- // first matching route in the group is applied
- if route.Group != "" {
- groups := req.Context().Value(routeGroupCtxKey).(map[string]struct{})
-
- if _, ok := groups[route.Group]; ok {
- // this group has already been
- // satisfied by a matching route
- return nextCopy.ServeHTTP(rw, req)
- }
-
- // this matching route satisfies the group
- groups[route.Group] = struct{}{}
- }
-
- // make terminal routes terminate
- if route.Terminal {
- if _, ok := req.Context().Value(ErrorCtxKey).(error); ok {
- nextCopy = errorEmptyHandler
- } else {
- nextCopy = emptyHandler
- }
- }
-
- // compile this route's handler stack
- for i := len(route.middleware) - 1; i >= 0; i-- {
- nextCopy = route.middleware[i](nextCopy)
- }
-
- return nextCopy.ServeHTTP(rw, req)
- })
- }
-}
-
-// wrapMiddleware wraps mh such that it can be correctly
-// appended to a list of middleware in preparation for
-// compiling into a handler chain. We can't do this inline
-// inside a loop, because it relies on a reference to mh
-// not changing until the execution of its handler (which
-// is deferred by multiple func closures). In other words,
-// we need to pull this particular MiddlewareHandler
-// pointer into its own stack frame to preserve it so it
-// won't be overwritten in future loop iterations.
-func wrapMiddleware(ctx caddy.Context, mh MiddlewareHandler) Middleware {
- // wrap the middleware with metrics instrumentation
- metricsHandler := newMetricsInstrumentedHandler(caddy.GetModuleName(mh), mh)
-
- return func(next Handler) Handler {
- // copy the next handler (it's an interface, so it's
- // just a very lightweight copy of a pointer); this
- // is a safeguard against the handler changing the
- // value, which could affect future requests (yikes)
- nextCopy := next
-
- return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
- // TODO: This is where request tracing could be implemented
- // TODO: see what the std lib gives us in terms of stack tracing too
- return metricsHandler.ServeHTTP(w, r, nextCopy)
- })
- }
-}
-
-// MatcherSet is a set of matchers which
-// must all match in order for the request
-// to be matched successfully.
-type MatcherSet []RequestMatcher
-
-// Match returns true if the request matches all
-// matchers in mset or if there are no matchers.
-func (mset MatcherSet) Match(r *http.Request) bool {
- for _, m := range mset {
- if !m.Match(r) {
- return false
- }
- }
- return true
-}
-
-// RawMatcherSets is a group of matcher sets
-// in their raw, JSON form.
-type RawMatcherSets []caddy.ModuleMap
-
-// MatcherSets is a group of matcher sets capable
-// of checking whether a request matches any of
-// the sets.
-type MatcherSets []MatcherSet
-
-// AnyMatch returns true if req matches any of the
-// matcher sets in ms or if there are no matchers,
-// in which case the request always matches.
-func (ms MatcherSets) AnyMatch(req *http.Request) bool {
- for _, m := range ms {
- if m.Match(req) {
- return true
- }
- }
- return len(ms) == 0
-}
-
-// FromInterface fills ms from an interface{} value obtained from LoadModule.
-func (ms *MatcherSets) FromInterface(matcherSets interface{}) error {
- for _, matcherSetIfaces := range matcherSets.([]map[string]interface{}) {
- var matcherSet MatcherSet
- for _, matcher := range matcherSetIfaces {
- reqMatcher, ok := matcher.(RequestMatcher)
- if !ok {
- return fmt.Errorf("decoded module is not a RequestMatcher: %#v", matcher)
- }
- matcherSet = append(matcherSet, reqMatcher)
- }
- *ms = append(*ms, matcherSet)
- }
- return nil
-}
-
-var routeGroupCtxKey = caddy.CtxKey("route_group")
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/server.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/server.go
deleted file mode 100644
index 80948325..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/server.go
+++ /dev/null
@@ -1,645 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net"
- "net/http"
- "net/url"
- "runtime"
- "strings"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/modules/caddytls"
- "github.com/lucas-clemente/quic-go/http3"
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-// Server describes an HTTP server.
-type Server struct {
- // Socket addresses to which to bind listeners. Accepts
- // [network addresses](/docs/conventions#network-addresses)
- // that may include port ranges. Listener addresses must
- // be unique; they cannot be repeated across all defined
- // servers.
- Listen []string `json:"listen,omitempty"`
-
- // A list of listener wrapper modules, which can modify the behavior
- // of the base listener. They are applied in the given order.
- ListenerWrappersRaw []json.RawMessage `json:"listener_wrappers,omitempty" caddy:"namespace=caddy.listeners inline_key=wrapper"`
-
- // How long to allow a read from a client's upload. Setting this
- // to a short, non-zero value can mitigate slowloris attacks, but
- // may also affect legitimately slow clients.
- ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`
-
- // ReadHeaderTimeout is like ReadTimeout but for request headers.
- ReadHeaderTimeout caddy.Duration `json:"read_header_timeout,omitempty"`
-
- // WriteTimeout is how long to allow a write to a client. Note
- // that setting this to a small value when serving large files
- // may negatively affect legitimately slow clients.
- WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`
-
- // IdleTimeout is the maximum time to wait for the next request
- // when keep-alives are enabled. If zero, a default timeout of
- // 5m is applied to help avoid resource exhaustion.
- IdleTimeout caddy.Duration `json:"idle_timeout,omitempty"`
-
- // MaxHeaderBytes is the maximum size to parse from a client's
- // HTTP request headers.
- MaxHeaderBytes int `json:"max_header_bytes,omitempty"`
-
- // Routes describes how this server will handle requests.
- // Routes are executed sequentially. First a route's matchers
- // are evaluated, then its grouping. If it matches and has
- // not been mutually-excluded by its grouping, then its
- // handlers are executed sequentially. The sequence of invoked
- // handlers comprises a compiled middleware chain that flows
- // from each matching route and its handlers to the next.
- //
- // By default, all unrouted requests receive a 200 OK response
- // to indicate the server is working.
- Routes RouteList `json:"routes,omitempty"`
-
- // Errors is how this server will handle errors returned from any
- // of the handlers in the primary routes. If the primary handler
- // chain returns an error, the error along with its recommended
- // status code are bubbled back up to the HTTP server which
- // executes a separate error route, specified using this property.
- // The error routes work exactly like the normal routes.
- Errors *HTTPErrorConfig `json:"errors,omitempty"`
-
- // How to handle TLS connections. At least one policy is
- // required to enable HTTPS on this server if automatic
- // HTTPS is disabled or does not apply.
- TLSConnPolicies caddytls.ConnectionPolicies `json:"tls_connection_policies,omitempty"`
-
- // AutoHTTPS configures or disables automatic HTTPS within this server.
- // HTTPS is enabled automatically and by default when qualifying names
- // are present in a Host matcher and/or when the server is listening
- // only on the HTTPS port.
- AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"`
-
- // If true, will require that a request's Host header match
- // the value of the ServerName sent by the client's TLS
- // ClientHello; often a necessary safeguard when using TLS
- // client authentication.
- StrictSNIHost *bool `json:"strict_sni_host,omitempty"`
-
- // Enables access logging and configures how access logs are handled
- // in this server. To minimally enable access logs, simply set this
- // to a non-null, empty struct.
- Logs *ServerLogConfig `json:"logs,omitempty"`
-
- // Enable experimental HTTP/3 support. Note that HTTP/3 is not a
- // finished standard and has extremely limited client support.
- // This field is not subject to compatibility promises.
- ExperimentalHTTP3 bool `json:"experimental_http3,omitempty"`
-
- // Enables H2C ("Cleartext HTTP/2" or "H2 over TCP") support,
- // which will serve HTTP/2 over plaintext TCP connections if
- // the client supports it. Because this is not implemented by the
- // Go standard library, using H2C is incompatible with most
- // of the other options for this server. Do not enable this
- // only to achieve maximum client compatibility. In practice,
- // very few clients implement H2C, and even fewer require it.
- // This setting applies only to unencrypted HTTP listeners.
- // ⚠️ Experimental feature; subject to change or removal.
- AllowH2C bool `json:"allow_h2c,omitempty"`
-
- name string
-
- primaryHandlerChain Handler
- errorHandlerChain Handler
- listenerWrappers []caddy.ListenerWrapper
-
- tlsApp *caddytls.TLS
- logger *zap.Logger
- accessLogger *zap.Logger
- errorLogger *zap.Logger
-
- h3server *http3.Server
-}
-
-// ServeHTTP is the entry point for all HTTP requests.
-func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Server", "Caddy")
-
- if s.h3server != nil {
- err := s.h3server.SetQuicHeaders(w.Header())
- if err != nil {
- s.logger.Error("setting HTTP/3 Alt-Svc header", zap.Error(err))
- }
- }
-
- repl := caddy.NewReplacer()
- r = PrepareRequest(r, repl, w, s)
-
- // encode the request for logging purposes before
- // it enters any handler chain; this is necessary
- // to capture the original request in case it gets
- // modified during handling
- loggableReq := zap.Object("request", LoggableHTTPRequest{r})
- errLog := s.errorLogger.With(loggableReq)
-
- var duration time.Duration
-
- if s.shouldLogRequest(r) {
- wrec := NewResponseRecorder(w, nil, nil)
- w = wrec
-
- // capture the original version of the request
- accLog := s.accessLogger.With(loggableReq)
-
- defer func() {
- repl.Set("http.response.status", wrec.Status())
- repl.Set("http.response.size", wrec.Size())
- repl.Set("http.response.duration", duration)
-
- logger := accLog
- if s.Logs != nil {
- logger = s.Logs.wrapLogger(logger, r.Host)
- }
-
- log := logger.Info
- if wrec.Status() >= 400 {
- log = logger.Error
- }
-
- userID, _ := repl.GetString("http.auth.user.id")
-
- log("handled request",
- zap.String("common_log", repl.ReplaceAll(commonLogFormat, commonLogEmptyValue)),
- zap.String("user_id", userID),
- zap.Duration("duration", duration),
- zap.Int("size", wrec.Size()),
- zap.Int("status", wrec.Status()),
- zap.Object("resp_headers", LoggableHTTPHeader(wrec.Header())),
- )
- }()
- }
-
- start := time.Now()
-
- // guarantee ACME HTTP challenges; handle them
- // separately from any user-defined handlers
- if s.tlsApp.HandleHTTPChallenge(w, r) {
- duration = time.Since(start)
- return
- }
-
- // execute the primary handler chain
- err := s.primaryHandlerChain.ServeHTTP(w, r)
- duration = time.Since(start)
-
- // if no errors, we're done!
- if err == nil {
- return
- }
-
- // restore original request before invoking error handler chain (issue #3717)
- // TODO: this does not restore original headers, if modified (for efficiency)
- origReq := r.Context().Value(OriginalRequestCtxKey).(http.Request)
- r.Method = origReq.Method
- r.RemoteAddr = origReq.RemoteAddr
- r.RequestURI = origReq.RequestURI
- cloneURL(origReq.URL, r.URL)
-
- // prepare the error log
- logger := errLog
- if s.Logs != nil {
- logger = s.Logs.wrapLogger(logger, r.Host)
- }
- logger = logger.With(zap.Duration("duration", duration))
-
- // get the values that will be used to log the error
- errStatus, errMsg, errFields := errLogValues(err)
-
- // add HTTP error information to request context
- r = s.Errors.WithError(r, err)
-
- if s.Errors != nil && len(s.Errors.Routes) > 0 {
- // execute user-defined error handling route
- err2 := s.errorHandlerChain.ServeHTTP(w, r)
- if err2 == nil {
- // user's error route handled the error response
- // successfully, so now just log the error
- if errStatus >= 500 {
- logger.Error(errMsg, errFields...)
- }
- } else {
- // well... this is awkward
- errFields = append([]zapcore.Field{
- zap.String("error", err2.Error()),
- zap.Namespace("first_error"),
- zap.String("msg", errMsg),
- }, errFields...)
- logger.Error("error handling handler error", errFields...)
- if handlerErr, ok := err.(HandlerError); ok {
- w.WriteHeader(handlerErr.StatusCode)
- } else {
- w.WriteHeader(http.StatusInternalServerError)
- }
- }
- } else {
- if errStatus >= 500 {
- logger.Error(errMsg, errFields...)
- }
- w.WriteHeader(errStatus)
- }
-}
-
-// wrapPrimaryRoute wraps stack (a compiled middleware handler chain)
-// in s.enforcementHandler which performs crucial security checks, etc.
-func (s *Server) wrapPrimaryRoute(stack Handler) Handler {
- return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
- return s.enforcementHandler(w, r, stack)
- })
-}
-
-// enforcementHandler is an implicit middleware which performs
-// standard checks before executing the HTTP middleware chain.
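-// For example (hypothetical names): a client could complete a TLS
-// handshake with a benign SNI such as "public.example" and then send
-// "Host: restricted.example" in the request; with StrictSNIHost
-// enabled, that mismatch is rejected below with HTTP 403 before any
-// route runs.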
-func (s *Server) enforcementHandler(w http.ResponseWriter, r *http.Request, next Handler) error {
- // enforce strict host matching, which ensures that the SNI
- // value (if any) matches the Host header; essential for
- // servers that rely on TLS ClientAuth sharing a listener
- // with servers that do not; if not enforced, client could
- // bypass by sending benign SNI then restricted Host header
- if s.StrictSNIHost != nil && *s.StrictSNIHost && r.TLS != nil {
- hostname, _, err := net.SplitHostPort(r.Host)
- if err != nil {
- hostname = r.Host // OK; probably lacked port
- }
- if !strings.EqualFold(r.TLS.ServerName, hostname) {
- err := fmt.Errorf("strict host matching: TLS ServerName (%s) and HTTP Host (%s) values differ",
- r.TLS.ServerName, hostname)
- r.Close = true
- return Error(http.StatusForbidden, err)
- }
- }
- return next.ServeHTTP(w, r)
-}
-
-// listenersUseAnyPortOtherThan returns true if there are any
-// listeners in s that use a port which is not otherPort.
-func (s *Server) listenersUseAnyPortOtherThan(otherPort int) bool {
- for _, lnAddr := range s.Listen {
- laddrs, err := caddy.ParseNetworkAddress(lnAddr)
- if err != nil {
- continue
- }
- if uint(otherPort) > laddrs.EndPort || uint(otherPort) < laddrs.StartPort {
- return true
- }
- }
- return false
-}
-
-// hasListenerAddress returns true if s has a listener
-// at the given address fullAddr. Currently, fullAddr
-// must represent exactly one socket address (port
-// ranges are not supported)
-func (s *Server) hasListenerAddress(fullAddr string) bool {
- laddrs, err := caddy.ParseNetworkAddress(fullAddr)
- if err != nil {
- return false
- }
- if laddrs.PortRangeSize() != 1 {
- return false // TODO: support port ranges
- }
-
- for _, lnAddr := range s.Listen {
- thisAddrs, err := caddy.ParseNetworkAddress(lnAddr)
- if err != nil {
- continue
- }
- if thisAddrs.Network != laddrs.Network {
- continue
- }
-
- // Apparently, Linux requires all bound ports to be distinct
- // *regardless of host interface* even if the addresses are
- // in fact different; binding "192.168.0.1:9000" and then
- // ":9000" will fail for ":9000" because "address is already
- // in use" even though it's not, and the same bindings work
- // fine on macOS. I also found on Linux that listening on
- // "[::]:9000" would fail with a similar error, except with
- // the address "0.0.0.0:9000", as if deliberately ignoring
- // that I specified the IPv6 interface explicitly. This seems
- // to be a major bug in the Linux network stack and I don't
- // know why it hasn't been fixed yet, so for now we have to
- // special-case ourselves around Linux like a doting parent.
- // The second issue seems very similar to a discussion here:
- // https://github.com/nodejs/node/issues/9390
- //
- // This is very easy to reproduce by creating an HTTP server
- // that listens to both addresses or just one with a host
- // interface; or for a more confusing reproduction, try
- // listening on "127.0.0.1:80" and ":443" and you'll see
- // the error, if you take away the GOOS condition below.
- //
- // So, an address is equivalent if the port is in the port
- // range, and if not on Linux, the host is the same... sigh.
- if (runtime.GOOS == "linux" || thisAddrs.Host == laddrs.Host) &&
- (laddrs.StartPort <= thisAddrs.EndPort) &&
- (laddrs.StartPort >= thisAddrs.StartPort) {
- return true
- }
- }
- return false
-}
-
-func (s *Server) hasTLSClientAuth() bool {
- for _, cp := range s.TLSConnPolicies {
- if cp.ClientAuthentication != nil && cp.ClientAuthentication.Active() {
- return true
- }
- }
- return false
-}
-
-// findLastRouteWithHostMatcher returns the index of the last route
-// in the server which has a host matcher. Used during Automatic HTTPS
-// to determine where to insert the HTTP->HTTPS redirect route, such
-// that it is after any other host matcher but before any "catch-all"
-// route without a host matcher.
-func (s *Server) findLastRouteWithHostMatcher() int {
- foundHostMatcher := false
- lastIndex := len(s.Routes)
-
- for i, route := range s.Routes {
- // since we want to break out of an inner loop, use a closure
- // to allow us to use 'return' when we found a host matcher
- found := (func() bool {
- for _, sets := range route.MatcherSets {
- for _, matcher := range sets {
- switch matcher.(type) {
- case *MatchHost:
- foundHostMatcher = true
- return true
- }
- }
- }
- return false
- })()
-
- // if we found the host matcher, change the lastIndex to
- // just after the current route
- if found {
- lastIndex = i + 1
- }
- }
-
- // If we didn't actually find a host matcher, return 0
- // because that means every defined route was a "catch-all".
- // See https://caddy.community/t/how-to-set-priority-in-caddyfile/13002/8
- if !foundHostMatcher {
- return 0
- }
-
- return lastIndex
-}
-
-// HTTPErrorConfig determines how to handle errors
-// from the HTTP handlers.
-type HTTPErrorConfig struct {
- // The routes to evaluate after the primary handler
- // chain returns an error. In an error route, extra
- // placeholders are available:
- //
- // Placeholder | Description
- // ------------|---------------
- // `{http.error.status_code}` | The recommended HTTP status code
- // `{http.error.status_text}` | The status text associated with the recommended status code
- // `{http.error.message}` | The error message
- // `{http.error.trace}` | The origin of the error
- // `{http.error.id}` | An identifier for this occurrence of the error
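- //
- // For example, a minimal error route that renders the recommended
- // status code and message (a hypothetical sketch using the
- // static_response handler defined elsewhere in this package):
- //
- //     "errors": {"routes": [{"handle": [{
- //         "handler": "static_response",
- //         "status_code": "{http.error.status_code}",
- //         "body": "{http.error.message}"
- //     }]}]}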
- Routes RouteList `json:"routes,omitempty"`
-}
-
-// WithError makes a shallow copy of r to add the error to its
-// context, and sets placeholders on the request's replacer
-// related to err. It returns the modified request which has
-// the error information in its context and replacer. It
-// overwrites any existing error values that are stored.
-func (*HTTPErrorConfig) WithError(r *http.Request, err error) *http.Request {
- // add the raw error value to the request context
- // so it can be accessed by error handlers
- c := context.WithValue(r.Context(), ErrorCtxKey, err)
- r = r.WithContext(c)
-
- // add error values to the replacer
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- repl.Set("http.error", err)
- if handlerErr, ok := err.(HandlerError); ok {
- repl.Set("http.error.status_code", handlerErr.StatusCode)
- repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode))
- repl.Set("http.error.trace", handlerErr.Trace)
- repl.Set("http.error.id", handlerErr.ID)
- }
-
- return r
-}
-
-// shouldLogRequest returns true if this request should be logged.
-func (s *Server) shouldLogRequest(r *http.Request) bool {
- if s.accessLogger == nil || s.Logs == nil {
- // logging is disabled
- return false
- }
- for _, dh := range s.Logs.SkipHosts {
- // logging for this particular host is disabled
- if r.Host == dh {
- return false
- }
- }
- if _, ok := s.Logs.LoggerNames[r.Host]; ok {
- // this host is mapped to a particular logger name
- return true
- }
- if s.Logs.SkipUnmappedHosts {
- // this host is not mapped and thus must not be logged
- return false
- }
- return true
-}
-
-// ServerLogConfig describes a server's logging configuration. If
-// enabled without customization, all requests to this server are
-// logged to the default logger; logger destinations may be
-// customized per-request-host.
-type ServerLogConfig struct {
- // The default logger name for all logs emitted by this server for
- // hostnames that are not in the LoggerNames (logger_names) map.
- DefaultLoggerName string `json:"default_logger_name,omitempty"`
-
- // LoggerNames maps request hostnames to a custom logger name.
- // For example, a mapping of "example.com" to "example" would
- // cause access logs from requests with a Host of example.com
- // to be emitted by a logger named "http.log.access.example".
- LoggerNames map[string]string `json:"logger_names,omitempty"`
-
- // By default, all requests to this server will be logged if
- // access logging is enabled. This field lists the request
- // hosts for which access logging should be disabled.
- SkipHosts []string `json:"skip_hosts,omitempty"`
-
- // If true, requests to any host not appearing in the
- // LoggerNames (logger_names) map will not be logged.
- SkipUnmappedHosts bool `json:"skip_unmapped_hosts,omitempty"`
-}
-
-// wrapLogger wraps logger in a logger named according to user preferences for the given host.
-func (slc ServerLogConfig) wrapLogger(logger *zap.Logger, host string) *zap.Logger {
- if loggerName := slc.getLoggerName(host); loggerName != "" {
- return logger.Named(loggerName)
- }
- return logger
-}
-
-func (slc ServerLogConfig) getLoggerName(host string) string {
- tryHost := func(key string) (string, bool) {
- // first try exact match
- if loggerName, ok := slc.LoggerNames[key]; ok {
- return loggerName, ok
- }
- // strip port and try again (i.e. Host header of "example.com:1234" should
- // match "example.com" if there is no "example.com:1234" in the map)
- hostOnly, _, err := net.SplitHostPort(key)
- if err != nil {
- return "", false
- }
- loggerName, ok := slc.LoggerNames[hostOnly]
- return loggerName, ok
- }
-
- // try the exact hostname first
- if loggerName, ok := tryHost(host); ok {
- return loggerName
- }
-
- // try matching wildcard domains if other non-specific loggers exist
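- // (e.g. for host "a.example.com" this tries, in order:
- // "*.example.com", "*.*.com", and "*.*.*", replacing labels
- // cumulatively from left to right)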
- labels := strings.Split(host, ".")
- for i := range labels {
- if labels[i] == "" {
- continue
- }
- labels[i] = "*"
- wildcardHost := strings.Join(labels, ".")
- if loggerName, ok := tryHost(wildcardHost); ok {
- return loggerName
- }
- }
-
- return slc.DefaultLoggerName
-}
-
-// PrepareRequest fills the request r for use in a Caddy HTTP handler chain. w and s can
-// be nil, but the handlers will lose response placeholders and access to the server.
-func PrepareRequest(r *http.Request, repl *caddy.Replacer, w http.ResponseWriter, s *Server) *http.Request {
- // set up the context for the request
- ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl)
- ctx = context.WithValue(ctx, ServerCtxKey, s)
- ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]interface{}))
- ctx = context.WithValue(ctx, routeGroupCtxKey, make(map[string]struct{}))
- var url2 url.URL // avoid letting this escape to the heap
- ctx = context.WithValue(ctx, OriginalRequestCtxKey, originalRequest(r, &url2))
- r = r.WithContext(ctx)
-
- // once the pointer to the request won't change
- // anymore, finish setting up the replacer
- addHTTPVarsToReplacer(repl, r, w)
-
- return r
-}
-
-// errLogValues inspects err and returns the status code
-// to use, the error log message, and any extra fields.
-// If err is a HandlerError, the returned values will
-// have richer information.
-func errLogValues(err error) (status int, msg string, fields []zapcore.Field) {
- if handlerErr, ok := err.(HandlerError); ok {
- status = handlerErr.StatusCode
- if handlerErr.Err == nil {
- msg = err.Error()
- } else {
- msg = handlerErr.Err.Error()
- }
- fields = []zapcore.Field{
- zap.Int("status", handlerErr.StatusCode),
- zap.String("err_id", handlerErr.ID),
- zap.String("err_trace", handlerErr.Trace),
- }
- return
- }
- status = http.StatusInternalServerError
- msg = err.Error()
- return
-}
-
-// originalRequest returns a partial, shallow copy of
-// req, including: req.Method, deep copy of req.URL
-// (into the urlCopy parameter, which should be on the
-// stack), req.RequestURI, and req.RemoteAddr. Notably,
-// headers are not copied. This function is designed to
-// be very fast and efficient, and useful primarily for
-// read-only/logging purposes.
-func originalRequest(req *http.Request, urlCopy *url.URL) http.Request {
- cloneURL(req.URL, urlCopy)
- return http.Request{
- Method: req.Method,
- RemoteAddr: req.RemoteAddr,
- RequestURI: req.RequestURI,
- URL: urlCopy,
- }
-}
-
-// cloneURL makes a copy of r.URL and returns a
-// new value that doesn't reference the original.
-func cloneURL(from, to *url.URL) {
- *to = *from
- if from.User != nil {
- userInfo := new(url.Userinfo)
- *userInfo = *from.User
- to.User = userInfo
- }
-}
-
-const (
- // commonLogFormat is the common log format. https://en.wikipedia.org/wiki/Common_Log_Format
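- // A rendered entry looks like (hypothetical values):
- //
- //     127.0.0.1 - alice [10/Oct/2020:13:55:36 +0000] "GET /index.html HTTP/1.1" 200 1234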
- commonLogFormat = `{http.request.remote.host} ` + commonLogEmptyValue + ` {http.auth.user.id} [{time.now.common_log}] "{http.request.orig_method} {http.request.orig_uri} {http.request.proto}" {http.response.status} {http.response.size}`
-
- // commonLogEmptyValue is the common empty log value.
- commonLogEmptyValue = "-"
-)
-
-// Context keys for HTTP request context values.
-const (
- // For referencing the server instance
- ServerCtxKey caddy.CtxKey = "server"
-
- // For the request's variable table
- VarsCtxKey caddy.CtxKey = "vars"
-
- // For a partial copy of the unmodified request that
- // originally came into the server's entry handler
- OriginalRequestCtxKey caddy.CtxKey = "original_request"
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticerror.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticerror.go
deleted file mode 100644
index 914e6c14..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticerror.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "fmt"
- "net/http"
- "strconv"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
-)
-
-func init() {
- caddy.RegisterModule(StaticError{})
-}
-
-// StaticError implements a simple handler that returns an error.
-// This handler returns an error value, but does not write a response.
-// This is useful when you want the server to act as if an error
-// occurred; for example, to invoke your custom error handling logic.
-//
-// Since this handler does not write a response, the error information
-// is for use by the server to know how to handle the error.
-type StaticError struct {
- // The error message. Optional. Default is no error message.
- Error string `json:"error,omitempty"`
-
- // The recommended HTTP status code. Can be either an integer or a
- // string if placeholders are needed. Optional. Default is 500.
- StatusCode WeakString `json:"status_code,omitempty"`
-}
-
-// CaddyModule returns the Caddy module information.
-func (StaticError) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.handlers.error",
- New: func() caddy.Module { return new(StaticError) },
- }
-}
-
-// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
-//
-//     error [<matcher>] <status>|<message> [<status>] {
-//         message <text>
-//     }
-//
-// If there is just one argument (other than the matcher), it is considered
-// to be a status code if it's a valid positive integer of 3 digits.
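-//
-// For example (hypothetical message and status):
-//
-//     error "upstream unavailable" 502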
-func (e *StaticError) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- args := d.RemainingArgs()
- switch len(args) {
- case 1:
- if len(args[0]) == 3 {
- if num, err := strconv.Atoi(args[0]); err == nil && num > 0 {
- e.StatusCode = WeakString(args[0])
- break
- }
- }
- e.Error = args[0]
- case 2:
- e.Error = args[0]
- e.StatusCode = WeakString(args[1])
- default:
- return d.ArgErr()
- }
-
- for d.NextBlock(0) {
- switch d.Val() {
- case "message":
- if e.Error != "" {
- return d.Err("message already specified")
- }
- if !d.AllArgs(&e.Error) {
- return d.ArgErr()
- }
- default:
- return d.Errf("unrecognized subdirective '%s'", d.Val())
- }
- }
- }
- return nil
-}
-
-func (e StaticError) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Handler) error {
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
-
- statusCode := http.StatusInternalServerError
- if codeStr := e.StatusCode.String(); codeStr != "" {
- intVal, err := strconv.Atoi(repl.ReplaceAll(codeStr, ""))
- if err != nil {
- return Error(http.StatusInternalServerError, err)
- }
- statusCode = intVal
- }
-
- return Error(statusCode, fmt.Errorf("%s", e.Error))
-}
-
-// Interface guard
-var (
- _ MiddlewareHandler = (*StaticError)(nil)
- _ caddyfile.Unmarshaler = (*StaticError)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticresp.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticresp.go
deleted file mode 100644
index c587f5ee..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticresp.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "fmt"
- "net/http"
- "strconv"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
-)
-
-func init() {
- caddy.RegisterModule(StaticResponse{})
-}
-
-// StaticResponse implements a simple responder for static responses.
-type StaticResponse struct {
- // The HTTP status code to respond with. Can be an integer or,
- // if needing to use a placeholder, a string.
- StatusCode WeakString `json:"status_code,omitempty"`
-
- // Header fields to set on the response.
- Headers http.Header `json:"headers,omitempty"`
-
- // The response body.
- Body string `json:"body,omitempty"`
-
- // If true, the server will close the client's connection
- // after writing the response.
- Close bool `json:"close,omitempty"`
-
- // Immediately and forcefully closes the connection without
- // writing a response. Interrupts any other HTTP streams on
- // the same connection.
- Abort bool `json:"abort,omitempty"`
-}
-
-// CaddyModule returns the Caddy module information.
-func (StaticResponse) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.handlers.static_response",
- New: func() caddy.Module { return new(StaticResponse) },
- }
-}
-
-// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
-//
-//     respond [<matcher>] <status>|<body> [<status>] {
-//         body <text>
-//         close
-//     }
-//
-// If there is just one argument (other than the matcher), it is considered
-// to be a status code if it's a valid positive integer of 3 digits.
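-//
-// For example (hypothetical body and status):
-//
-//     respond "Hello, world!" 200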
-func (s *StaticResponse) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- args := d.RemainingArgs()
- switch len(args) {
- case 1:
- if len(args[0]) == 3 {
- if num, err := strconv.Atoi(args[0]); err == nil && num > 0 {
- s.StatusCode = WeakString(args[0])
- break
- }
- }
- s.Body = args[0]
- case 2:
- s.Body = args[0]
- s.StatusCode = WeakString(args[1])
- default:
- return d.ArgErr()
- }
-
- for d.NextBlock(0) {
- switch d.Val() {
- case "body":
- if s.Body != "" {
- return d.Err("body already specified")
- }
- if !d.AllArgs(&s.Body) {
- return d.ArgErr()
- }
- case "close":
- if s.Close {
- return d.Err("close already specified")
- }
- s.Close = true
- default:
- return d.Errf("unrecognized subdirective '%s'", d.Val())
- }
- }
- }
- return nil
-}
-
-func (s StaticResponse) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Handler) error {
- // close the connection immediately
- if s.Abort {
- panic(http.ErrAbortHandler)
- }
-
- // close the connection after responding
- if s.Close {
- r.Close = true
- w.Header().Set("Connection", "close")
- }
-
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
-
- // set all headers
- for field, vals := range s.Headers {
- field = repl.ReplaceAll(field, "")
- newVals := make([]string, len(vals))
- for i := range vals {
- newVals[i] = repl.ReplaceAll(vals[i], "")
- }
- w.Header()[field] = newVals
- }
-
- // do not allow Go to sniff the content-type
- if w.Header().Get("Content-Type") == "" {
- w.Header()["Content-Type"] = nil
- }
-
- // get the status code; if this handler exists in an error route,
- // use the recommended status code as the default; otherwise 200
- statusCode := http.StatusOK
- if reqErr, ok := r.Context().Value(ErrorCtxKey).(error); ok {
- if handlerErr, ok := reqErr.(HandlerError); ok {
- if handlerErr.StatusCode > 0 {
- statusCode = handlerErr.StatusCode
- }
- }
- }
- if codeStr := s.StatusCode.String(); codeStr != "" {
- intVal, err := strconv.Atoi(repl.ReplaceAll(codeStr, ""))
- if err != nil {
- return Error(http.StatusInternalServerError, err)
- }
- statusCode = intVal
- }
-
- // write headers
- w.WriteHeader(statusCode)
-
- // write response body
- if s.Body != "" {
- fmt.Fprint(w, repl.ReplaceKnown(s.Body, ""))
- }
-
- return nil
-}
-
-// Interface guards
-var (
- _ MiddlewareHandler = (*StaticResponse)(nil)
- _ caddyfile.Unmarshaler = (*StaticResponse)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/subroute.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/subroute.go
deleted file mode 100644
index 2e80d88d..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/subroute.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "fmt"
- "net/http"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-func init() {
- caddy.RegisterModule(Subroute{})
-}
-
-// Subroute implements a handler that compiles and executes routes.
-// This is useful for a batch of routes that all inherit the same
-// matchers, or for multiple routes that should be treated as a
-// single route.
-//
-// You can also use a subroute to handle errors from its handlers.
-// First the primary routes will be executed, and if they return an
-// error, the errors routes will be executed; in that case, an error
-// is only returned to the entry point at the server if there is an
-// additional error returned from the errors routes.
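-//
-// A hypothetical JSON sketch of a subroute handler:
-//
-//     {"handler": "subroute", "routes": [{
-//         "match": [{"path": ["/api/*"]}],
-//         "handle": [{"handler": "static_response", "body": "api"}]
-//     }]}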
-type Subroute struct {
- // The primary list of routes to compile and execute.
- Routes RouteList `json:"routes,omitempty"`
-
- // If the primary routes return an error, error handling
- // can be promoted to this configuration instead.
- Errors *HTTPErrorConfig `json:"errors,omitempty"`
-}
-
-// CaddyModule returns the Caddy module information.
-func (Subroute) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.handlers.subroute",
- New: func() caddy.Module { return new(Subroute) },
- }
-}
-
-// Provision sets up subrouting.
-func (sr *Subroute) Provision(ctx caddy.Context) error {
- if sr.Routes != nil {
- err := sr.Routes.Provision(ctx)
- if err != nil {
- return fmt.Errorf("setting up subroutes: %v", err)
- }
- if sr.Errors != nil {
- err := sr.Errors.Routes.Provision(ctx)
- if err != nil {
- return fmt.Errorf("setting up error subroutes: %v", err)
- }
- }
- }
- return nil
-}
-
-func (sr *Subroute) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
- subroute := sr.Routes.Compile(next)
- err := subroute.ServeHTTP(w, r)
- if err != nil && sr.Errors != nil {
- r = sr.Errors.WithError(r, err)
- errRoute := sr.Errors.Routes.Compile(next)
- return errRoute.ServeHTTP(w, r)
- }
- return err
-}
-
-// Interface guards
-var (
- _ caddy.Provisioner = (*Subroute)(nil)
- _ MiddlewareHandler = (*Subroute)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/vars.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/vars.go
deleted file mode 100644
index 479ef0a7..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/vars.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyhttp
-
-import (
- "context"
- "fmt"
- "net/http"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
-)
-
-func init() {
- caddy.RegisterModule(VarsMiddleware{})
- caddy.RegisterModule(VarsMatcher{})
- caddy.RegisterModule(MatchVarsRE{})
-}
-
-// VarsMiddleware is an HTTP middleware which sets variables
-// in the context, mainly for use by placeholders. The
-// placeholders have the form: `{http.vars.variable_name}`
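-//
-// For example, a hypothetical handler config that sets one variable,
-// afterwards readable as `{http.vars.magic_number}`:
-//
-//     {"handler": "vars", "magic_number": "42"}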
-type VarsMiddleware map[string]string
-
-// CaddyModule returns the Caddy module information.
-func (VarsMiddleware) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.handlers.vars",
- New: func() caddy.Module { return new(VarsMiddleware) },
- }
-}
-
-func (t VarsMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
- vars := r.Context().Value(VarsCtxKey).(map[string]interface{})
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- for k, v := range t {
- keyExpanded := repl.ReplaceAll(k, "")
- valExpanded := repl.ReplaceAll(v, "")
- vars[keyExpanded] = valExpanded
- }
- return next.ServeHTTP(w, r)
-}
-
-// VarsMatcher is an HTTP request matcher which can match
-// requests based on variables in the context.
-type VarsMatcher map[string]string
-
-// CaddyModule returns the Caddy module information.
-func (VarsMatcher) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.vars",
- New: func() caddy.Module { return new(VarsMatcher) },
- }
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *VarsMatcher) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- if *m == nil {
- *m = make(map[string]string)
- }
- for d.Next() {
- var field, val string
- if !d.Args(&field, &val) {
- return d.Errf("malformed vars matcher: expected both field and value")
- }
- (*m)[field] = val
- if d.NextBlock(0) {
- return d.Err("malformed vars matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-// Match matches a request based on variables in the context.
-func (m VarsMatcher) Match(r *http.Request) bool {
- vars := r.Context().Value(VarsCtxKey).(map[string]interface{})
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- for k, v := range m {
- keyExpanded := repl.ReplaceAll(k, "")
- valExpanded := repl.ReplaceAll(v, "")
- var varStr string
- switch vv := vars[keyExpanded].(type) {
- case string:
- varStr = vv
- case fmt.Stringer:
- varStr = vv.String()
- case error:
- varStr = vv.Error()
- default:
- varStr = fmt.Sprintf("%v", vv)
- }
- if varStr != valExpanded {
- return false
- }
- }
- return true
-}
-
-// MatchVarsRE matches the value of the context variables by a given regular expression.
-//
-// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
-// where `name` is the regular expression's name, and `capture_group` is either
-// the named or positional capture group from the expression itself. If no name
-// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
-// (potentially leading to collisions).
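-//
-// A hypothetical matcher config that matches when the "magic_number"
-// variable consists only of digits:
-//
-//     "vars_regexp": {"magic_number": {"pattern": "^\\d+$"}}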
-type MatchVarsRE map[string]*MatchRegexp
-
-// CaddyModule returns the Caddy module information.
-func (MatchVarsRE) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.vars_regexp",
- New: func() caddy.Module { return new(MatchVarsRE) },
- }
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchVarsRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- if *m == nil {
- *m = make(map[string]*MatchRegexp)
- }
- for d.Next() {
- var first, second, third string
- if !d.Args(&first, &second) {
- return d.ArgErr()
- }
-
- var name, field, val string
- if d.Args(&third) {
- name = first
- field = second
- val = third
- } else {
- field = first
- val = second
- }
-
- (*m)[field] = &MatchRegexp{Pattern: val, Name: name}
- if d.NextBlock(0) {
- return d.Err("malformed vars_regexp matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-// Provision compiles m's regular expressions.
-func (m MatchVarsRE) Provision(ctx caddy.Context) error {
- for _, rm := range m {
- err := rm.Provision(ctx)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// Match returns true if r matches m.
-func (m MatchVarsRE) Match(r *http.Request) bool {
- vars := r.Context().Value(VarsCtxKey).(map[string]interface{})
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- for k, rm := range m {
- var varStr string
- switch vv := vars[k].(type) {
- case string:
- varStr = vv
- case fmt.Stringer:
- varStr = vv.String()
- case error:
- varStr = vv.Error()
- default:
- varStr = fmt.Sprintf("%v", vv)
- }
- valExpanded := repl.ReplaceAll(varStr, "")
- if match := rm.Match(valExpanded, repl); match {
- return match
- }
-
- replacedVal := repl.ReplaceAll(k, "")
- if match := rm.Match(replacedVal, repl); match {
- return match
- }
- }
- return false
-}
-
-// Validate validates m's regular expressions.
-func (m MatchVarsRE) Validate() error {
- for _, rm := range m {
- err := rm.Validate()
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// GetVar gets a value out of the context's variable table by key.
-// If the key does not exist, the return value will be nil.
-func GetVar(ctx context.Context, key string) interface{} {
- varMap, ok := ctx.Value(VarsCtxKey).(map[string]interface{})
- if !ok {
- return nil
- }
- return varMap[key]
-}
-
-// SetVar sets a value in the context's variable table with
-// the given key. It overwrites any previous value with the
-// same key.
-func SetVar(ctx context.Context, key string, value interface{}) {
- varMap, ok := ctx.Value(VarsCtxKey).(map[string]interface{})
- if !ok {
- return
- }
- varMap[key] = value
-}
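-
-// Example usage from inside a handler (hypothetical key and value):
-//
-//     SetVar(r.Context(), "client_tier", "gold")
-//     tier, _ := GetVar(r.Context(), "client_tier").(string)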
-
-// Interface guards
-var (
- _ MiddlewareHandler = (*VarsMiddleware)(nil)
- _ RequestMatcher = (*VarsMatcher)(nil)
-)
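
GetVar and SetVar only work because the map itself lives in the context: writes mutate shared state instead of deriving a new context per write. A minimal standalone sketch of that pattern, with a hypothetical varsCtxKey standing in for Caddy's VarsCtxKey:

package main

import (
    "context"
    "fmt"
)

type ctxKey string

const varsCtxKey ctxKey = "vars"

func main() {
    // One map is attached once, up front, as Caddy does per request.
    ctx := context.WithValue(context.Background(), varsCtxKey, map[string]interface{}{})

    // SetVar-style write: mutate the map in place; no new context needed.
    if vars, ok := ctx.Value(varsCtxKey).(map[string]interface{}); ok {
        vars["upstream"] = "127.0.0.1:9000"
    }

    // GetVar-style read from anything holding the same context.
    if vars, ok := ctx.Value(varsCtxKey).(map[string]interface{}); ok {
        fmt.Println(vars["upstream"]) // 127.0.0.1:9000
    }
}
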
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/ca.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/ca.go
deleted file mode 100644
index e3102fba..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/ca.go
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddypki
-
-import (
- "crypto"
- "crypto/x509"
- "encoding/json"
- "fmt"
- "path"
- "sync"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/certmagic"
- "github.com/smallstep/certificates/authority"
- "github.com/smallstep/certificates/db"
- "github.com/smallstep/truststore"
- "go.uber.org/zap"
-)
-
-// CA describes a certificate authority, which consists of
-// root/signing certificates and various settings pertaining
-// to the issuance of certificates and trusting them.
-type CA struct {
- // The user-facing name of the certificate authority.
- Name string `json:"name,omitempty"`
-
- // The name to put in the CommonName field of the
- // root certificate.
- RootCommonName string `json:"root_common_name,omitempty"`
-
- // The name to put in the CommonName field of the
- // intermediate certificates.
- IntermediateCommonName string `json:"intermediate_common_name,omitempty"`
-
- // Whether Caddy will attempt to install the CA's root
- // into the system trust store, as well as into Java
- // and Mozilla Firefox trust stores. Default: true.
- InstallTrust *bool `json:"install_trust,omitempty"`
-
- // The root certificate to use; if null, one will be generated.
- Root *KeyPair `json:"root,omitempty"`
-
- // The intermediate (signing) certificate; if null, one will be generated.
- Intermediate *KeyPair `json:"intermediate,omitempty"`
-
- // Optionally configure a separate storage module associated with this
- // issuer, instead of using Caddy's global/default-configured storage.
- // This can be useful if you want to keep your signing keys in a
- // separate location from your leaf certificates.
- StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
-
- // The unique config-facing ID of the certificate authority.
- // Since the ID is set in JSON config via object key, this
- // field is exported only for purposes of config generation
- // and module provisioning.
- ID string `json:"-"`
-
- storage certmagic.Storage
- root, inter *x509.Certificate
- interKey interface{} // TODO: should we just store these as crypto.Signer?
- mu *sync.RWMutex
-
- rootCertPath string // mainly used for logging purposes if trusting
- log *zap.Logger
-}
-
-// Provision sets up the CA.
-func (ca *CA) Provision(ctx caddy.Context, id string, log *zap.Logger) error {
- ca.mu = new(sync.RWMutex)
- ca.log = log.Named("ca." + id)
-
- if id == "" {
- return fmt.Errorf("CA ID is required (use 'local' for the default CA)")
- }
- ca.mu.Lock()
- ca.ID = id
- ca.mu.Unlock()
-
- if ca.StorageRaw != nil {
- val, err := ctx.LoadModule(ca, "StorageRaw")
- if err != nil {
- return fmt.Errorf("loading storage module: %v", err)
- }
- cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage()
- if err != nil {
- return fmt.Errorf("creating storage configuration: %v", err)
- }
- ca.storage = cmStorage
- }
- if ca.storage == nil {
- ca.storage = ctx.Storage()
- }
-
- if ca.Name == "" {
- ca.Name = defaultCAName
- }
- if ca.RootCommonName == "" {
- ca.RootCommonName = defaultRootCommonName
- }
- if ca.IntermediateCommonName == "" {
- ca.IntermediateCommonName = defaultIntermediateCommonName
- }
-
- // load the certs and key that will be used for signing
- var rootCert, interCert *x509.Certificate
- var rootKey, interKey interface{}
- var err error
- if ca.Root != nil {
- if ca.Root.Format == "" || ca.Root.Format == "pem_file" {
- ca.rootCertPath = ca.Root.Certificate
- }
- rootCert, rootKey, err = ca.Root.Load()
- } else {
- ca.rootCertPath = "storage:" + ca.storageKeyRootCert()
- rootCert, rootKey, err = ca.loadOrGenRoot()
- }
- if err != nil {
- return err
- }
- if ca.Intermediate != nil {
- interCert, interKey, err = ca.Intermediate.Load()
- } else {
- interCert, interKey, err = ca.loadOrGenIntermediate(rootCert, rootKey)
- }
- if err != nil {
- return err
- }
-
- ca.mu.Lock()
- ca.root, ca.inter, ca.interKey = rootCert, interCert, interKey
- ca.mu.Unlock()
-
- return nil
-}
-
-// RootCertificate returns the CA's root certificate (public key).
-func (ca CA) RootCertificate() *x509.Certificate {
- ca.mu.RLock()
- defer ca.mu.RUnlock()
- return ca.root
-}
-
-// RootKey returns the CA's root private key. Since the root key is
-// not cached in memory long-term, it needs to be loaded from storage,
-// which could yield an error.
-func (ca CA) RootKey() (interface{}, error) {
- _, rootKey, err := ca.loadOrGenRoot()
- return rootKey, err
-}
-
-// IntermediateCertificate returns the CA's intermediate
-// certificate (public key).
-func (ca CA) IntermediateCertificate() *x509.Certificate {
- ca.mu.RLock()
- defer ca.mu.RUnlock()
- return ca.inter
-}
-
-// IntermediateKey returns the CA's intermediate private key.
-func (ca CA) IntermediateKey() interface{} {
- ca.mu.RLock()
- defer ca.mu.RUnlock()
- return ca.interKey
-}
-
-// NewAuthority returns a new Smallstep-powered signing authority for this CA.
-func (ca CA) NewAuthority(authorityConfig AuthorityConfig) (*authority.Authority, error) {
- // get the root certificate and the issuer cert+key
- rootCert := ca.RootCertificate()
- var issuerCert *x509.Certificate
- var issuerKey interface{}
- if authorityConfig.SignWithRoot {
- issuerCert = rootCert
- var err error
- issuerKey, err = ca.RootKey()
- if err != nil {
- return nil, fmt.Errorf("loading signing key: %v", err)
- }
- } else {
- issuerCert = ca.IntermediateCertificate()
- issuerKey = ca.IntermediateKey()
- }
-
- opts := []authority.Option{
- authority.WithConfig(&authority.Config{
- AuthorityConfig: authorityConfig.AuthConfig,
- }),
- authority.WithX509Signer(issuerCert, issuerKey.(crypto.Signer)),
- authority.WithX509RootCerts(rootCert),
- }
- // Add a database if we have one
- if authorityConfig.DB != nil {
- opts = append(opts, authority.WithDatabase(*authorityConfig.DB))
- }
- auth, err := authority.NewEmbedded(opts...)
- if err != nil {
- return nil, fmt.Errorf("initializing certificate authority: %v", err)
- }
-
- return auth, nil
-}
-
-func (ca CA) loadOrGenRoot() (rootCert *x509.Certificate, rootKey interface{}, err error) {
- rootCertPEM, err := ca.storage.Load(ca.storageKeyRootCert())
- if err != nil {
- if _, ok := err.(certmagic.ErrNotExist); !ok {
- return nil, nil, fmt.Errorf("loading root cert: %v", err)
- }
-
- // TODO: should we require that all or none of the assets are required before overwriting anything?
- rootCert, rootKey, err = ca.genRoot()
- if err != nil {
- return nil, nil, fmt.Errorf("generating root: %v", err)
- }
- }
-
- if rootCert == nil {
- rootCert, err = pemDecodeSingleCert(rootCertPEM)
- if err != nil {
- return nil, nil, fmt.Errorf("parsing root certificate PEM: %v", err)
- }
- }
- if rootKey == nil {
- rootKeyPEM, err := ca.storage.Load(ca.storageKeyRootKey())
- if err != nil {
- return nil, nil, fmt.Errorf("loading root key: %v", err)
- }
- rootKey, err = pemDecodePrivateKey(rootKeyPEM)
- if err != nil {
- return nil, nil, fmt.Errorf("decoding root key: %v", err)
- }
- }
-
- return rootCert, rootKey, nil
-}
-
-func (ca CA) genRoot() (rootCert *x509.Certificate, rootKey interface{}, err error) {
- repl := ca.newReplacer()
-
- rootCert, rootKey, err = generateRoot(repl.ReplaceAll(ca.RootCommonName, ""))
- if err != nil {
- return nil, nil, fmt.Errorf("generating CA root: %v", err)
- }
- rootCertPEM, err := pemEncodeCert(rootCert.Raw)
- if err != nil {
- return nil, nil, fmt.Errorf("encoding root certificate: %v", err)
- }
- err = ca.storage.Store(ca.storageKeyRootCert(), rootCertPEM)
- if err != nil {
- return nil, nil, fmt.Errorf("saving root certificate: %v", err)
- }
- rootKeyPEM, err := pemEncodePrivateKey(rootKey)
- if err != nil {
- return nil, nil, fmt.Errorf("encoding root key: %v", err)
- }
- err = ca.storage.Store(ca.storageKeyRootKey(), rootKeyPEM)
- if err != nil {
- return nil, nil, fmt.Errorf("saving root key: %v", err)
- }
-
- return rootCert, rootKey, nil
-}
-
-func (ca CA) loadOrGenIntermediate(rootCert *x509.Certificate, rootKey interface{}) (interCert *x509.Certificate, interKey interface{}, err error) {
- interCertPEM, err := ca.storage.Load(ca.storageKeyIntermediateCert())
- if err != nil {
- if _, ok := err.(certmagic.ErrNotExist); !ok {
- return nil, nil, fmt.Errorf("loading intermediate cert: %v", err)
- }
-
- // TODO: should we require that all or none of the assets are required before overwriting anything?
- interCert, interKey, err = ca.genIntermediate(rootCert, rootKey)
- if err != nil {
- return nil, nil, fmt.Errorf("generating new intermediate cert: %v", err)
- }
- }
-
- if interCert == nil {
- interCert, err = pemDecodeSingleCert(interCertPEM)
- if err != nil {
- return nil, nil, fmt.Errorf("decoding intermediate certificate PEM: %v", err)
- }
- }
-
- if interKey == nil {
- interKeyPEM, err := ca.storage.Load(ca.storageKeyIntermediateKey())
- if err != nil {
- return nil, nil, fmt.Errorf("loading intermediate key: %v", err)
- }
- interKey, err = pemDecodePrivateKey(interKeyPEM)
- if err != nil {
- return nil, nil, fmt.Errorf("decoding intermediate key: %v", err)
- }
- }
-
- return interCert, interKey, nil
-}
-
-func (ca CA) genIntermediate(rootCert *x509.Certificate, rootKey interface{}) (interCert *x509.Certificate, interKey interface{}, err error) {
- repl := ca.newReplacer()
-
- interCert, interKey, err = generateIntermediate(repl.ReplaceAll(ca.IntermediateCommonName, ""), rootCert, rootKey)
- if err != nil {
- return nil, nil, fmt.Errorf("generating CA intermediate: %v", err)
- }
- interCertPEM, err := pemEncodeCert(interCert.Raw)
- if err != nil {
- return nil, nil, fmt.Errorf("encoding intermediate certificate: %v", err)
- }
- err = ca.storage.Store(ca.storageKeyIntermediateCert(), interCertPEM)
- if err != nil {
- return nil, nil, fmt.Errorf("saving intermediate certificate: %v", err)
- }
- interKeyPEM, err := pemEncodePrivateKey(interKey)
- if err != nil {
- return nil, nil, fmt.Errorf("encoding intermediate key: %v", err)
- }
- err = ca.storage.Store(ca.storageKeyIntermediateKey(), interKeyPEM)
- if err != nil {
- return nil, nil, fmt.Errorf("saving intermediate key: %v", err)
- }
-
- return interCert, interKey, nil
-}
-
-func (ca CA) storageKeyCAPrefix() string {
- return path.Join("pki", "authorities", certmagic.StorageKeys.Safe(ca.ID))
-}
-func (ca CA) storageKeyRootCert() string {
- return path.Join(ca.storageKeyCAPrefix(), "root.crt")
-}
-func (ca CA) storageKeyRootKey() string {
- return path.Join(ca.storageKeyCAPrefix(), "root.key")
-}
-func (ca CA) storageKeyIntermediateCert() string {
- return path.Join(ca.storageKeyCAPrefix(), "intermediate.crt")
-}
-func (ca CA) storageKeyIntermediateKey() string {
- return path.Join(ca.storageKeyCAPrefix(), "intermediate.key")
-}
-
-func (ca CA) newReplacer() *caddy.Replacer {
- repl := caddy.NewReplacer()
- repl.Set("pki.ca.name", ca.Name)
- return repl
-}
-
-// installRoot installs this CA's root certificate into the
-// local trust store(s) if it is not already trusted. The CA
-// must already be provisioned.
-func (ca CA) installRoot() error {
- // avoid password prompt if already trusted
- if trusted(ca.root) {
- ca.log.Info("root certificate is already trusted by system",
- zap.String("path", ca.rootCertPath))
- return nil
- }
-
- ca.log.Warn("installing root certificate (you might be prompted for password)",
- zap.String("path", ca.rootCertPath))
-
- return truststore.Install(ca.root,
- truststore.WithDebug(),
- truststore.WithFirefox(),
- truststore.WithJava(),
- )
-}
-
-// AuthorityConfig is used to help a CA configure
-// the underlying signing authority.
-type AuthorityConfig struct {
- SignWithRoot bool
-
- // TODO: should we just embed the underlying authority.Config struct type?
- DB *db.AuthDB
- AuthConfig *authority.AuthConfig
-}
-
-const (
- // DefaultCAID is the default CA ID.
- DefaultCAID = "local"
-
- defaultCAName = "Caddy Local Authority"
- defaultRootCommonName = "{pki.ca.name} - {time.now.year} ECC Root"
- defaultIntermediateCommonName = "{pki.ca.name} - ECC Intermediate"
-
- defaultRootLifetime = 24 * time.Hour * 30 * 12 * 10
- defaultIntermediateLifetime = 24 * time.Hour * 7
-)
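
loadOrGenRoot and loadOrGenIntermediate above share one shape: try storage first, let only a not-found error trigger generation plus persistence, and abort on any other storage error. A condensed sketch of that control flow against a toy in-memory store (hypothetical types, not certmagic's actual API):

package main

import (
    "errors"
    "fmt"
)

var errNotExist = errors.New("not exist")

type store map[string][]byte

func (s store) Load(key string) ([]byte, error) {
    v, ok := s[key]
    if !ok {
        return nil, errNotExist
    }
    return v, nil
}

// loadOrGen mirrors loadOrGenRoot: only a not-found error triggers
// generation; any other storage error aborts.
func loadOrGen(s store, key string, gen func() []byte) ([]byte, error) {
    b, err := s.Load(key)
    if err != nil {
        if !errors.Is(err, errNotExist) {
            return nil, fmt.Errorf("loading %s: %v", key, err)
        }
        b = gen()
        s[key] = b // persist so the next load finds it
    }
    return b, nil
}

func main() {
    s := store{}
    b, _ := loadOrGen(s, "pki/authorities/local/root.crt", func() []byte { return []byte("PEM") })
    fmt.Printf("%s\n", b)
}
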
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/certificates.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/certificates.go
deleted file mode 100644
index a55c1658..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/certificates.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddypki
-
-import (
- "crypto/x509"
- "time"
-
- "github.com/smallstep/cli/crypto/x509util"
-)
-
-func generateRoot(commonName string) (rootCrt *x509.Certificate, privateKey interface{}, err error) {
- rootProfile, err := x509util.NewRootProfile(commonName)
- if err != nil {
- return
- }
- rootProfile.Subject().NotAfter = time.Now().Add(defaultRootLifetime) // TODO: make configurable
- return newCert(rootProfile)
-}
-
-func generateIntermediate(commonName string, rootCrt *x509.Certificate, rootKey interface{}) (cert *x509.Certificate, privateKey interface{}, err error) {
- interProfile, err := x509util.NewIntermediateProfile(commonName, rootCrt, rootKey)
- if err != nil {
- return
- }
- interProfile.Subject().NotAfter = time.Now().Add(defaultIntermediateLifetime) // TODO: make configurable
- return newCert(interProfile)
-}
-
-func newCert(profile x509util.Profile) (cert *x509.Certificate, privateKey interface{}, err error) {
- certBytes, err := profile.CreateCertificate()
- if err != nil {
- return
- }
- privateKey = profile.SubjectPrivateKey()
- cert, err = x509.ParseCertificate(certBytes)
- return
-}
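
For reference, here is a rough stdlib-only approximation of what the two profiles produce, using plain crypto/x509 templates with the same lifetimes as the constants above. This is an assumption-laden sketch, not smallstep x509util's actual defaults:

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/x509"
    "crypto/x509/pkix"
    "fmt"
    "math/big"
    "time"
)

func main() {
    // Root: self-signed CA cert, ~10-year lifetime like defaultRootLifetime.
    rootKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    rootTmpl := &x509.Certificate{
        SerialNumber:          big.NewInt(1),
        Subject:               pkix.Name{CommonName: "Example Root"},
        NotBefore:             time.Now(),
        NotAfter:              time.Now().Add(24 * time.Hour * 30 * 12 * 10),
        IsCA:                  true,
        KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
        BasicConstraintsValid: true,
    }
    rootDER, err := x509.CreateCertificate(rand.Reader, rootTmpl, rootTmpl, &rootKey.PublicKey, rootKey)
    if err != nil {
        panic(err)
    }
    rootCert, err := x509.ParseCertificate(rootDER)
    if err != nil {
        panic(err)
    }

    // Intermediate: signed by the root, one-week lifetime like
    // defaultIntermediateLifetime.
    interKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    interTmpl := &x509.Certificate{
        SerialNumber:          big.NewInt(2),
        Subject:               pkix.Name{CommonName: "Example Intermediate"},
        NotBefore:             time.Now(),
        NotAfter:              time.Now().Add(24 * time.Hour * 7),
        IsCA:                  true,
        MaxPathLenZero:        true,
        KeyUsage:              x509.KeyUsageCertSign,
        BasicConstraintsValid: true,
    }
    interDER, err := x509.CreateCertificate(rand.Reader, interTmpl, rootCert, &interKey.PublicKey, rootKey)
    if err != nil {
        panic(err)
    }
    fmt.Printf("issued intermediate, %d bytes DER\n", len(interDER))
}
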
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/command.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/command.go
deleted file mode 100644
index 34daefaa..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/command.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddypki
-
-import (
- "context"
- "flag"
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/caddyserver/caddy/v2"
- caddycmd "github.com/caddyserver/caddy/v2/cmd"
- "github.com/smallstep/truststore"
-)
-
-func init() {
- caddycmd.RegisterCommand(caddycmd.Command{
- Name: "trust",
- Func: cmdTrust,
- Short: "Installs a CA certificate into local trust stores",
- Long: `
-Adds a root certificate into the local trust stores. Intended for
-development environments only.
-
-Since Caddy will install its root certificates into the local trust
-stores automatically when they are first generated, this command is
-only necessary if you need to pre-install the certificates before
-using them; for example, if you have elevated privileges at one
-point but not later, you will want to use this command so that a
-password prompt is not required later.
-
-This command installs the root certificate only for Caddy's
-default CA.`,
- })
-
- caddycmd.RegisterCommand(caddycmd.Command{
- Name: "untrust",
- Func: cmdUntrust,
- Usage: "[--ca | --cert ]",
- Short: "Untrusts a locally-trusted CA certificate",
- Long: `
-Untrusts a root certificate from the local trust store(s). Intended
-for development environments only.
-
-This command uninstalls trust; it does not necessarily delete the
-root certificate from trust stores entirely. Thus, repeatedly
-trusting and untrusting new certificates can fill up trust databases.
-
-This command does not delete or modify certificate files.
-
-Specify which certificate to untrust either by the ID of its CA with
-the --ca flag, or the direct path to the certificate file with the
---cert flag. If the --ca flag is used, only the default storage paths
-are assumed (i.e. using --ca flag with custom storage backends or file
-paths will not work).
-
-If no flags are specified, --ca=local is assumed.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("untrust", flag.ExitOnError)
- fs.String("ca", "", "The ID of the CA to untrust")
- fs.String("cert", "", "The path to the CA certificate to untrust")
- return fs
- }(),
- })
-}
-
-func cmdTrust(fs caddycmd.Flags) (int, error) {
- // we have to create a sort of dummy context so that
- // the CA can provision itself...
- ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
- defer cancel()
-
- // provision the CA, which generates and stores a root
- // certificate if one doesn't already exist in storage
- ca := CA{
- storage: caddy.DefaultStorage,
- }
- err := ca.Provision(ctx, DefaultCAID, caddy.Log())
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
-
- err = ca.installRoot()
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
-
- return caddy.ExitCodeSuccess, nil
-}
-
-func cmdUntrust(fs caddycmd.Flags) (int, error) {
- ca := fs.String("ca")
- cert := fs.String("cert")
-
- if ca != "" && cert != "" {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("conflicting command line arguments")
- }
- if ca == "" && cert == "" {
- ca = DefaultCAID
- }
- if ca != "" {
- cert = filepath.Join(caddy.AppDataDir(), "pki", "authorities", ca, "root.crt")
- }
-
- // sanity check, make sure cert file exists first
- _, err := os.Stat(cert)
- if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("accessing certificate file: %v", err)
- }
-
- err = truststore.UninstallFile(cert,
- truststore.WithDebug(),
- truststore.WithFirefox(),
- truststore.WithJava())
- if err != nil {
- return caddy.ExitCodeFailedStartup, err
- }
-
- return caddy.ExitCodeSuccess, nil
-}
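
The flag handling in cmdUntrust reduces to a small precedence rule: --ca and --cert are mutually exclusive, --ca=local is the fallback when neither is given, and a CA ID resolves to root.crt under the app data directory. A standalone sketch of just that rule (the data directory path here is hypothetical, standing in for caddy.AppDataDir()):

package main

import (
    "flag"
    "fmt"
    "os"
    "path/filepath"
)

func main() {
    ca := flag.String("ca", "", "The ID of the CA to untrust")
    cert := flag.String("cert", "", "The path to the CA certificate to untrust")
    flag.Parse()

    switch {
    case *ca != "" && *cert != "":
        fmt.Fprintln(os.Stderr, "conflicting command line arguments")
        os.Exit(1)
    case *ca == "" && *cert == "":
        *ca = "local" // default CA assumed when no flags are given
    }
    if *ca != "" {
        // hypothetical data dir; the real command derives this from Caddy
        *cert = filepath.Join("/path/to/app/data", "pki", "authorities", *ca, "root.crt")
    }
    fmt.Println("would untrust:", *cert)
}
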
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/crypto.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/crypto.go
deleted file mode 100644
index e1a0e354..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/crypto.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddypki
-
-import (
- "bytes"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "io/ioutil"
- "strings"
-)
-
-func pemDecodeSingleCert(pemDER []byte) (*x509.Certificate, error) {
- pemBlock, remaining := pem.Decode(pemDER)
- if pemBlock == nil {
- return nil, fmt.Errorf("no PEM block found")
- }
- if len(remaining) > 0 {
- return nil, fmt.Errorf("input contained more than a single PEM block")
- }
- if pemBlock.Type != "CERTIFICATE" {
- return nil, fmt.Errorf("expected PEM block type to be CERTIFICATE, but got '%s'", pemBlock.Type)
- }
- return x509.ParseCertificate(pemBlock.Bytes)
-}
-
-func pemEncodeCert(der []byte) ([]byte, error) {
- return pemEncode("CERTIFICATE", der)
-}
-
-// pemEncodePrivateKey marshals an EC, Ed25519, or RSA private key into a PEM-encoded byte slice.
-// TODO: this is the same thing as in certmagic. Should we reuse that code somehow? It's unexported.
-func pemEncodePrivateKey(key crypto.PrivateKey) ([]byte, error) {
- var pemType string
- var keyBytes []byte
- switch key := key.(type) {
- case *ecdsa.PrivateKey:
- var err error
- pemType = "EC"
- keyBytes, err = x509.MarshalECPrivateKey(key)
- if err != nil {
- return nil, err
- }
- case *rsa.PrivateKey:
- pemType = "RSA"
- keyBytes = x509.MarshalPKCS1PrivateKey(key)
- case ed25519.PrivateKey: // value type; x509.MarshalPKCS8PrivateKey rejects *ed25519.PrivateKey
- var err error
- pemType = "ED25519"
- keyBytes, err = x509.MarshalPKCS8PrivateKey(key)
- if err != nil {
- return nil, err
- }
- default:
- return nil, fmt.Errorf("unsupported key type: %T", key)
- }
- return pemEncode(pemType+" PRIVATE KEY", keyBytes)
-}
-
-// pemDecodePrivateKey loads a PEM-encoded EC, Ed25519, or RSA private key from a byte slice.
-// Borrowed from Go standard library, to handle various private key and PEM block types.
-// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L291-L308
-// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L238)
-// TODO: this is the same thing as in certmagic. Should we reuse that code somehow? It's unexported.
-func pemDecodePrivateKey(keyPEMBytes []byte) (crypto.PrivateKey, error) {
- keyBlockDER, _ := pem.Decode(keyPEMBytes)
- if keyBlockDER == nil {
- return nil, fmt.Errorf("no PEM block found in private key input")
- }
-
- if keyBlockDER.Type != "PRIVATE KEY" && !strings.HasSuffix(keyBlockDER.Type, " PRIVATE KEY") {
- return nil, fmt.Errorf("unknown PEM header %q", keyBlockDER.Type)
- }
-
- if key, err := x509.ParsePKCS1PrivateKey(keyBlockDER.Bytes); err == nil {
- return key, nil
- }
-
- if key, err := x509.ParsePKCS8PrivateKey(keyBlockDER.Bytes); err == nil {
- switch key := key.(type) {
- case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey:
- return key, nil
- default:
- return nil, fmt.Errorf("found unknown private key type in PKCS#8 wrapping: %T", key)
- }
- }
-
- if key, err := x509.ParseECPrivateKey(keyBlockDER.Bytes); err == nil {
- return key, nil
- }
-
- return nil, fmt.Errorf("unknown private key type")
-}
-
-func pemEncode(blockType string, b []byte) ([]byte, error) {
- var buf bytes.Buffer
- err := pem.Encode(&buf, &pem.Block{Type: blockType, Bytes: b})
- return buf.Bytes(), err
-}
-
-func trusted(cert *x509.Certificate) bool {
- chains, err := cert.Verify(x509.VerifyOptions{})
- return len(chains) > 0 && err == nil
-}
-
-// KeyPair represents a public-private key pair, where the
-// public key is also called a certificate.
-type KeyPair struct {
- // The certificate. By default, this should be the path to
- // a PEM file unless format is something else.
- Certificate string `json:"certificate,omitempty"`
-
- // The private key. By default, this should be the path to
- // a PEM file unless format is something else.
- PrivateKey string `json:"private_key,omitempty"`
-
- // The format in which the certificate and private
- // key are provided. Default: pem_file
- Format string `json:"format,omitempty"`
-}
-
-// Load loads the certificate and key.
-func (kp KeyPair) Load() (*x509.Certificate, interface{}, error) {
- switch kp.Format {
- case "", "pem_file":
- certData, err := ioutil.ReadFile(kp.Certificate)
- if err != nil {
- return nil, nil, err
- }
- keyData, err := ioutil.ReadFile(kp.PrivateKey)
- if err != nil {
- return nil, nil, err
- }
-
- cert, err := pemDecodeSingleCert(certData)
- if err != nil {
- return nil, nil, err
- }
- key, err := pemDecodePrivateKey(keyData)
- if err != nil {
- return nil, nil, err
- }
-
- return cert, key, nil
-
- default:
- return nil, nil, fmt.Errorf("unsupported format: %s", kp.Format)
- }
-}
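
A quick round-trip of the ECDSA path above, using the same stdlib calls (x509.MarshalECPrivateKey / x509.ParseECPrivateKey around an "EC PRIVATE KEY" block); it also shows why the decoder needs a nil check after pem.Decode:

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/x509"
    "encoding/pem"
    "fmt"
)

func main() {
    key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

    // Encode: SEC 1 DER wrapped in an "EC PRIVATE KEY" block, as
    // pemEncodePrivateKey does for *ecdsa.PrivateKey.
    der, err := x509.MarshalECPrivateKey(key)
    if err != nil {
        panic(err)
    }
    pemBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})

    // Decode: pem.Decode returns nil on malformed input, so check before
    // touching block.Type.
    block, _ := pem.Decode(pemBytes)
    if block == nil {
        panic("no PEM block")
    }
    back, err := x509.ParseECPrivateKey(block.Bytes)
    if err != nil {
        panic(err)
    }
    fmt.Println("round-tripped:", back.Equal(key)) // true
}
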
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/maintain.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/maintain.go
deleted file mode 100644
index 31e453ff..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/maintain.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddypki
-
-import (
- "crypto/x509"
- "fmt"
- "log"
- "runtime/debug"
- "time"
-
- "go.uber.org/zap"
-)
-
-func (p *PKI) maintenance() {
- defer func() {
- if err := recover(); err != nil {
- log.Printf("[PANIC] PKI maintenance: %v\n%s", err, debug.Stack())
- }
- }()
-
- ticker := time.NewTicker(10 * time.Minute) // TODO: make configurable
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- p.renewCerts()
- case <-p.ctx.Done():
- return
- }
- }
-}
-
-func (p *PKI) renewCerts() {
- for _, ca := range p.CAs {
- err := p.renewCertsForCA(ca)
- if err != nil {
- p.log.Error("renewing intermediate certificates",
- zap.Error(err),
- zap.String("ca", ca.ID))
- }
- }
-}
-
-func (p *PKI) renewCertsForCA(ca *CA) error {
- ca.mu.Lock()
- defer ca.mu.Unlock()
-
- log := p.log.With(zap.String("ca", ca.ID))
-
- // only maintain the root if it's not manually provided in the config
- if ca.Root == nil {
- if needsRenewal(ca.root) {
- // TODO: implement root renewal (use same key)
- log.Warn("root certificate expiring soon (FIXME: ROOT RENEWAL NOT YET IMPLEMENTED)",
- zap.Duration("time_remaining", time.Until(ca.inter.NotAfter)),
- )
- }
- }
-
- // only maintain the intermediate if it's not manually provided in the config
- if ca.Intermediate == nil {
- if needsRenewal(ca.inter) {
- log.Info("intermediate expires soon; renewing",
- zap.Duration("time_remaining", time.Until(ca.inter.NotAfter)),
- )
-
- rootCert, rootKey, err := ca.loadOrGenRoot()
- if err != nil {
- return fmt.Errorf("loading root key: %v", err)
- }
- interCert, interKey, err := ca.genIntermediate(rootCert, rootKey)
- if err != nil {
- return fmt.Errorf("generating new certificate: %v", err)
- }
- ca.inter, ca.interKey = interCert, interKey
-
- log.Info("renewed intermediate",
- zap.Time("new_expiration", ca.inter.NotAfter),
- )
- }
- }
-
- return nil
-}
-
-func needsRenewal(cert *x509.Certificate) bool {
- lifetime := cert.NotAfter.Sub(cert.NotBefore)
- renewalWindow := time.Duration(float64(lifetime) * renewalWindowRatio)
- renewalWindowStart := cert.NotAfter.Add(-renewalWindow)
- return time.Now().After(renewalWindowStart)
-}
-
-const renewalWindowRatio = 0.2 // TODO: make configurable
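
The needsRenewal arithmetic is worth seeing with numbers: at the 0.2 ratio, renewal begins once a certificate enters the last 20% of its lifetime. A small sketch of the same computation:

package main

import (
    "fmt"
    "time"
)

// renewalWindowStart mirrors needsRenewal: the window opens ratio*lifetime
// before expiry.
func renewalWindowStart(notBefore, notAfter time.Time, ratio float64) time.Time {
    lifetime := notAfter.Sub(notBefore)
    window := time.Duration(float64(lifetime) * ratio)
    return notAfter.Add(-window)
}

func main() {
    notBefore := time.Now()
    notAfter := notBefore.Add(24 * time.Hour * 7) // one-week intermediate
    start := renewalWindowStart(notBefore, notAfter, 0.2)
    fmt.Println("renewal window opens:", start.Format(time.RFC3339))
    // For a 7-day cert that is 33.6 hours before expiry.
}
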
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/pki.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/pki.go
deleted file mode 100644
index b6f08b18..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/pki.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddypki
-
-import (
- "fmt"
-
- "github.com/caddyserver/caddy/v2"
- "go.uber.org/zap"
-)
-
-func init() {
- caddy.RegisterModule(PKI{})
-}
-
-// PKI provides Public Key Infrastructure facilities for Caddy.
-type PKI struct {
- // The CAs to manage. Each CA is keyed by an ID that is used
- // to uniquely identify it from other CAs. The default CA ID
- // is "local".
- CAs map[string]*CA `json:"certificate_authorities,omitempty"`
-
- ctx caddy.Context
- log *zap.Logger
-}
-
-// CaddyModule returns the Caddy module information.
-func (PKI) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "pki",
- New: func() caddy.Module { return new(PKI) },
- }
-}
-
-// Provision sets up the configuration for the PKI app.
-func (p *PKI) Provision(ctx caddy.Context) error {
- p.ctx = ctx
- p.log = ctx.Logger(p)
-
- // if this app is initialized at all, ensure there's at
- // least a default CA that can be used: the standard CA
- // which is used implicitly for signing local-use certs
- if p.CAs == nil {
- p.CAs = make(map[string]*CA)
- }
- if _, ok := p.CAs[DefaultCAID]; !ok {
- p.CAs[DefaultCAID] = new(CA)
- }
-
- for caID, ca := range p.CAs {
- err := ca.Provision(ctx, caID, p.log)
- if err != nil {
- return fmt.Errorf("provisioning CA '%s': %v", caID, err)
- }
- }
-
- return nil
-}
-
-// Start starts the PKI app.
-func (p *PKI) Start() error {
- // install roots to trust store, if not disabled
- for _, ca := range p.CAs {
- if ca.InstallTrust != nil && !*ca.InstallTrust {
- ca.log.Warn("root certificate trust store installation disabled; unconfigured clients may show warnings",
- zap.String("path", ca.rootCertPath))
- continue
- }
-
- if err := ca.installRoot(); err != nil {
- // could be some system dependencies that are missing;
- // shouldn't totally prevent startup, but we should log it
- ca.log.Error("failed to install root certificate",
- zap.Error(err),
- zap.String("certificate_file", ca.rootCertPath))
- }
- }
-
- // see if root/intermediates need renewal...
- p.renewCerts()
-
- // ...and keep them renewed
- go p.maintenance()
-
- return nil
-}
-
-// Stop stops the PKI app.
-func (p *PKI) Stop() error {
- return nil
-}
-
-// Interface guards
-var (
- _ caddy.Provisioner = (*PKI)(nil)
- _ caddy.App = (*PKI)(nil)
-)
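
Provision's backfilling of the "local" CA is a common defaulting pattern for config maps: the nil map is allocated and a default entry is guaranteed before iteration. Stripped of Caddy types, roughly:

package main

import "fmt"

type ca struct{ name string }

// provision ensures the map exists and always contains a "local" default,
// as PKI.Provision does before provisioning each CA.
func provision(cas map[string]*ca) map[string]*ca {
    if cas == nil {
        cas = make(map[string]*ca)
    }
    if _, ok := cas["local"]; !ok {
        cas["local"] = &ca{name: "default"}
    }
    return cas
}

func main() {
    cas := provision(nil)
    fmt.Println(len(cas), cas["local"].name) // 1 default
}
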
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/acmeissuer.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/acmeissuer.go
deleted file mode 100644
index b60e560e..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/acmeissuer.go
+++ /dev/null
@@ -1,541 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "context"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "net/url"
- "strconv"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/caddyserver/certmagic"
- "github.com/mholt/acmez"
- "github.com/mholt/acmez/acme"
- "go.uber.org/zap"
-)
-
-func init() {
- caddy.RegisterModule(ACMEIssuer{})
-}
-
-// ACMEIssuer makes an ACME manager
-// for managing certificates using ACME.
-//
-// TODO: support multiple ACME endpoints (probably
-// requires an array of these structs) - caddy would
-// also have to load certs from the backup CAs if the
-// first one is expired...
-type ACMEIssuer struct {
- // The URL to the CA's ACME directory endpoint.
- CA string `json:"ca,omitempty"`
-
- // The URL to the test CA's ACME directory endpoint.
- // This endpoint is only used during retries if there
- // is a failure using the primary CA.
- TestCA string `json:"test_ca,omitempty"`
-
- // Your email address, so the CA can contact you if necessary.
- // Not required, but strongly recommended to provide one so
- // you can be reached if there is a problem. Your email is
- // not sent to any Caddy mothership or used for any purpose
- // other than ACME transactions.
- Email string `json:"email,omitempty"`
-
- // If you have an existing account with the ACME server, put
- // the private key here in PEM format. The ACME client will
- // look up your account information with this key first before
- // trying to create a new one. You can use placeholders here,
- // for example if you have it in an environment variable.
- AccountKey string `json:"account_key,omitempty"`
-
- // If using an ACME CA that requires an external account
- // binding, specify the CA-provided credentials here.
- ExternalAccount *acme.EAB `json:"external_account,omitempty"`
-
- // Time to wait before timing out an ACME operation.
- ACMETimeout caddy.Duration `json:"acme_timeout,omitempty"`
-
- // Configures the various ACME challenge types.
- Challenges *ChallengesConfig `json:"challenges,omitempty"`
-
- // An array of files of CA certificates to accept when connecting to the
- // ACME CA. Generally, you should only use this if the ACME CA endpoint
- // is internal or for development/testing purposes.
- TrustedRootsPEMFiles []string `json:"trusted_roots_pem_files,omitempty"`
-
- // Preferences for selecting alternate certificate chains, if offered
- // by the CA. By default, the first offered chain will be selected.
- // If configured, the chains may be sorted and the first matching chain
- // will be selected.
- PreferredChains *ChainPreference `json:"preferred_chains,omitempty"`
-
- rootPool *x509.CertPool
- template certmagic.ACMEManager
- magic *certmagic.Config
- logger *zap.Logger
-}
-
-// CaddyModule returns the Caddy module information.
-func (ACMEIssuer) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls.issuance.acme",
- New: func() caddy.Module { return new(ACMEIssuer) },
- }
-}
-
-// Provision sets up iss.
-func (iss *ACMEIssuer) Provision(ctx caddy.Context) error {
- iss.logger = ctx.Logger(iss)
-
- repl := caddy.NewReplacer()
-
- // expand email address, if non-empty
- if iss.Email != "" {
- email, err := repl.ReplaceOrErr(iss.Email, true, true)
- if err != nil {
- return fmt.Errorf("expanding email address '%s': %v", iss.Email, err)
- }
- iss.Email = email
- }
-
- // expand account key, if non-empty
- if iss.AccountKey != "" {
- accountKey, err := repl.ReplaceOrErr(iss.AccountKey, true, true)
- if err != nil {
- return fmt.Errorf("expanding account key PEM '%s': %v", iss.AccountKey, err)
- }
- iss.AccountKey = accountKey
- }
-
- // DNS providers
- if iss.Challenges != nil && iss.Challenges.DNS != nil && iss.Challenges.DNS.ProviderRaw != nil {
- val, err := ctx.LoadModule(iss.Challenges.DNS, "ProviderRaw")
- if err != nil {
- return fmt.Errorf("loading DNS provider module: %v", err)
- }
-
- if deprecatedProvider, ok := val.(acmez.Solver); ok {
- // TODO: For a temporary amount of time, we are allowing the use of DNS
- // providers from go-acme/lego since there are so many providers implemented
- // using that API -- they are adapted as an all-in-one Caddy module in this
-// repository: https://github.com/caddy-dns/lego-deprecated - the module is an
-// acmez.Solver type, so we use it directly. The user must set environment
- // variables to configure it. Remove this shim once a sufficient number of
- // DNS providers are implemented for the libdns APIs instead.
- iss.Challenges.DNS.solver = deprecatedProvider
- } else {
- iss.Challenges.DNS.solver = &certmagic.DNS01Solver{
- DNSProvider: val.(certmagic.ACMEDNSProvider),
- TTL: time.Duration(iss.Challenges.DNS.TTL),
- PropagationTimeout: time.Duration(iss.Challenges.DNS.PropagationTimeout),
- Resolvers: iss.Challenges.DNS.Resolvers,
- }
- }
- }
-
- // add any custom CAs to trust store
- if len(iss.TrustedRootsPEMFiles) > 0 {
- iss.rootPool = x509.NewCertPool()
- for _, pemFile := range iss.TrustedRootsPEMFiles {
- pemData, err := ioutil.ReadFile(pemFile)
- if err != nil {
- return fmt.Errorf("loading trusted root CA's PEM file: %s: %v", pemFile, err)
- }
- if !iss.rootPool.AppendCertsFromPEM(pemData) {
- return fmt.Errorf("unable to add %s to trust pool: %v", pemFile, err)
- }
- }
- }
-
- var err error
- iss.template, err = iss.makeIssuerTemplate()
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (iss *ACMEIssuer) makeIssuerTemplate() (certmagic.ACMEManager, error) {
- template := certmagic.ACMEManager{
- CA: iss.CA,
- TestCA: iss.TestCA,
- Email: iss.Email,
- AccountKeyPEM: iss.AccountKey,
- CertObtainTimeout: time.Duration(iss.ACMETimeout),
- TrustedRoots: iss.rootPool,
- ExternalAccount: iss.ExternalAccount,
- Logger: iss.logger,
- }
-
- if iss.Challenges != nil {
- if iss.Challenges.HTTP != nil {
- template.DisableHTTPChallenge = iss.Challenges.HTTP.Disabled
- template.AltHTTPPort = iss.Challenges.HTTP.AlternatePort
- }
- if iss.Challenges.TLSALPN != nil {
- template.DisableTLSALPNChallenge = iss.Challenges.TLSALPN.Disabled
- template.AltTLSALPNPort = iss.Challenges.TLSALPN.AlternatePort
- }
- if iss.Challenges.DNS != nil {
- template.DNS01Solver = iss.Challenges.DNS.solver
- }
- template.ListenHost = iss.Challenges.BindHost
- }
-
- if iss.PreferredChains != nil {
- template.PreferredChains = certmagic.ChainPreference{
- Smallest: iss.PreferredChains.Smallest,
- AnyCommonName: iss.PreferredChains.AnyCommonName,
- RootCommonName: iss.PreferredChains.RootCommonName,
- }
- }
-
- return template, nil
-}
-
-// SetConfig sets the associated certmagic config for this issuer.
-// This is required because ACME needs values from the config in
-// order to solve the challenges during issuance. This implements
-// the ConfigSetter interface.
-func (iss *ACMEIssuer) SetConfig(cfg *certmagic.Config) {
- iss.magic = cfg
-}
-
-// TODO: I kind of hate how each call to these methods needs to
-// make a new ACME manager to fill in defaults before using; can
-// we find the right place to do that just once and then re-use?
-
-// PreCheck implements the certmagic.PreChecker interface.
-func (iss *ACMEIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error {
- return certmagic.NewACMEManager(iss.magic, iss.template).PreCheck(ctx, names, interactive)
-}
-
-// Issue obtains a certificate for the given csr.
-func (iss *ACMEIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
- return certmagic.NewACMEManager(iss.magic, iss.template).Issue(ctx, csr)
-}
-
-// IssuerKey returns the unique issuer key for the configured CA endpoint.
-func (iss *ACMEIssuer) IssuerKey() string {
- return certmagic.NewACMEManager(iss.magic, iss.template).IssuerKey()
-}
-
-// Revoke revokes the given certificate.
-func (iss *ACMEIssuer) Revoke(ctx context.Context, cert certmagic.CertificateResource, reason int) error {
- return certmagic.NewACMEManager(iss.magic, iss.template).Revoke(ctx, cert, reason)
-}
-
-// GetACMEIssuer returns iss. This is useful when other types embed ACMEIssuer, because
-// type-asserting them to *ACMEIssuer will fail, but type-asserting them to an interface
-// with only this method will succeed, and will still allow the embedded ACMEIssuer
-// to be accessed and manipulated.
-func (iss *ACMEIssuer) GetACMEIssuer() *ACMEIssuer { return iss }
-
-// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
-//
-// ... acme [<directory>] {
-// dir <directory>
-// test_dir <directory>
-// email <email>
-// timeout <duration>
-// disable_http_challenge
-// disable_tlsalpn_challenge
-// alt_http_port <port>
-// alt_tlsalpn_port <port>
-// eab <key_id> <mac_key>
-// trusted_roots <pem_files...>
-// dns <provider_name> [<options>]
-// resolvers <dns_servers...>
-// preferred_chains [smallest] {
-// root_common_name <common_names...>
-// any_common_name <common_names...>
-// }
-// }
-//
-func (iss *ACMEIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if d.NextArg() {
- iss.CA = d.Val()
- if d.NextArg() {
- return d.ArgErr()
- }
- }
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- switch d.Val() {
- case "dir":
- if iss.CA != "" {
- return d.Errf("directory is already specified: %s", iss.CA)
- }
- if !d.AllArgs(&iss.CA) {
- return d.ArgErr()
- }
-
- case "test_dir":
- if !d.AllArgs(&iss.TestCA) {
- return d.ArgErr()
- }
-
- case "email":
- if !d.AllArgs(&iss.Email) {
- return d.ArgErr()
- }
-
- case "timeout":
- var timeoutStr string
- if !d.AllArgs(&timeoutStr) {
- return d.ArgErr()
- }
- timeout, err := caddy.ParseDuration(timeoutStr)
- if err != nil {
- return d.Errf("invalid timeout duration %s: %v", timeoutStr, err)
- }
- iss.ACMETimeout = caddy.Duration(timeout)
-
- case "disable_http_challenge":
- if d.NextArg() {
- return d.ArgErr()
- }
- if iss.Challenges == nil {
- iss.Challenges = new(ChallengesConfig)
- }
- if iss.Challenges.HTTP == nil {
- iss.Challenges.HTTP = new(HTTPChallengeConfig)
- }
- iss.Challenges.HTTP.Disabled = true
-
- case "disable_tlsalpn_challenge":
- if d.NextArg() {
- return d.ArgErr()
- }
- if iss.Challenges == nil {
- iss.Challenges = new(ChallengesConfig)
- }
- if iss.Challenges.TLSALPN == nil {
- iss.Challenges.TLSALPN = new(TLSALPNChallengeConfig)
- }
- iss.Challenges.TLSALPN.Disabled = true
-
- case "alt_http_port":
- if !d.NextArg() {
- return d.ArgErr()
- }
- port, err := strconv.Atoi(d.Val())
- if err != nil {
- return d.Errf("invalid port %s: %v", d.Val(), err)
- }
- if iss.Challenges == nil {
- iss.Challenges = new(ChallengesConfig)
- }
- if iss.Challenges.HTTP == nil {
- iss.Challenges.HTTP = new(HTTPChallengeConfig)
- }
- iss.Challenges.HTTP.AlternatePort = port
-
- case "alt_tlsalpn_port":
- if !d.NextArg() {
- return d.ArgErr()
- }
- port, err := strconv.Atoi(d.Val())
- if err != nil {
- return d.Errf("invalid port %s: %v", d.Val(), err)
- }
- if iss.Challenges == nil {
- iss.Challenges = new(ChallengesConfig)
- }
- if iss.Challenges.TLSALPN == nil {
- iss.Challenges.TLSALPN = new(TLSALPNChallengeConfig)
- }
- iss.Challenges.TLSALPN.AlternatePort = port
-
- case "eab":
- iss.ExternalAccount = new(acme.EAB)
- if !d.AllArgs(&iss.ExternalAccount.KeyID, &iss.ExternalAccount.MACKey) {
- return d.ArgErr()
- }
-
- case "trusted_roots":
- iss.TrustedRootsPEMFiles = d.RemainingArgs()
-
- case "dns":
- if !d.NextArg() {
- return d.ArgErr()
- }
- provName := d.Val()
- if iss.Challenges == nil {
- iss.Challenges = new(ChallengesConfig)
- }
- if iss.Challenges.DNS == nil {
- iss.Challenges.DNS = new(DNSChallengeConfig)
- }
- unm, err := caddyfile.UnmarshalModule(d, "dns.providers."+provName)
- if err != nil {
- return err
- }
- iss.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, nil)
- case "propagation_timeout":
- if !d.NextArg() {
- return d.ArgErr()
- }
- timeoutStr := d.Val()
- timeout, err := caddy.ParseDuration(timeoutStr)
- if err != nil {
- return d.Errf("invalid propagation_timeout duration %s: %v", timeoutStr, err)
- }
- if iss.Challenges == nil {
- iss.Challenges = new(ChallengesConfig)
- }
- if iss.Challenges.DNS == nil {
- iss.Challenges.DNS = new(DNSChallengeConfig)
- }
- iss.Challenges.DNS.PropagationTimeout = caddy.Duration(timeout)
-
- case "resolvers":
- if iss.Challenges == nil {
- iss.Challenges = new(ChallengesConfig)
- }
- if iss.Challenges.DNS == nil {
- iss.Challenges.DNS = new(DNSChallengeConfig)
- }
- iss.Challenges.DNS.Resolvers = d.RemainingArgs()
- if len(iss.Challenges.DNS.Resolvers) == 0 {
- return d.ArgErr()
- }
-
- case "preferred_chains":
- chainPref, err := ParseCaddyfilePreferredChainsOptions(d)
- if err != nil {
- return err
- }
- iss.PreferredChains = chainPref
-
- default:
- return d.Errf("unrecognized ACME issuer property: %s", d.Val())
- }
- }
- }
- return nil
-}
-
-// onDemandAskRequest makes a request to the ask URL
-// to see if a certificate can be obtained for name.
-// The certificate request should be denied if this
-// returns an error.
-func onDemandAskRequest(ask string, name string) error {
- askURL, err := url.Parse(ask)
- if err != nil {
- return fmt.Errorf("parsing ask URL: %v", err)
- }
- qs := askURL.Query()
- qs.Set("domain", name)
- askURL.RawQuery = qs.Encode()
-
- resp, err := onDemandAskClient.Get(askURL.String())
- if err != nil {
- return fmt.Errorf("error checking %v to determine if certificate for hostname '%s' should be allowed: %v",
- ask, name, err)
- }
- resp.Body.Close()
-
- if resp.StatusCode < 200 || resp.StatusCode > 299 {
- return fmt.Errorf("certificate for hostname '%s' not allowed; non-2xx status code %d returned from %v",
- name, resp.StatusCode, ask)
- }
-
- return nil
-}
-
-func ParseCaddyfilePreferredChainsOptions(d *caddyfile.Dispenser) (*ChainPreference, error) {
- chainPref := new(ChainPreference)
- if d.NextArg() {
- smallestOpt := d.Val()
- if smallestOpt == "smallest" {
- trueBool := true
- chainPref.Smallest = &trueBool
- if d.NextArg() { // Only one argument allowed
- return nil, d.ArgErr()
- }
- if d.NextBlock(d.Nesting()) { // Don't allow other options when smallest == true
- return nil, d.Err("No more options are accepted when using the 'smallest' option")
- }
- } else { // Smallest option should always be 'smallest' or unset
- return nil, d.Errf("Invalid argument '%s'", smallestOpt)
- }
- }
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- switch d.Val() {
- case "root_common_name":
- rootCommonNameOpt := d.RemainingArgs()
- chainPref.RootCommonName = rootCommonNameOpt
- if rootCommonNameOpt == nil {
- return nil, d.ArgErr()
- }
- if chainPref.AnyCommonName != nil {
- return nil, d.Err("Can't set root_common_name when any_common_name is already set")
- }
-
- case "any_common_name":
- anyCommonNameOpt := d.RemainingArgs()
- chainPref.AnyCommonName = anyCommonNameOpt
- if anyCommonNameOpt == nil {
- return nil, d.ArgErr()
- }
- if chainPref.RootCommonName != nil {
- return nil, d.Err("Can't set any_common_name when root_common_name is already set")
- }
-
- default:
- return nil, d.Errf("Received unrecognized parameter '%s'", d.Val())
- }
- }
-
- if chainPref.Smallest == nil && chainPref.RootCommonName == nil && chainPref.AnyCommonName == nil {
- return nil, d.Err("No options for preferred_chains received")
- }
-
- return chainPref, nil
-}
-
-// ChainPreference describes the client's preferred certificate chain,
-// useful if the CA offers alternate chains. The first matching chain
-// will be selected.
-type ChainPreference struct {
- // Prefer chains with the fewest number of bytes.
- Smallest *bool `json:"smallest,omitempty"`
-
- // Select first chain having a root with one of
- // these common names.
- RootCommonName []string `json:"root_common_name,omitempty"`
-
- // Select first chain that has any issuer with one
- // of these common names.
- AnyCommonName []string `json:"any_common_name,omitempty"`
-}
-
-// Interface guards
-var (
- _ certmagic.PreChecker = (*ACMEIssuer)(nil)
- _ certmagic.Issuer = (*ACMEIssuer)(nil)
- _ certmagic.Revoker = (*ACMEIssuer)(nil)
- _ caddy.Provisioner = (*ACMEIssuer)(nil)
- _ ConfigSetter = (*ACMEIssuer)(nil)
- _ caddyfile.Unmarshaler = (*ACMEIssuer)(nil)
-)
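
onDemandAskRequest defines a simple contract for the "ask" endpoint: Caddy issues GET ask?domain=<name> and treats any 2xx response as permission to issue. A toy allowlist server honoring that contract (the domain and port are hypothetical):

package main

import "net/http"

func main() {
    allowed := map[string]bool{"app.example.test": true}
    http.HandleFunc("/ask", func(w http.ResponseWriter, r *http.Request) {
        // 200 = issue the certificate; anything outside 2xx = deny.
        if allowed[r.URL.Query().Get("domain")] {
            w.WriteHeader(http.StatusOK)
            return
        }
        w.WriteHeader(http.StatusForbidden)
    })
    _ = http.ListenAndServe("127.0.0.1:9123", nil)
}
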
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/automation.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/automation.go
deleted file mode 100644
index c4a90a84..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/automation.go
+++ /dev/null
@@ -1,399 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/certmagic"
- "github.com/mholt/acmez"
-)
-
-// AutomationConfig governs the automated management of TLS certificates.
-type AutomationConfig struct {
- // The list of automation policies. The first matching
- // policy will be applied for a given certificate/name.
- Policies []*AutomationPolicy `json:"policies,omitempty"`
-
- // On-Demand TLS defers certificate operations to the
- // moment they are needed, e.g. during a TLS handshake.
- // Useful when you don't know all the hostnames at
- // config-time, or when you are not in control of the
- // domain names you are managing certificates for.
- // In 2015, Caddy became the first web server to
- // implement this experimental technology.
- //
- // Note that this field does not enable on-demand TLS,
- // it only configures it for when it is used. To enable
- // it, create an automation policy with `on_demand`.
- OnDemand *OnDemandConfig `json:"on_demand,omitempty"`
-
- // Caddy staples OCSP (and caches the response) for all
- // qualifying certificates by default. This setting
- // changes how often it scans responses for freshness,
- // and updates them if they are getting stale. Default: 1h
- OCSPCheckInterval caddy.Duration `json:"ocsp_interval,omitempty"`
-
- // Every so often, Caddy will scan all loaded, managed
- // certificates for expiration. This setting changes how
- // frequently the scan for expiring certificates is
- // performed. Default: 10m
- RenewCheckInterval caddy.Duration `json:"renew_interval,omitempty"`
-
- // How often to scan storage units for old or expired
- // assets and remove them. These scans exert lots of
- // reads (and list operations) on the storage module, so
- // choose a longer interval for large deployments.
- // Default: 24h
- //
- // Storage will always be cleaned when the process first
- // starts. Then, a new cleaning will be started this
- // duration after the previous cleaning started if the
- // previous cleaning finished in less than half the time
- // of this interval (otherwise next start will be skipped).
- StorageCleanInterval caddy.Duration `json:"storage_clean_interval,omitempty"`
-
- defaultPublicAutomationPolicy *AutomationPolicy
- defaultInternalAutomationPolicy *AutomationPolicy // only initialized if necessary
-}
-
-// AutomationPolicy designates the policy for automating the
-// management (obtaining, renewal, and revocation) of managed
-// TLS certificates.
-//
-// An AutomationPolicy value is not valid until it has been
-// provisioned; use the `AddAutomationPolicy()` method on the
-// TLS app to properly provision a new policy.
-type AutomationPolicy struct {
- // Which subjects (hostnames or IP addresses) this policy applies to.
- Subjects []string `json:"subjects,omitempty"`
-
- // The modules that may issue certificates. Default: internal if all
- // subjects do not qualify for public certificates; otherwise acme and
- // zerossl.
- IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"`
-
- // If true, certificates will be requested with MustStaple. Not all
- // CAs support this, and there are potentially serious consequences
- // of enabling this feature without proper threat modeling.
- MustStaple bool `json:"must_staple,omitempty"`
-
- // How long before a certificate's expiration to try renewing it,
- // as a function of its total lifetime. As a general and conservative
- // rule, it is a good idea to renew a certificate when it has about
- // 1/3 of its total lifetime remaining. This utilizes the majority
- // of the certificate's lifetime while still saving time to
- // troubleshoot problems. However, for extremely short-lived certs,
- // you may wish to increase the ratio to ~1/2.
- RenewalWindowRatio float64 `json:"renewal_window_ratio,omitempty"`
-
- // The type of key to generate for certificates.
- // Supported values: `ed25519`, `p256`, `p384`, `rsa2048`, `rsa4096`.
- KeyType string `json:"key_type,omitempty"`
-
- // Optionally configure a separate storage module associated with this
- // manager, instead of using Caddy's global/default-configured storage.
- StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
-
- // If true, certificates will be managed "on demand"; that is, during
- // TLS handshakes or when needed, as opposed to at startup or config
- // load.
- OnDemand bool `json:"on_demand,omitempty"`
-
- // Disables OCSP stapling. Disabling OCSP stapling puts clients at
- // greater risk, reduces their privacy, and usually lowers client
- // performance. It is NOT recommended to disable this unless you
- // are able to justify the costs.
- // EXPERIMENTAL. Subject to change.
- DisableOCSPStapling bool `json:"disable_ocsp_stapling,omitempty"`
-
- // Overrides the URLs of OCSP responders embedded in certificates.
- // Each key is an OCSP server URL to override, and its value is the
- // replacement. An empty value will disable querying of that server.
- // EXPERIMENTAL. Subject to change.
- OCSPOverrides map[string]string `json:"ocsp_overrides,omitempty"`
-
- // Issuers stores the decoded issuer parameters. This is only
- // used to populate an underlying certmagic.Config's Issuers
- // field; it is not referenced thereafter.
- Issuers []certmagic.Issuer `json:"-"`
-
- magic *certmagic.Config
- storage certmagic.Storage
-}
-
-// Provision sets up ap and builds its underlying CertMagic config.
-func (ap *AutomationPolicy) Provision(tlsApp *TLS) error {
- // policy-specific storage implementation
- if ap.StorageRaw != nil {
- val, err := tlsApp.ctx.LoadModule(ap, "StorageRaw")
- if err != nil {
- return fmt.Errorf("loading TLS storage module: %v", err)
- }
- cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage()
- if err != nil {
- return fmt.Errorf("creating TLS storage configuration: %v", err)
- }
- ap.storage = cmStorage
- }
-
- var ond *certmagic.OnDemandConfig
- if ap.OnDemand {
- ond = &certmagic.OnDemandConfig{
- DecisionFunc: func(name string) error {
- // if an "ask" endpoint was defined, consult it first
- if tlsApp.Automation != nil &&
- tlsApp.Automation.OnDemand != nil &&
- tlsApp.Automation.OnDemand.Ask != "" {
- err := onDemandAskRequest(tlsApp.Automation.OnDemand.Ask, name)
- if err != nil {
- return err
- }
- }
- // check the rate limiter last because
- // doing so makes a reservation
- if !onDemandRateLimiter.Allow() {
- return fmt.Errorf("on-demand rate limit exceeded")
- }
- return nil
- },
- }
- }
-
- // load and provision any explicitly-configured issuer modules
- if ap.IssuersRaw != nil {
- val, err := tlsApp.ctx.LoadModule(ap, "IssuersRaw")
- if err != nil {
- return fmt.Errorf("loading TLS automation management module: %s", err)
- }
- for _, issVal := range val.([]interface{}) {
- ap.Issuers = append(ap.Issuers, issVal.(certmagic.Issuer))
- }
- }
-
- issuers := ap.Issuers
- if len(issuers) == 0 {
- var err error
- issuers, err = DefaultIssuersProvisioned(tlsApp.ctx)
- if err != nil {
- return err
- }
- }
-
- keyType := ap.KeyType
- if keyType != "" {
- var err error
- keyType, err = caddy.NewReplacer().ReplaceOrErr(ap.KeyType, true, true)
- if err != nil {
- return fmt.Errorf("invalid key type %s: %s", ap.KeyType, err)
- }
- if _, ok := supportedCertKeyTypes[keyType]; !ok {
- return fmt.Errorf("unrecognized key type: %s", keyType)
- }
- }
- keySource := certmagic.StandardKeyGenerator{
- KeyType: supportedCertKeyTypes[keyType],
- }
-
- storage := ap.storage
- if storage == nil {
- storage = tlsApp.ctx.Storage()
- }
-
- template := certmagic.Config{
- MustStaple: ap.MustStaple,
- RenewalWindowRatio: ap.RenewalWindowRatio,
- KeySource: keySource,
- OnDemand: ond,
- OCSP: certmagic.OCSPConfig{
- DisableStapling: ap.DisableOCSPStapling,
- ResponderOverrides: ap.OCSPOverrides,
- },
- Storage: storage,
- Issuers: issuers,
- Logger: tlsApp.logger,
- }
- ap.magic = certmagic.New(tlsApp.certCache, template)
-
- // sometimes issuers may need the parent certmagic.Config in
- // order to function properly (for example, ACMEIssuer needs
- // access to the correct storage and cache so it can solve
- // ACME challenges -- it's an annoying, inelegant circular
- // dependency that I don't know how to resolve nicely!)
- for _, issuer := range ap.magic.Issuers {
- if annoying, ok := issuer.(ConfigSetter); ok {
- annoying.SetConfig(ap.magic)
- }
- }
-
- return nil
-}
-
-// DefaultIssuers returns empty Issuers (not provisioned) to be used as defaults.
-// This function is experimental and has no compatibility promises.
-func DefaultIssuers() []certmagic.Issuer {
- return []certmagic.Issuer{
- new(ACMEIssuer),
- &ZeroSSLIssuer{ACMEIssuer: new(ACMEIssuer)},
- }
-}
-
-// DefaultIssuersProvisioned returns empty but provisioned default Issuers from
-// DefaultIssuers(). This function is experimental and has no compatibility promises.
-func DefaultIssuersProvisioned(ctx caddy.Context) ([]certmagic.Issuer, error) {
- issuers := DefaultIssuers()
- for i, iss := range issuers {
- if prov, ok := iss.(caddy.Provisioner); ok {
- err := prov.Provision(ctx)
- if err != nil {
- return nil, fmt.Errorf("provisioning default issuer %d: %T: %v", i, iss, err)
- }
- }
- }
- return issuers, nil
-}
-
-// ChallengesConfig configures the ACME challenges.
-type ChallengesConfig struct {
- // HTTP configures the ACME HTTP challenge. This
- // challenge is enabled and used automatically
- // and by default.
- HTTP *HTTPChallengeConfig `json:"http,omitempty"`
-
- // TLSALPN configures the ACME TLS-ALPN challenge.
- // This challenge is enabled and used automatically
- // and by default.
- TLSALPN *TLSALPNChallengeConfig `json:"tls-alpn,omitempty"`
-
- // Configures the ACME DNS challenge. Because this
- // challenge typically requires credentials for
- // interfacing with a DNS provider, this challenge is
- // not enabled by default. This is the only challenge
- // type which does not require a direct connection
- // to Caddy from an external server.
- //
- // NOTE: DNS providers are currently being upgraded,
- // and this API is subject to change, but should be
- // stabilized soon.
- DNS *DNSChallengeConfig `json:"dns,omitempty"`
-
- // Optionally customize the host to which a listener
- // is bound if required for solving a challenge.
- BindHost string `json:"bind_host,omitempty"`
-}
-
-// HTTPChallengeConfig configures the ACME HTTP challenge.
-type HTTPChallengeConfig struct {
- // If true, the HTTP challenge will be disabled.
- Disabled bool `json:"disabled,omitempty"`
-
- // An alternate port on which to service this
- // challenge. Note that the HTTP challenge port is
- // hard-coded into the spec and cannot be changed,
- // so you would have to forward packets from the
- // standard HTTP challenge port to this one.
- AlternatePort int `json:"alternate_port,omitempty"`
-}
-
-// TLSALPNChallengeConfig configures the ACME TLS-ALPN challenge.
-type TLSALPNChallengeConfig struct {
- // If true, the TLS-ALPN challenge will be disabled.
- Disabled bool `json:"disabled,omitempty"`
-
- // An alternate port on which to service this
- // challenge. Note that the TLS-ALPN challenge port
- // is hard-coded into the spec and cannot be changed,
- // so you would have to forward packets from the
- // standard TLS-ALPN challenge port to this one.
- AlternatePort int `json:"alternate_port,omitempty"`
-}
-
-// DNSChallengeConfig configures the ACME DNS challenge.
-//
-// NOTE: This API is still experimental and is subject to change.
-type DNSChallengeConfig struct {
- // The DNS provider module to use which will manage
- // the DNS records relevant to the ACME challenge.
- ProviderRaw json.RawMessage `json:"provider,omitempty" caddy:"namespace=dns.providers inline_key=name"`
-
- // The TTL of the TXT record used for the DNS challenge.
- TTL caddy.Duration `json:"ttl,omitempty"`
-
- // How long to wait for DNS record to propagate.
- PropagationTimeout caddy.Duration `json:"propagation_timeout,omitempty"`
-
- // Custom DNS resolvers to prefer over system/built-in defaults.
- // Often necessary to configure when using split-horizon DNS.
- Resolvers []string `json:"resolvers,omitempty"`
-
- solver acmez.Solver
-}
-
-// OnDemandConfig configures on-demand TLS, for obtaining
-// needed certificates at handshake-time. Because this
-// feature can easily be abused, you should use this to
-// establish rate limits and/or an internal endpoint that
-// Caddy can "ask" if it should be allowed to manage
-// certificates for a given hostname.
-type OnDemandConfig struct {
- // An optional rate limit to throttle the
- // issuance of certificates from handshakes.
- RateLimit *RateLimit `json:"rate_limit,omitempty"`
-
- // If Caddy needs to obtain or renew a certificate
- // during a TLS handshake, it will perform a quick
- // HTTP request to this URL to check if it should be
- // allowed to try to get a certificate for the name
- // in the "domain" query string parameter, like so:
- // `?domain=example.com`. The endpoint must return a
- // 200 OK status if a certificate is allowed;
- // anything else will cause it to be denied.
- // Redirects are not followed.
- Ask string `json:"ask,omitempty"`
-}
-
-// RateLimit specifies an interval with optional burst size.
-type RateLimit struct {
- // A duration value. A certificate may be obtained 'burst'
- // times during this interval.
- Interval caddy.Duration `json:"interval,omitempty"`
-
- // How many times during an interval a certificate can be obtained.
- Burst int `json:"burst,omitempty"`
-}
-
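The "ask" endpoint described in the OnDemandConfig comments above can be any HTTP listener that returns 200 for permitted names. A minimal sketch, assuming a hypothetical /check path, port, and allow-list (per the docs, Caddy sends `?domain=<name>` and refuses to follow redirects, so the endpoint must answer directly):

package main

import (
	"log"
	"net/http"
)

// allowed is a stand-in for whatever source of truth you would consult
// (a database, an internal API, etc.) before permitting issuance.
var allowed = map[string]bool{"example.com": true}

func main() {
	http.HandleFunc("/check", func(w http.ResponseWriter, r *http.Request) {
		if allowed[r.URL.Query().Get("domain")] {
			w.WriteHeader(http.StatusOK) // 200 OK permits issuance
			return
		}
		http.Error(w, "domain not allowed", http.StatusForbidden) // anything else denies
	})
	log.Fatal(http.ListenAndServe(":9123", nil))
}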
-// ConfigSetter is implemented by certmagic.Issuers that
-// need access to a parent certmagic.Config as part of
-// their provisioning phase. For example, the ACMEIssuer
-// requires a config so it can access storage and the
-// cache to solve ACME challenges.
-type ConfigSetter interface {
- SetConfig(cfg *certmagic.Config)
-}
-
-// These perpetual values are used for on-demand TLS.
-var (
- onDemandRateLimiter = certmagic.NewRateLimiter(0, 0)
- onDemandAskClient = &http.Client{
- Timeout: 10 * time.Second,
- CheckRedirect: func(req *http.Request, via []*http.Request) error {
- return fmt.Errorf("following http redirects is not allowed")
- },
- }
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/certselection.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/certselection.go
deleted file mode 100644
index 0311f116..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/certselection.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "crypto/tls"
- "crypto/x509"
- "encoding/json"
- "fmt"
- "math/big"
-
- "github.com/caddyserver/certmagic"
-)
-
-// CustomCertSelectionPolicy represents a policy for selecting the certificate
-// used to complete a handshake when there may be multiple options. All fields
-// specified must match the candidate certificate for it to be chosen.
-// This was needed to solve https://github.com/caddyserver/caddy/issues/2588.
-type CustomCertSelectionPolicy struct {
- // The certificate must have one of these serial numbers.
- SerialNumber []bigInt `json:"serial_number,omitempty"`
-
- // The certificate must have one of these organization names.
- SubjectOrganization []string `json:"subject_organization,omitempty"`
-
- // The certificate must use this public key algorithm.
- PublicKeyAlgorithm PublicKeyAlgorithm `json:"public_key_algorithm,omitempty"`
-
- // The certificate must have at least one of the tags in the list.
- AnyTag []string `json:"any_tag,omitempty"`
-
- // The certificate must have all of the tags in the list.
- AllTags []string `json:"all_tags,omitempty"`
-}
-
-// SelectCertificate implements certmagic.CertificateSelector. It
-// only chooses a certificate that at least meets the criteria in
-// p. It then chooses the first non-expired certificate that is
-// compatible with the client. If none are valid, it chooses the
-// first viable candidate anyway.
-func (p CustomCertSelectionPolicy) SelectCertificate(hello *tls.ClientHelloInfo, choices []certmagic.Certificate) (certmagic.Certificate, error) {
- viable := make([]certmagic.Certificate, 0, len(choices))
-
-nextChoice:
- for _, cert := range choices {
- if len(p.SerialNumber) > 0 {
- var found bool
- for _, sn := range p.SerialNumber {
- if cert.Leaf.SerialNumber.Cmp(&sn.Int) == 0 {
- found = true
- break
- }
- }
- if !found {
- continue
- }
- }
-
- if len(p.SubjectOrganization) > 0 {
- var found bool
- for _, subjOrg := range p.SubjectOrganization {
- for _, org := range cert.Leaf.Subject.Organization {
- if subjOrg == org {
- found = true
- break
- }
- }
- }
- if !found {
- continue
- }
- }
-
- if p.PublicKeyAlgorithm != PublicKeyAlgorithm(x509.UnknownPublicKeyAlgorithm) &&
- PublicKeyAlgorithm(cert.Leaf.PublicKeyAlgorithm) != p.PublicKeyAlgorithm {
- continue
- }
-
- if len(p.AnyTag) > 0 {
- var found bool
- for _, tag := range p.AnyTag {
- if cert.HasTag(tag) {
- found = true
- break
- }
- }
- if !found {
- continue
- }
- }
-
- if len(p.AllTags) > 0 {
- for _, tag := range p.AllTags {
- if !cert.HasTag(tag) {
- continue nextChoice
- }
- }
- }
-
- // this certificate at least meets the policy's requirements,
- // but we still have to check expiration and compatibility
- viable = append(viable, cert)
- }
-
- if len(viable) == 0 {
- return certmagic.Certificate{}, fmt.Errorf("no certificates matched custom selection policy")
- }
-
- return certmagic.DefaultCertificateSelector(hello, viable)
-}
-
-// bigInt is a big.Int type that interops with JSON encodings as a string.
-type bigInt struct{ big.Int }
-
-func (bi bigInt) MarshalJSON() ([]byte, error) {
- return json.Marshal(bi.String())
-}
-
-func (bi *bigInt) UnmarshalJSON(p []byte) error {
- if string(p) == "null" {
- return nil
- }
- var stringRep string
- err := json.Unmarshal(p, &stringRep)
- if err != nil {
- return err
- }
- _, ok := bi.SetString(stringRep, 10)
- if !ok {
- return fmt.Errorf("not a valid big integer: %s", p)
- }
- return nil
-}
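A self-contained round trip of the bigInt wrapper above shows why serial numbers travel as decimal strings: JSON numbers lose precision beyond float64, while strings carry full 128-bit serials intact. This sketch just reuses the two methods from the deleted file:

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

type bigInt struct{ big.Int }

func (bi bigInt) MarshalJSON() ([]byte, error) { return json.Marshal(bi.String()) }

func (bi *bigInt) UnmarshalJSON(p []byte) error {
	if string(p) == "null" {
		return nil
	}
	var s string
	if err := json.Unmarshal(p, &s); err != nil {
		return err
	}
	if _, ok := bi.SetString(s, 10); !ok {
		return fmt.Errorf("not a valid big integer: %s", p)
	}
	return nil
}

func main() {
	var sn bigInt
	if err := json.Unmarshal([]byte(`"340282366920938463463374607431768211456"`), &sn); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(sn)
	fmt.Println(sn.String(), string(out)) // 2^128 survives the round trip
}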
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/connpolicy.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/connpolicy.go
deleted file mode 100644
index 6c7fe3f4..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/connpolicy.go
+++ /dev/null
@@ -1,484 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "crypto/tls"
- "crypto/x509"
- "encoding/base64"
- "fmt"
- "io/ioutil"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/mholt/acmez"
-)
-
-// ConnectionPolicies govern the establishment of TLS connections. It is
-// an ordered group of connection policies; the first matching policy will
-// be used to configure TLS connections at handshake-time.
-type ConnectionPolicies []*ConnectionPolicy
-
-// Provision sets up each connection policy. It should be called
-// during the Validate() phase, after the TLS app (if any) is
-// already set up.
-func (cp ConnectionPolicies) Provision(ctx caddy.Context) error {
- for i, pol := range cp {
- // matchers
- mods, err := ctx.LoadModule(pol, "MatchersRaw")
- if err != nil {
- return fmt.Errorf("loading handshake matchers: %v", err)
- }
- for _, modIface := range mods.(map[string]interface{}) {
- cp[i].matchers = append(cp[i].matchers, modIface.(ConnectionMatcher))
- }
-
- // enable HTTP/2 by default
- if len(pol.ALPN) == 0 {
- pol.ALPN = append(pol.ALPN, defaultALPN...)
- }
-
- // pre-build standard TLS config so we don't have to at handshake-time
- err = pol.buildStandardTLSConfig(ctx)
- if err != nil {
- return fmt.Errorf("connection policy %d: building standard TLS config: %s", i, err)
- }
- }
-
- return nil
-}
-
-// TLSConfig returns a standard-lib-compatible TLS configuration which
-// selects the first matching policy based on the ClientHello.
-func (cp ConnectionPolicies) TLSConfig(ctx caddy.Context) *tls.Config {
- // using ServerName to match policies is extremely common, especially in configs
- // with lots and lots of different policies; we can fast-track those by indexing
- // them by SNI, so we don't have to iterate potentially thousands of policies
- // (TODO: this map does not account for wildcards, see if this is a problem in practice? look for reports of high connection latency with wildcard certs but low latency for non-wildcards in multi-thousand-cert deployments)
- indexedBySNI := make(map[string]ConnectionPolicies)
- if len(cp) > 30 {
- for _, p := range cp {
- for _, m := range p.matchers {
- if sni, ok := m.(MatchServerName); ok {
- for _, sniName := range sni {
- indexedBySNI[sniName] = append(indexedBySNI[sniName], p)
- }
- }
- }
- }
- }
-
- return &tls.Config{
- MinVersion: tls.VersionTLS12,
- GetConfigForClient: func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
- // filter policies by SNI first, if possible, to speed things up
- // when there may be lots of policies
- possiblePolicies := cp
- if indexedPolicies, ok := indexedBySNI[hello.ServerName]; ok {
- possiblePolicies = indexedPolicies
- }
-
- policyLoop:
- for _, pol := range possiblePolicies {
- for _, matcher := range pol.matchers {
- if !matcher.Match(hello) {
- continue policyLoop
- }
- }
- return pol.stdTLSConfig, nil
- }
-
- return nil, fmt.Errorf("no server TLS configuration available for ClientHello: %+v", hello)
- },
- }
-}
-
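The SNI fast path in TLSConfig above buckets policies by exact ServerName so a handshake consults only its bucket instead of scanning every policy. A simplified sketch of the indexing idea, with stand-in types in place of the real ConnectionPolicy/matcher plumbing:

package main

import "fmt"

type policy struct{ snis []string }

// index buckets each policy under every server name it matches on,
// mirroring the indexedBySNI map built when there are many policies.
func index(policies []*policy) map[string][]*policy {
	bySNI := make(map[string][]*policy)
	for _, p := range policies {
		for _, name := range p.snis {
			bySNI[name] = append(bySNI[name], p)
		}
	}
	return bySNI
}

func main() {
	idx := index([]*policy{
		{snis: []string{"a.example.com"}},
		{snis: []string{"b.example.com"}},
	})
	fmt.Println(len(idx["a.example.com"])) // a handshake for a.example.com scans 1 policy, not all
}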
-// ConnectionPolicy specifies the logic for handling a TLS handshake.
-// An empty policy is valid; safe and sensible defaults will be used.
-type ConnectionPolicy struct {
- // How to match this policy with a TLS ClientHello. If
- // this policy is the first to match, it will be used.
- MatchersRaw caddy.ModuleMap `json:"match,omitempty" caddy:"namespace=tls.handshake_match"`
-
- // How to choose a certificate if more than one matched
- // the given ServerName (SNI) value.
- CertSelection *CustomCertSelectionPolicy `json:"certificate_selection,omitempty"`
-
- // The list of cipher suites to support. Caddy's
- // defaults are modern and secure.
- CipherSuites []string `json:"cipher_suites,omitempty"`
-
- // The list of elliptic curves to support. Caddy's
- // defaults are modern and secure.
- Curves []string `json:"curves,omitempty"`
-
- // Protocols to use for Application-Layer Protocol
- // Negotiation (ALPN) during the handshake.
- ALPN []string `json:"alpn,omitempty"`
-
- // Minimum TLS protocol version to allow. Default: `tls1.2`
- ProtocolMin string `json:"protocol_min,omitempty"`
-
- // Maximum TLS protocol version to allow. Default: `tls1.3`
- ProtocolMax string `json:"protocol_max,omitempty"`
-
- // Enables and configures TLS client authentication.
- ClientAuthentication *ClientAuthentication `json:"client_authentication,omitempty"`
-
- // DefaultSNI becomes the ServerName in a ClientHello if there
- // is no policy configured for the empty SNI value.
- DefaultSNI string `json:"default_sni,omitempty"`
-
- matchers []ConnectionMatcher
- stdTLSConfig *tls.Config
-}
-
-func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error {
- tlsAppIface, err := ctx.App("tls")
- if err != nil {
- return fmt.Errorf("getting tls app: %v", err)
- }
- tlsApp := tlsAppIface.(*TLS)
-
- // fill in some "easy" default values, but for other values
- // (such as slices), we should ensure that they start empty
- // so the user-provided config can fill them in; then we will
- // fill in a default config at the end if they are still unset
- cfg := &tls.Config{
- NextProtos: p.ALPN,
- PreferServerCipherSuites: true,
- GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
- // TODO: I don't love how this works: we pre-build certmagic configs
- // so that handshakes are faster. Unfortunately, certmagic configs are
- // comprised of settings from both a TLS connection policy and a TLS
- // automation policy. The only two fields (as of March 2020; v2 beta 17)
- // of a certmagic config that come from the TLS connection policy are
- // CertSelection and DefaultServerName, so an automation policy is what
- // builds the base certmagic config. Since the pre-built config is
- // shared, I don't think we can change any of its fields per-handshake,
- // hence the awkward shallow copy (dereference) here and the subsequent
- // changing of some of its fields. I'm worried this dereference allocates
- // more at handshake-time, but I don't know how to practically pre-build
- // a certmagic config for each combination of conn policy + automation policy...
- cfg := *tlsApp.getConfigForName(hello.ServerName)
- if p.CertSelection != nil {
- // you would think we could just set this whether or not
- // p.CertSelection is nil, but that leads to panics if
- // it is, because cfg.CertSelection is an interface,
- // so it will have a non-nil value even if the actual
- // value underlying it is nil (sigh)
- cfg.CertSelection = p.CertSelection
- }
- cfg.DefaultServerName = p.DefaultSNI
- return cfg.GetCertificate(hello)
- },
- MinVersion: tls.VersionTLS12,
- MaxVersion: tls.VersionTLS13,
- }
-
- // session tickets support
- if tlsApp.SessionTickets != nil {
- cfg.SessionTicketsDisabled = tlsApp.SessionTickets.Disabled
-
- // session ticket key rotation
- tlsApp.SessionTickets.register(cfg)
- ctx.OnCancel(func() {
- // do cleanup when the context is canceled because,
- // though unlikely, it is possible that a context
- // needing a TLS server config could exist for less
- // than the lifetime of the whole app
- tlsApp.SessionTickets.unregister(cfg)
- })
- }
-
- // TODO: Clean up session ticket active locks in storage if app (or process) is being closed!
-
- // add all the cipher suites in order, without duplicates
- cipherSuitesAdded := make(map[uint16]struct{})
- for _, csName := range p.CipherSuites {
- csID := CipherSuiteID(csName)
- if csID == 0 {
- return fmt.Errorf("unsupported cipher suite: %s", csName)
- }
- if _, ok := cipherSuitesAdded[csID]; !ok {
- cipherSuitesAdded[csID] = struct{}{}
- cfg.CipherSuites = append(cfg.CipherSuites, csID)
- }
- }
-
- // add all the curve preferences in order, without duplicates
- curvesAdded := make(map[tls.CurveID]struct{})
- for _, curveName := range p.Curves {
- curveID := SupportedCurves[curveName]
- if _, ok := curvesAdded[curveID]; !ok {
- curvesAdded[curveID] = struct{}{}
- cfg.CurvePreferences = append(cfg.CurvePreferences, curveID)
- }
- }
-
- // ensure ALPN includes the ACME TLS-ALPN protocol
- var alpnFound bool
- for _, a := range p.ALPN {
- if a == acmez.ACMETLS1Protocol {
- alpnFound = true
- break
- }
- }
- if !alpnFound {
- cfg.NextProtos = append(cfg.NextProtos, acmez.ACMETLS1Protocol)
- }
-
- // min and max protocol versions
- if (p.ProtocolMin != "" && p.ProtocolMax != "") && p.ProtocolMin > p.ProtocolMax {
- return fmt.Errorf("protocol min (%x) cannot be greater than protocol max (%x)", p.ProtocolMin, p.ProtocolMax)
- }
- if p.ProtocolMin != "" {
- cfg.MinVersion = SupportedProtocols[p.ProtocolMin]
- }
- if p.ProtocolMax != "" {
- cfg.MaxVersion = SupportedProtocols[p.ProtocolMax]
- }
-
- // client authentication
- if p.ClientAuthentication != nil {
- err := p.ClientAuthentication.ConfigureTLSConfig(cfg)
- if err != nil {
- return fmt.Errorf("configuring TLS client authentication: %v", err)
- }
- }
-
- setDefaultTLSParams(cfg)
-
- p.stdTLSConfig = cfg
-
- return nil
-}
-
-// SettingsEmpty returns true if p's settings (fields
-// except the matchers) are all empty/unset.
-func (p ConnectionPolicy) SettingsEmpty() bool {
- return p.CertSelection == nil &&
- p.CipherSuites == nil &&
- p.Curves == nil &&
- p.ALPN == nil &&
- p.ProtocolMin == "" &&
- p.ProtocolMax == "" &&
- p.ClientAuthentication == nil &&
- p.DefaultSNI == ""
-}
-
-// ClientAuthentication configures TLS client auth.
-type ClientAuthentication struct {
- // A list of base64 DER-encoded CA certificates
- // against which to validate client certificates.
- // Client certs which are not signed by any of
- // these CAs will be rejected.
- TrustedCACerts []string `json:"trusted_ca_certs,omitempty"`
-
- // TrustedCACertPEMFiles is a list of PEM file names
- // from which to load certificates of trusted CAs.
- // Client certificates which are not signed by any of
- // these CA certificates will be rejected.
- TrustedCACertPEMFiles []string `json:"trusted_ca_certs_pem_files,omitempty"`
-
- // A list of base64 DER-encoded client leaf certs
- // to accept. If this list is not empty, client certs
- // which are not in this list will be rejected.
- TrustedLeafCerts []string `json:"trusted_leaf_certs,omitempty"`
-
- // The mode for authenticating the client. Allowed values are:
- //
- // Mode | Description
- // -----|---------------
- // `request` | Ask clients for a certificate, but allow even if there isn't one; do not verify it
- // `require` | Require clients to present a certificate, but do not verify it
- // `verify_if_given` | Ask clients for a certificate; allow even if there isn't one, but verify it if there is
- // `require_and_verify` | Require clients to present a valid certificate that is verified
- //
- // The default mode is `require_and_verify` if any
- // TrustedCACerts or TrustedCACertPEMFiles or TrustedLeafCerts
- // are provided; otherwise, the default mode is `require`.
- Mode string `json:"mode,omitempty"`
-
- // state established with the last call to ConfigureTLSConfig
- trustedLeafCerts []*x509.Certificate
- existingVerifyPeerCert func([][]byte, [][]*x509.Certificate) error
-}
-
-// Active returns true if clientauth has an actionable configuration.
-func (clientauth ClientAuthentication) Active() bool {
- return len(clientauth.TrustedCACerts) > 0 ||
- len(clientauth.TrustedCACertPEMFiles) > 0 ||
- len(clientauth.TrustedLeafCerts) > 0 ||
- len(clientauth.Mode) > 0
-}
-
-// ConfigureTLSConfig sets up cfg to enforce clientauth's configuration.
-func (clientauth *ClientAuthentication) ConfigureTLSConfig(cfg *tls.Config) error {
- // if there's no actionable client auth, simply disable it
- if !clientauth.Active() {
- cfg.ClientAuth = tls.NoClientCert
- return nil
- }
-
- // enforce desired mode of client authentication
- if len(clientauth.Mode) > 0 {
- switch clientauth.Mode {
- case "request":
- cfg.ClientAuth = tls.RequestClientCert
- case "require":
- cfg.ClientAuth = tls.RequireAnyClientCert
- case "verify_if_given":
- cfg.ClientAuth = tls.VerifyClientCertIfGiven
- case "require_and_verify":
- cfg.ClientAuth = tls.RequireAndVerifyClientCert
- default:
- return fmt.Errorf("client auth mode not recognized: %s", clientauth.Mode)
- }
- } else {
- // otherwise, set a safe default mode
- if len(clientauth.TrustedCACerts) > 0 ||
- len(clientauth.TrustedCACertPEMFiles) > 0 ||
- len(clientauth.TrustedLeafCerts) > 0 {
- cfg.ClientAuth = tls.RequireAndVerifyClientCert
- } else {
- cfg.ClientAuth = tls.RequireAnyClientCert
- }
- }
-
- // enforce CA verification by adding CA certs to the ClientCAs pool
- if len(clientauth.TrustedCACerts) > 0 || len(clientauth.TrustedCACertPEMFiles) > 0 {
- caPool := x509.NewCertPool()
- for _, clientCAString := range clientauth.TrustedCACerts {
- clientCA, err := decodeBase64DERCert(clientCAString)
- if err != nil {
- return fmt.Errorf("parsing certificate: %v", err)
- }
- caPool.AddCert(clientCA)
- }
- for _, pemFile := range clientauth.TrustedCACertPEMFiles {
- pemContents, err := ioutil.ReadFile(pemFile)
- if err != nil {
- return fmt.Errorf("reading %s: %v", pemFile, err)
- }
- caPool.AppendCertsFromPEM(pemContents)
- }
- cfg.ClientCAs = caPool
- }
-
- // enforce leaf verification by writing our own verify function
- if len(clientauth.TrustedLeafCerts) > 0 {
- clientauth.trustedLeafCerts = []*x509.Certificate{}
- for _, clientCertString := range clientauth.TrustedLeafCerts {
- clientCert, err := decodeBase64DERCert(clientCertString)
- if err != nil {
- return fmt.Errorf("parsing certificate: %v", err)
- }
- clientauth.trustedLeafCerts = append(clientauth.trustedLeafCerts, clientCert)
- }
- // if a custom verification function already exists, wrap it
- clientauth.existingVerifyPeerCert = cfg.VerifyPeerCertificate
- cfg.VerifyPeerCertificate = clientauth.verifyPeerCertificate
- }
-
- return nil
-}
-
-// verifyPeerCertificate is for use as a tls.Config.VerifyPeerCertificate
-// callback to do custom client certificate verification. It is intended
-// for installation only by clientauth.ConfigureTLSConfig().
-func (clientauth ClientAuthentication) verifyPeerCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
- // first use any pre-existing custom verification function
- if clientauth.existingVerifyPeerCert != nil {
- err := clientauth.existingVerifyPeerCert(rawCerts, verifiedChains)
- if err != nil {
- return err
- }
- }
-
- if len(rawCerts) == 0 {
- return fmt.Errorf("no client certificate provided")
- }
-
- remoteLeafCert, err := x509.ParseCertificate(rawCerts[0])
- if err != nil {
- return fmt.Errorf("can't parse the given certificate: %s", err.Error())
- }
-
- for _, trustedLeafCert := range clientauth.trustedLeafCerts {
- if remoteLeafCert.Equal(trustedLeafCert) {
- return nil
- }
- }
-
- return fmt.Errorf("client leaf certificate failed validation")
-}
-
-// decodeBase64DERCert base64-decodes, then DER-decodes, certStr.
-func decodeBase64DERCert(certStr string) (*x509.Certificate, error) {
- derBytes, err := base64.StdEncoding.DecodeString(certStr)
- if err != nil {
- return nil, err
- }
- return x509.ParseCertificate(derBytes)
-}
-
-// setDefaultTLSParams sets the default TLS cipher suites, protocol versions,
-// and server preferences of cfg if they are not already set; it does not
-// overwrite values, only fills in missing values.
-func setDefaultTLSParams(cfg *tls.Config) {
- if len(cfg.CipherSuites) == 0 {
- cfg.CipherSuites = getOptimalDefaultCipherSuites()
- }
-
- // Not a cipher suite, but still important for mitigating protocol downgrade attacks
- // (prepend since having it at end breaks http2 due to non-h2-approved suites before it)
- cfg.CipherSuites = append([]uint16{tls.TLS_FALLBACK_SCSV}, cfg.CipherSuites...)
-
- if len(cfg.CurvePreferences) == 0 {
- cfg.CurvePreferences = defaultCurves
- }
-
- if cfg.MinVersion == 0 {
- cfg.MinVersion = tls.VersionTLS12
- }
- if cfg.MaxVersion == 0 {
- cfg.MaxVersion = tls.VersionTLS13
- }
-
- cfg.PreferServerCipherSuites = true
-}
-
-// PublicKeyAlgorithm is a JSON-unmarshalable wrapper type.
-type PublicKeyAlgorithm x509.PublicKeyAlgorithm
-
-// UnmarshalJSON satisfies json.Unmarshaler.
-func (a *PublicKeyAlgorithm) UnmarshalJSON(b []byte) error {
- algoStr := strings.ToLower(strings.Trim(string(b), `"`))
- algo, ok := publicKeyAlgorithms[algoStr]
- if !ok {
- return fmt.Errorf("unrecognized public key algorithm: %s (expected one of %v)",
- algoStr, publicKeyAlgorithms)
- }
- *a = PublicKeyAlgorithm(algo)
- return nil
-}
-
-// ConnectionMatcher is a type which matches TLS handshakes.
-type ConnectionMatcher interface {
- Match(*tls.ClientHelloInfo) bool
-}
-
-var defaultALPN = []string{"h2", "http/1.1"}
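decodeBase64DERCert above expects raw DER bytes, base64-encoded — not PEM. A self-contained round trip with a throwaway self-signed certificate makes the expected encoding for trusted_ca_certs/trusted_leaf_certs concrete (the subject name here is arbitrary):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"fmt"
	"math/big"
	"time"
)

// decodeBase64DERCert is copied from the deleted file.
func decodeBase64DERCert(certStr string) (*x509.Certificate, error) {
	derBytes, err := base64.StdEncoding.DecodeString(certStr)
	if err != nil {
		return nil, err
	}
	return x509.ParseCertificate(derBytes)
}

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "test"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, err := decodeBase64DERCert(base64.StdEncoding.EncodeToString(der))
	if err != nil {
		panic(err)
	}
	fmt.Println(cert.Subject.CommonName) // "test"
}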
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/fileloader.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/fileloader.go
deleted file mode 100644
index fdf54864..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/fileloader.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "crypto/tls"
- "fmt"
- "io/ioutil"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-func init() {
- caddy.RegisterModule(FileLoader{})
-}
-
-// FileLoader loads certificates and their associated keys from disk.
-type FileLoader []CertKeyFilePair
-
-// CaddyModule returns the Caddy module information.
-func (FileLoader) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls.certificates.load_files",
- New: func() caddy.Module { return new(FileLoader) },
- }
-}
-
-// CertKeyFilePair pairs certificate and key file names along with their
-// encoding format so that they can be loaded from disk.
-type CertKeyFilePair struct {
- // Path to the certificate (public key) file.
- Certificate string `json:"certificate"`
-
- // Path to the private key file.
- Key string `json:"key"`
-
- // The format of the cert and key. Can be "pem". Default: "pem"
- Format string `json:"format,omitempty"`
-
- // Arbitrary values to associate with this certificate.
- // Can be useful when you want to select a particular
- // certificate when there may be multiple valid candidates.
- Tags []string `json:"tags,omitempty"`
-}
-
-// LoadCertificates returns the certificates to be loaded by fl.
-func (fl FileLoader) LoadCertificates() ([]Certificate, error) {
- certs := make([]Certificate, 0, len(fl))
- for _, pair := range fl {
- certData, err := ioutil.ReadFile(pair.Certificate)
- if err != nil {
- return nil, err
- }
- keyData, err := ioutil.ReadFile(pair.Key)
- if err != nil {
- return nil, err
- }
-
- var cert tls.Certificate
- switch pair.Format {
- case "":
- fallthrough
- case "pem":
- cert, err = tls.X509KeyPair(certData, keyData)
- default:
- return nil, fmt.Errorf("unrecognized certificate/key encoding format: %s", pair.Format)
- }
- if err != nil {
- return nil, err
- }
-
- certs = append(certs, Certificate{Certificate: cert, Tags: pair.Tags})
- }
- return certs, nil
-}
-
-// Interface guard
-var _ CertificateLoader = (FileLoader)(nil)
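The loader above still reads with ioutil.ReadFile, which has been deprecated since Go 1.16. The equivalent pair load with the modern API looks like this (the file paths are placeholders):

package main

import (
	"crypto/tls"
	"fmt"
	"os"
)

// loadPair mirrors the cert/key read in FileLoader.LoadCertificates,
// using os.ReadFile in place of the deprecated ioutil.ReadFile.
func loadPair(certFile, keyFile string) (tls.Certificate, error) {
	certData, err := os.ReadFile(certFile)
	if err != nil {
		return tls.Certificate{}, err
	}
	keyData, err := os.ReadFile(keyFile)
	if err != nil {
		return tls.Certificate{}, err
	}
	return tls.X509KeyPair(certData, keyData)
}

func main() {
	_, err := loadPair("cert.pem", "key.pem")
	fmt.Println(err)
}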
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/folderloader.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/folderloader.go
deleted file mode 100644
index 10b017ee..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/folderloader.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "bytes"
- "crypto/tls"
- "encoding/pem"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-func init() {
- caddy.RegisterModule(FolderLoader{})
-}
-
-// FolderLoader loads certificates and their associated keys from disk
-// by recursively walking the specified directories, looking for PEM
-// files which contain both a certificate and a key.
-type FolderLoader []string
-
-// CaddyModule returns the Caddy module information.
-func (FolderLoader) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls.certificates.load_folders",
- New: func() caddy.Module { return new(FolderLoader) },
- }
-}
-
-// LoadCertificates loads all the certificates+keys in the directories
-// listed in fl from all files ending with .pem. This method of loading
-// certificates expects the certificate and key to be bundled into the
-// same file.
-func (fl FolderLoader) LoadCertificates() ([]Certificate, error) {
- var certs []Certificate
- for _, dir := range fl {
- err := filepath.Walk(dir, func(fpath string, info os.FileInfo, err error) error {
- if err != nil {
- return fmt.Errorf("unable to traverse into path: %s", fpath)
- }
- if info.IsDir() {
- return nil
- }
- if !strings.HasSuffix(strings.ToLower(info.Name()), ".pem") {
- return nil
- }
-
- cert, err := x509CertFromCertAndKeyPEMFile(fpath)
- if err != nil {
- return err
- }
-
- certs = append(certs, Certificate{Certificate: cert})
-
- return nil
- })
- if err != nil {
- return nil, err
- }
- }
- return certs, nil
-}
-
-func x509CertFromCertAndKeyPEMFile(fpath string) (tls.Certificate, error) {
- bundle, err := ioutil.ReadFile(fpath)
- if err != nil {
- return tls.Certificate{}, err
- }
-
- certBuilder, keyBuilder := new(bytes.Buffer), new(bytes.Buffer)
- var foundKey bool // use only the first key in the file
-
- for {
- // Decode next block so we can see what type it is
- var derBlock *pem.Block
- derBlock, bundle = pem.Decode(bundle)
- if derBlock == nil {
- break
- }
-
- if derBlock.Type == "CERTIFICATE" {
- // Re-encode certificate as PEM, appending to certificate chain
- err = pem.Encode(certBuilder, derBlock)
- if err != nil {
- return tls.Certificate{}, err
- }
- } else if derBlock.Type == "EC PARAMETERS" {
- // EC keys generated from openssl can be composed of two blocks:
- // parameters and key (parameter block should come first)
- if !foundKey {
- // Encode parameters
- err = pem.Encode(keyBuilder, derBlock)
- if err != nil {
- return tls.Certificate{}, err
- }
-
- // Key must immediately follow
- derBlock, bundle = pem.Decode(bundle)
- if derBlock == nil || derBlock.Type != "EC PRIVATE KEY" {
- return tls.Certificate{}, fmt.Errorf("%s: expected elliptic private key to immediately follow EC parameters", fpath)
- }
- err = pem.Encode(keyBuilder, derBlock)
- if err != nil {
- return tls.Certificate{}, err
- }
- foundKey = true
- }
- } else if derBlock.Type == "PRIVATE KEY" || strings.HasSuffix(derBlock.Type, " PRIVATE KEY") {
- // RSA key
- if !foundKey {
- err = pem.Encode(keyBuilder, derBlock)
- if err != nil {
- return tls.Certificate{}, err
- }
- foundKey = true
- }
- } else {
- return tls.Certificate{}, fmt.Errorf("%s: unrecognized PEM block type: %s", fpath, derBlock.Type)
- }
- }
-
- certPEMBytes, keyPEMBytes := certBuilder.Bytes(), keyBuilder.Bytes()
- if len(certPEMBytes) == 0 {
- return tls.Certificate{}, fmt.Errorf("%s: failed to parse PEM data", fpath)
- }
- if len(keyPEMBytes) == 0 {
- return tls.Certificate{}, fmt.Errorf("%s: no private key block found", fpath)
- }
-
- cert, err := tls.X509KeyPair(certPEMBytes, keyPEMBytes)
- if err != nil {
- return tls.Certificate{}, fmt.Errorf("%s: making X509 key pair: %v", fpath, err)
- }
-
- return cert, nil
-}
-
-var _ CertificateLoader = (FolderLoader)(nil)
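The heart of x509CertFromCertAndKeyPEMFile above is the loop that splits one bundle into a certificate chain plus the first private key it finds. A compact sketch of that splitting idea on an in-memory bundle, collapsing the EC-parameters special case and using dummy block bodies (not a real key pair):

package main

import (
	"bytes"
	"encoding/pem"
	"fmt"
)

func main() {
	bundle := []byte(`-----BEGIN CERTIFICATE-----
aGVsbG8=
-----END CERTIFICATE-----
-----BEGIN EC PRIVATE KEY-----
d29ybGQ=
-----END EC PRIVATE KEY-----
`)
	var certPEM, keyPEM bytes.Buffer
	for {
		var block *pem.Block
		block, bundle = pem.Decode(bundle) // consume one PEM block per iteration
		if block == nil {
			break
		}
		if block.Type == "CERTIFICATE" {
			pem.Encode(&certPEM, block) // certificates accumulate into the chain
		} else {
			pem.Encode(&keyPEM, block) // the real loader keeps only the first key
		}
	}
	fmt.Println(certPEM.Len() > 0, keyPEM.Len() > 0) // true true
}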
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/internalissuer.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/internalissuer.go
deleted file mode 100644
index 7a25f6d3..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/internalissuer.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "bytes"
- "context"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/caddyserver/caddy/v2/modules/caddypki"
- "github.com/caddyserver/certmagic"
- "github.com/smallstep/certificates/authority/provisioner"
- "go.uber.org/zap"
-)
-
-func init() {
- caddy.RegisterModule(InternalIssuer{})
-}
-
-// InternalIssuer is a certificate issuer that generates
-// certificates internally using a locally-configured
-// CA which can be customized using the `pki` app.
-type InternalIssuer struct {
- // The ID of the CA to use for signing. The default
- // CA ID is "local". The CA can be configured with the
- // `pki` app.
- CA string `json:"ca,omitempty"`
-
- // The validity period of certificates.
- Lifetime caddy.Duration `json:"lifetime,omitempty"`
-
- // If true, the root will be the issuer instead of
- // the intermediate. This is NOT recommended and should
- // only be used when devices/clients do not properly
- // validate certificate chains.
- SignWithRoot bool `json:"sign_with_root,omitempty"`
-
- ca *caddypki.CA
- logger *zap.Logger
-}
-
-// CaddyModule returns the Caddy module information.
-func (InternalIssuer) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls.issuance.internal",
- New: func() caddy.Module { return new(InternalIssuer) },
- }
-}
-
-// Provision sets up the issuer.
-func (iss *InternalIssuer) Provision(ctx caddy.Context) error {
- iss.logger = ctx.Logger(iss)
-
- // get a reference to the configured CA
- appModule, err := ctx.App("pki")
- if err != nil {
- return err
- }
- pkiApp := appModule.(*caddypki.PKI)
- if iss.CA == "" {
- iss.CA = caddypki.DefaultCAID
- }
- ca, ok := pkiApp.CAs[iss.CA]
- if !ok {
- return fmt.Errorf("no certificate authority configured with id: %s", iss.CA)
- }
- iss.ca = ca
-
- // set any other default values
- if iss.Lifetime == 0 {
- iss.Lifetime = caddy.Duration(defaultInternalCertLifetime)
- }
-
- return nil
-}
-
-// IssuerKey returns the unique issuer key for the
-// configured CA endpoint.
-func (iss InternalIssuer) IssuerKey() string {
- return iss.ca.ID
-}
-
-// Issue issues a certificate to satisfy the CSR.
-func (iss InternalIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
- // prepare the signing authority
- authCfg := caddypki.AuthorityConfig{
- SignWithRoot: iss.SignWithRoot,
- }
- auth, err := iss.ca.NewAuthority(authCfg)
- if err != nil {
- return nil, err
- }
-
- // get the cert (public key) that will be used for signing
- var issuerCert *x509.Certificate
- if iss.SignWithRoot {
- issuerCert = iss.ca.RootCertificate()
- } else {
- issuerCert = iss.ca.IntermediateCertificate()
- }
-
- // ensure issued certificate does not expire later than its issuer
- lifetime := time.Duration(iss.Lifetime)
- if time.Now().Add(lifetime).After(issuerCert.NotAfter) {
- lifetime = time.Until(issuerCert.NotAfter)
- iss.logger.Warn("cert lifetime would exceed issuer NotAfter, clamping lifetime",
- zap.Duration("orig_lifetime", time.Duration(iss.Lifetime)),
- zap.Duration("lifetime", lifetime),
- zap.Time("not_after", issuerCert.NotAfter),
- )
- }
-
- certChain, err := auth.Sign(csr, provisioner.SignOptions{}, customCertLifetime(caddy.Duration(lifetime)))
- if err != nil {
- return nil, err
- }
-
- var buf bytes.Buffer
- for _, cert := range certChain {
- err := pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
- if err != nil {
- return nil, err
- }
- }
-
- return &certmagic.IssuedCertificate{
- Certificate: buf.Bytes(),
- }, nil
-}
-
-// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
-//
-// ... internal {
-// ca
-// }
-//
-func (iss *InternalIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- for d.NextBlock(0) {
- switch d.Val() {
- case "ca":
- if !d.AllArgs(&iss.CA) {
- return d.ArgErr()
- }
- }
- }
- }
- return nil
-}
-
-// customCertLifetime allows us to customize certificates that are issued
-// by Smallstep libs, particularly the NotBefore & NotAfter dates.
-type customCertLifetime time.Duration
-
-func (d customCertLifetime) Modify(cert *x509.Certificate, _ provisioner.SignOptions) error {
- cert.NotBefore = time.Now()
- cert.NotAfter = cert.NotBefore.Add(time.Duration(d))
- return nil
-}
-
-const defaultInternalCertLifetime = 12 * time.Hour
-
-// Interface guards
-var (
- _ caddy.Provisioner = (*InternalIssuer)(nil)
- _ certmagic.Issuer = (*InternalIssuer)(nil)
- _ provisioner.CertificateModifier = (*customCertLifetime)(nil)
-)
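The lifetime clamp in Issue above boils down to one rule: a leaf certificate must never outlive its issuer. Extracted into a self-contained helper:

package main

import (
	"fmt"
	"time"
)

// clamp mirrors the check in InternalIssuer.Issue: if the requested
// lifetime would run past the issuer's NotAfter, shrink it to fit.
func clamp(lifetime time.Duration, issuerNotAfter time.Time) time.Duration {
	if time.Now().Add(lifetime).After(issuerNotAfter) {
		return time.Until(issuerNotAfter)
	}
	return lifetime
}

func main() {
	// issuer expires in 1h, so a requested 24h lifetime is clamped to ~1h
	fmt.Println(clamp(24*time.Hour, time.Now().Add(time.Hour)))
}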
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/matchers.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/matchers.go
deleted file mode 100644
index aee0e726..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/matchers.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "crypto/tls"
- "fmt"
- "net"
- "strings"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/certmagic"
- "go.uber.org/zap"
-)
-
-func init() {
- caddy.RegisterModule(MatchServerName{})
- caddy.RegisterModule(MatchRemoteIP{})
-}
-
-// MatchServerName matches based on SNI. Names in
-// this list may use left-most-label wildcards,
-// similar to wildcard certificates.
-type MatchServerName []string
-
-// CaddyModule returns the Caddy module information.
-func (MatchServerName) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls.handshake_match.sni",
- New: func() caddy.Module { return new(MatchServerName) },
- }
-}
-
-// Match matches hello based on SNI.
-func (m MatchServerName) Match(hello *tls.ClientHelloInfo) bool {
- for _, name := range m {
- if certmagic.MatchWildcard(hello.ServerName, name) {
- return true
- }
- }
- return false
-}
-
-// MatchRemoteIP matches based on the remote IP of the
-// connection. Specific IPs or CIDR ranges can be specified.
-//
-// Note that IPs can sometimes be spoofed, so do not rely
-// on this as a replacement for actual authentication.
-type MatchRemoteIP struct {
- // The IPs or CIDR ranges to match.
- Ranges []string `json:"ranges,omitempty"`
-
- // The IPs or CIDR ranges to *NOT* match.
- NotRanges []string `json:"not_ranges,omitempty"`
-
- cidrs []*net.IPNet
- notCidrs []*net.IPNet
- logger *zap.Logger
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls.handshake_match.remote_ip",
- New: func() caddy.Module { return new(MatchRemoteIP) },
- }
-}
-
-// Provision parses m's IP ranges, either from IP or CIDR expressions.
-func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
- m.logger = ctx.Logger(m)
- for _, str := range m.Ranges {
- cidrs, err := m.parseIPRange(str)
- if err != nil {
- return err
- }
- m.cidrs = cidrs
- }
- for _, str := range m.NotRanges {
- cidrs, err := m.parseIPRange(str)
- if err != nil {
- return err
- }
- m.notCidrs = cidrs
- }
- return nil
-}
-
-// Match matches hello based on the connection's remote IP.
-func (m MatchRemoteIP) Match(hello *tls.ClientHelloInfo) bool {
- remoteAddr := hello.Conn.RemoteAddr().String()
- ipStr, _, err := net.SplitHostPort(remoteAddr)
- if err != nil {
- ipStr = remoteAddr // weird; maybe no port?
- }
- ip := net.ParseIP(ipStr)
- if ip == nil {
- m.logger.Error("invalid client IP addresss", zap.String("ip", ipStr))
- return false
- }
- return (len(m.cidrs) == 0 || m.matches(ip, m.cidrs)) &&
- (len(m.notCidrs) == 0 || !m.matches(ip, m.notCidrs))
-}
-
-func (MatchRemoteIP) parseIPRange(str string) ([]*net.IPNet, error) {
- var cidrs []*net.IPNet
- if strings.Contains(str, "/") {
- _, ipNet, err := net.ParseCIDR(str)
- if err != nil {
- return nil, fmt.Errorf("parsing CIDR expression: %v", err)
- }
- cidrs = append(cidrs, ipNet)
- } else {
- ip := net.ParseIP(str)
- if ip == nil {
- return nil, fmt.Errorf("invalid IP address: %s", str)
- }
- mask := len(ip) * 8
- cidrs = append(cidrs, &net.IPNet{
- IP: ip,
- Mask: net.CIDRMask(mask, mask),
- })
- }
- return cidrs, nil
-}
-
-func (MatchRemoteIP) matches(ip net.IP, ranges []*net.IPNet) bool {
- for _, ipRange := range ranges {
- if ipRange.Contains(ip) {
- return true
- }
- }
- return false
-}
-
-// Interface guards
-var (
- _ ConnectionMatcher = (*MatchServerName)(nil)
- _ ConnectionMatcher = (*MatchRemoteIP)(nil)
-)
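The single-address branch of parseIPRange above builds a host-route CIDR (a /32, or /128 for the 16-byte form net.ParseIP returns) so the matching code only ever deals with *net.IPNet. A compact variant that tries ParseCIDR first instead of checking for "/":

package main

import (
	"fmt"
	"net"
)

// ipToNet converts either a CIDR expression or a bare IP into a *net.IPNet.
func ipToNet(str string) (*net.IPNet, error) {
	if _, ipNet, err := net.ParseCIDR(str); err == nil {
		return ipNet, nil
	}
	ip := net.ParseIP(str)
	if ip == nil {
		return nil, fmt.Errorf("invalid IP address: %s", str)
	}
	mask := len(ip) * 8 // all bits fixed: matches exactly one address
	return &net.IPNet{IP: ip, Mask: net.CIDRMask(mask, mask)}, nil
}

func main() {
	n, _ := ipToNet("192.168.1.1")
	fmt.Println(n, n.Contains(net.ParseIP("192.168.1.1"))) // ... true
}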
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/pemloader.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/pemloader.go
deleted file mode 100644
index 61b08851..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/pemloader.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "crypto/tls"
- "fmt"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-func init() {
- caddy.RegisterModule(PEMLoader{})
-}
-
-// PEMLoader loads certificates and their associated keys by
-// decoding their PEM blocks directly. This has the advantage
-// of not needing to store them on disk at all.
-type PEMLoader []CertKeyPEMPair
-
-// CaddyModule returns the Caddy module information.
-func (PEMLoader) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls.certificates.load_pem",
- New: func() caddy.Module { return new(PEMLoader) },
- }
-}
-
-// CertKeyPEMPair pairs certificate and key PEM blocks.
-type CertKeyPEMPair struct {
- // The certificate (public key) in PEM format.
- CertificatePEM string `json:"certificate"`
-
- // The private key in PEM format.
- KeyPEM string `json:"key"`
-
- // Arbitrary values to associate with this certificate.
- // Can be useful when you want to select a particular
- // certificate when there may be multiple valid candidates.
- Tags []string `json:"tags,omitempty"`
-}
-
-// LoadCertificates returns the certificates contained in pl.
-func (pl PEMLoader) LoadCertificates() ([]Certificate, error) {
- certs := make([]Certificate, 0, len(pl))
- for i, pair := range pl {
- cert, err := tls.X509KeyPair([]byte(pair.CertificatePEM), []byte(pair.KeyPEM))
- if err != nil {
- return nil, fmt.Errorf("PEM pair %d: %v", i, err)
- }
- certs = append(certs, Certificate{
- Certificate: cert,
- Tags: pair.Tags,
- })
- }
- return certs, nil
-}
-
-// Interface guard
-var _ CertificateLoader = (PEMLoader)(nil)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/sessiontickets.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/sessiontickets.go
deleted file mode 100644
index bfc5628a..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/sessiontickets.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "crypto/rand"
- "crypto/tls"
- "encoding/json"
- "fmt"
- "io"
- "log"
- "runtime/debug"
- "sync"
- "time"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-// SessionTicketService configures and manages TLS session tickets.
-type SessionTicketService struct {
- // KeySource is the method by which Caddy produces or obtains
- // TLS session ticket keys (STEKs). By default, Caddy generates
- // them internally using a secure pseudorandom source.
- KeySource json.RawMessage `json:"key_source,omitempty" caddy:"namespace=tls.stek inline_key=provider"`
-
- // How often Caddy rotates STEKs. Default: 12h.
- RotationInterval caddy.Duration `json:"rotation_interval,omitempty"`
-
- // The maximum number of keys to keep in rotation. Default: 4.
- MaxKeys int `json:"max_keys,omitempty"`
-
- // Disables STEK rotation.
- DisableRotation bool `json:"disable_rotation,omitempty"`
-
- // Disables TLS session resumption by tickets.
- Disabled bool `json:"disabled,omitempty"`
-
- keySource STEKProvider
- configs map[*tls.Config]struct{}
- stopChan chan struct{}
- currentKeys [][32]byte
- mu *sync.Mutex
-}
-
-func (s *SessionTicketService) provision(ctx caddy.Context) error {
- s.configs = make(map[*tls.Config]struct{})
- s.mu = new(sync.Mutex)
-
- // establish sane defaults
- if s.RotationInterval == 0 {
- s.RotationInterval = caddy.Duration(defaultSTEKRotationInterval)
- }
- if s.MaxKeys <= 0 {
- s.MaxKeys = defaultMaxSTEKs
- }
- if s.KeySource == nil {
- s.KeySource = json.RawMessage(`{"provider":"standard"}`)
- }
-
- // load the STEK module, which will provide keys
- val, err := ctx.LoadModule(s, "KeySource")
- if err != nil {
- return fmt.Errorf("loading TLS session ticket ephemeral keys provider module: %s", err)
- }
- s.keySource = val.(STEKProvider)
-
- // if session tickets or just rotation are
- // disabled, no need to start service
- if s.Disabled || s.DisableRotation {
- return nil
- }
-
- // start the STEK module; this ensures we have
- // a starting key before any config needs one
- return s.start()
-}
-
-// start loads the starting STEKs and spawns a goroutine
-// which loops to rotate the STEKs, which continues until
-// stop() is called. If start() was already called, this
-// is a no-op.
-func (s *SessionTicketService) start() error {
- if s.stopChan != nil {
- return nil
- }
- s.stopChan = make(chan struct{})
-
- // initializing the key source gives us our
- // initial key(s) to start with; if successful,
- // we need to be sure to call Next() so that
- // the key source can know when it is done
- initialKeys, err := s.keySource.Initialize(s)
- if err != nil {
- return fmt.Errorf("setting STEK module configuration: %v", err)
- }
-
- s.mu.Lock()
- s.currentKeys = initialKeys
- s.mu.Unlock()
-
- // keep the keys rotated
- go s.stayUpdated()
-
- return nil
-}
-
-// stayUpdated is a blocking function which rotates
-// the keys whenever new ones are sent. It reads
-// from keysChan until s.stop() is called.
-func (s *SessionTicketService) stayUpdated() {
- defer func() {
- if err := recover(); err != nil {
- log.Printf("[PANIC] session ticket service: %v\n%s", err, debug.Stack())
- }
- }()
-
- // this call is essential when Initialize()
- // returns without error, because the stop
- // channel is the only way the key source
- // will know when to clean up
- keysChan := s.keySource.Next(s.stopChan)
-
- for {
- select {
- case newKeys := <-keysChan:
- s.mu.Lock()
- s.currentKeys = newKeys
- configs := s.configs
- s.mu.Unlock()
- for cfg := range configs {
- cfg.SetSessionTicketKeys(newKeys)
- }
- case <-s.stopChan:
- return
- }
- }
-}
-
-// stop terminates the key rotation goroutine.
-func (s *SessionTicketService) stop() {
- if s.stopChan != nil {
- close(s.stopChan)
- }
-}
-
-// register sets the session ticket keys on cfg
-// and keeps them updated. Any values registered
-// must be unregistered, or they will not be
-// garbage-collected. s.start() must have been
-// called first. If session tickets are disabled
-// or if ticket key rotation is disabled, this
-// function is a no-op.
-func (s *SessionTicketService) register(cfg *tls.Config) {
- if s.Disabled || s.DisableRotation {
- return
- }
- s.mu.Lock()
- cfg.SetSessionTicketKeys(s.currentKeys)
- s.configs[cfg] = struct{}{}
- s.mu.Unlock()
-}
-
-// unregister stops session key management on cfg and
-// removes the internal stored reference to cfg. If
-// session tickets are disabled or if ticket key rotation
-// is disabled, this function is a no-op.
-func (s *SessionTicketService) unregister(cfg *tls.Config) {
- if s.Disabled || s.DisableRotation {
- return
- }
- s.mu.Lock()
- delete(s.configs, cfg)
- s.mu.Unlock()
-}
-
-// RotateSTEKs rotates the keys in keys by producing a new key and eliding
-// the oldest one. The new slice of keys is returned.
-func (s SessionTicketService) RotateSTEKs(keys [][32]byte) ([][32]byte, error) {
- // produce a new key
- newKey, err := s.generateSTEK()
- if err != nil {
- return nil, fmt.Errorf("generating STEK: %v", err)
- }
-
- // we need to prepend this new key to the list of
- // keys so that it is preferred, but we need to be
- // careful that we do not grow the slice larger
- // than MaxKeys, otherwise we'll be storing one
- // more key in memory than we expect; so be sure
- // that the slice does not grow beyond the limit
- // even for a brief period of time, since there's
- // no guarantee when that extra allocation will
- // be overwritten; this is why we first trim the
- // length to one less the max, THEN prepend the
- // new key
- if len(keys) >= s.MaxKeys {
- keys[len(keys)-1] = [32]byte{} // zero-out memory of oldest key
- keys = keys[:s.MaxKeys-1] // trim length of slice
- }
- keys = append([][32]byte{newKey}, keys...) // prepend new key
-
- return keys, nil
-}
-
-// generateSTEK generates key material suitable for use as a
-// session ticket ephemeral key.
-func (s *SessionTicketService) generateSTEK() ([32]byte, error) {
- var newTicketKey [32]byte
- _, err := io.ReadFull(rand.Reader, newTicketKey[:])
- return newTicketKey, err
-}
-
-// STEKProvider is a type that can provide session ticket ephemeral
-// keys (STEKs).
-type STEKProvider interface {
- // Initialize provides the STEK configuration to the STEK
- // module so that it can obtain and manage keys accordingly.
- // It returns the initial key(s) to use. Implementations can
- // rely on Next() being called if Initialize() returns
- // without error, so that it may know when it is done.
- Initialize(config *SessionTicketService) ([][32]byte, error)
-
- // Next returns the channel through which the next session
- // ticket keys will be transmitted until doneChan is closed.
- // Keys should be sent on keysChan as they are updated.
- // When doneChan is closed, any resources allocated in
- // Initialize() must be cleaned up.
- Next(doneChan <-chan struct{}) (keysChan <-chan [][32]byte)
-}
-
-const (
- defaultSTEKRotationInterval = 12 * time.Hour
- defaultMaxSTEKs = 4
-)
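RotateSTEKs above prepends a fresh key and zeroes the evicted one before trimming, so the slice never briefly exceeds MaxKeys. A self-contained sketch of that rotation shape with the default cap of 4:

package main

import (
	"crypto/rand"
	"fmt"
	"io"
)

const maxKeys = 4 // mirrors defaultMaxSTEKs

func rotate(keys [][32]byte) ([][32]byte, error) {
	var newKey [32]byte
	if _, err := io.ReadFull(rand.Reader, newKey[:]); err != nil {
		return nil, err
	}
	if len(keys) >= maxKeys {
		keys[len(keys)-1] = [32]byte{} // zero the oldest key's memory first
		keys = keys[:maxKeys-1]        // then trim, before prepending
	}
	return append([][32]byte{newKey}, keys...), nil
}

func main() {
	var keys [][32]byte
	for i := 0; i < 6; i++ {
		keys, _ = rotate(keys)
	}
	fmt.Println(len(keys)) // stays capped at 4 no matter how many rotations
}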
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/storageloader.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/storageloader.go
deleted file mode 100644
index e78996f1..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/storageloader.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "crypto/tls"
- "fmt"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/certmagic"
-)
-
-func init() {
- caddy.RegisterModule(StorageLoader{})
-}
-
-// StorageLoader loads certificates and their associated keys
-// from the globally configured storage module.
-type StorageLoader struct {
- // A list of pairs of certificate and key file names along with their
- // encoding format so that they can be loaded from storage.
- Pairs []CertKeyFilePair `json:"pairs,omitempty"`
-
- // Reference to the globally configured storage module.
- storage certmagic.Storage
-}
-
-// CaddyModule returns the Caddy module information.
-func (StorageLoader) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls.certificates.load_storage",
- New: func() caddy.Module { return new(StorageLoader) },
- }
-}
-
-// Provision loads the storage module for sl.
-func (sl *StorageLoader) Provision(ctx caddy.Context) error {
- sl.storage = ctx.Storage()
- return nil
-}
-
-// LoadCertificates returns the certificates to be loaded by sl.
-func (sl StorageLoader) LoadCertificates() ([]Certificate, error) {
- certs := make([]Certificate, 0, len(sl.Pairs))
- for _, pair := range sl.Pairs {
- certData, err := sl.storage.Load(pair.Certificate)
- if err != nil {
- return nil, err
- }
- keyData, err := sl.storage.Load(pair.Key)
- if err != nil {
- return nil, err
- }
-
- var cert tls.Certificate
- switch pair.Format {
- case "":
- fallthrough
- case "pem":
- cert, err = tls.X509KeyPair(certData, keyData)
- default:
- return nil, fmt.Errorf("unrecognized certificate/key encoding format: %s", pair.Format)
- }
- if err != nil {
- return nil, err
- }
-
- certs = append(certs, Certificate{Certificate: cert, Tags: pair.Tags})
- }
- return certs, nil
-}
-
-// Interface guard
-var (
- _ CertificateLoader = (*StorageLoader)(nil)
- _ caddy.Provisioner = (*StorageLoader)(nil)
-)
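
Note: the core of the deleted `LoadCertificates` is the PEM branch, which turns two byte slices into a `tls.Certificate`. A minimal sketch of that shape follows; it reads from disk where the original read from a storage backend, and the file paths are placeholders:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"os"
)

// loadPEMPair mirrors the "pem" branch of LoadCertificates above:
// two byte slices in, one tls.Certificate out.
func loadPEMPair(certPath, keyPath string) (tls.Certificate, error) {
	certData, err := os.ReadFile(certPath)
	if err != nil {
		return tls.Certificate{}, err
	}
	keyData, err := os.ReadFile(keyPath)
	if err != nil {
		return tls.Certificate{}, err
	}
	return tls.X509KeyPair(certData, keyData)
}

func main() {
	cert, err := loadPEMPair("cert.pem", "key.pem")
	if err != nil {
		fmt.Fprintln(os.Stderr, "load failed:", err)
		os.Exit(1)
	}
	fmt.Println("loaded certificate chain of", len(cert.Certificate), "blocks")
}
```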
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/tls.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/tls.go
deleted file mode 100644
index a93183e6..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/tls.go
+++ /dev/null
@@ -1,571 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "crypto/tls"
- "encoding/json"
- "fmt"
- "log"
- "net/http"
- "runtime/debug"
- "sync"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/certmagic"
- "go.uber.org/zap"
-)
-
-func init() {
- caddy.RegisterModule(TLS{})
- caddy.RegisterModule(AutomateLoader{})
-}
-
-// TLS provides TLS facilities including certificate
-// loading and management, client auth, and more.
-type TLS struct {
- // Caches certificates in memory for quick use during
- // TLS handshakes. Each key is the name of a certificate
- // loader module. All loaded certificates get pooled
- // into the same cache and may be used to complete TLS
- // handshakes for the relevant server names (SNI).
- // Certificates loaded manually (anything other than
- // "automate") are not automatically managed and will
- // have to be refreshed manually before they expire.
- CertificatesRaw caddy.ModuleMap `json:"certificates,omitempty" caddy:"namespace=tls.certificates"`
-
- // Configures the automation of certificate management.
- Automation *AutomationConfig `json:"automation,omitempty"`
-
- // Configures session ticket ephemeral keys (STEKs).
- SessionTickets *SessionTicketService `json:"session_tickets,omitempty"`
-
- // Configures the in-memory certificate cache.
- Cache *CertCacheOptions `json:"cache,omitempty"`
-
- // Disables OCSP stapling for manually-managed certificates only.
- // To configure OCSP stapling for automated certificates, use an
- // automation policy instead.
- //
- // Disabling OCSP stapling puts clients at greater risk, reduces their
- // privacy, and usually lowers client performance. It is NOT recommended
- // to disable this unless you are able to justify the costs.
- // EXPERIMENTAL. Subject to change.
- DisableOCSPStapling bool `json:"disable_ocsp_stapling,omitempty"`
-
- certificateLoaders []CertificateLoader
- automateNames []string
- certCache *certmagic.Cache
- ctx caddy.Context
- storageCleanTicker *time.Ticker
- storageCleanStop chan struct{}
- logger *zap.Logger
-}
-
-// CaddyModule returns the Caddy module information.
-func (TLS) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls",
- New: func() caddy.Module { return new(TLS) },
- }
-}
-
-// Provision sets up the configuration for the TLS app.
-func (t *TLS) Provision(ctx caddy.Context) error {
- t.ctx = ctx
- t.logger = ctx.Logger(t)
- repl := caddy.NewReplacer()
-
- // set up a new certificate cache; this (re)loads all certificates
- cacheOpts := certmagic.CacheOptions{
- GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) {
- return t.getConfigForName(cert.Names[0]), nil
- },
- Logger: t.logger.Named("cache"),
- }
- if t.Automation != nil {
- cacheOpts.OCSPCheckInterval = time.Duration(t.Automation.OCSPCheckInterval)
- cacheOpts.RenewCheckInterval = time.Duration(t.Automation.RenewCheckInterval)
- }
- if t.Cache != nil {
- cacheOpts.Capacity = t.Cache.Capacity
- }
- if cacheOpts.Capacity <= 0 {
- cacheOpts.Capacity = 10000
- }
- t.certCache = certmagic.NewCache(cacheOpts)
-
- // certificate loaders
- val, err := ctx.LoadModule(t, "CertificatesRaw")
- if err != nil {
- return fmt.Errorf("loading certificate loader modules: %s", err)
- }
- for modName, modIface := range val.(map[string]interface{}) {
- if modName == "automate" {
- // special case; these will be loaded in later using our automation facilities,
- // which we want to avoid doing during provisioning
- if automateNames, ok := modIface.(*AutomateLoader); ok && automateNames != nil {
- t.automateNames = []string(*automateNames)
- } else {
- return fmt.Errorf("loading certificates with 'automate' requires array of strings, got: %T", modIface)
- }
- continue
- }
- t.certificateLoaders = append(t.certificateLoaders, modIface.(CertificateLoader))
- }
-
- // automation/management policies
- if t.Automation == nil {
- t.Automation = new(AutomationConfig)
- }
- t.Automation.defaultPublicAutomationPolicy = new(AutomationPolicy)
- err = t.Automation.defaultPublicAutomationPolicy.Provision(t)
- if err != nil {
- return fmt.Errorf("provisioning default public automation policy: %v", err)
- }
- for _, n := range t.automateNames {
- // if any names specified by the "automate" loader do not qualify for a public
- // certificate, we should initialize a default internal automation policy
- // (but we don't want to do this unnecessarily, since it may prompt for password!)
- if certmagic.SubjectQualifiesForPublicCert(n) {
- continue
- }
- t.Automation.defaultInternalAutomationPolicy = &AutomationPolicy{
- IssuersRaw: []json.RawMessage{json.RawMessage(`{"module":"internal"}`)},
- }
- err = t.Automation.defaultInternalAutomationPolicy.Provision(t)
- if err != nil {
- return fmt.Errorf("provisioning default internal automation policy: %v", err)
- }
- break
- }
- for i, ap := range t.Automation.Policies {
- err := ap.Provision(t)
- if err != nil {
- return fmt.Errorf("provisioning automation policy %d: %v", i, err)
- }
- }
-
- // session ticket ephemeral keys (STEK) service and provider
- if t.SessionTickets != nil {
- err := t.SessionTickets.provision(ctx)
- if err != nil {
- return fmt.Errorf("provisioning session tickets configuration: %v", err)
- }
- }
-
- // on-demand rate limiting
- if t.Automation != nil && t.Automation.OnDemand != nil && t.Automation.OnDemand.RateLimit != nil {
- onDemandRateLimiter.SetMaxEvents(t.Automation.OnDemand.RateLimit.Burst)
- onDemandRateLimiter.SetWindow(time.Duration(t.Automation.OnDemand.RateLimit.Interval))
- } else {
- // remove any existing rate limiter
- onDemandRateLimiter.SetMaxEvents(0)
- onDemandRateLimiter.SetWindow(0)
- }
-
- // run replacer on ask URL (for environment variables)
- if t.Automation != nil && t.Automation.OnDemand != nil && t.Automation.OnDemand.Ask != "" {
- t.Automation.OnDemand.Ask = repl.ReplaceAll(t.Automation.OnDemand.Ask, "")
- }
-
- // load manual/static (unmanaged) certificates - we do this in
- // provision so that other apps (such as http) can know which
- // certificates have been manually loaded, and also so that
- // commands like validate can be a better test
- magic := certmagic.New(t.certCache, certmagic.Config{
- Storage: ctx.Storage(),
- Logger: t.logger,
- OCSP: certmagic.OCSPConfig{
- DisableStapling: t.DisableOCSPStapling,
- },
- })
- for _, loader := range t.certificateLoaders {
- certs, err := loader.LoadCertificates()
- if err != nil {
- return fmt.Errorf("loading certificates: %v", err)
- }
- for _, cert := range certs {
- err := magic.CacheUnmanagedTLSCertificate(cert.Certificate, cert.Tags)
- if err != nil {
- return fmt.Errorf("caching unmanaged certificate: %v", err)
- }
- }
- }
-
- return nil
-}
-
-// Validate validates t's configuration.
-func (t *TLS) Validate() error {
- if t.Automation != nil {
- // ensure that hosts aren't repeated; since only the first
- // automation policy is used, repeating a host in the lists
- // isn't useful and is probably a mistake; same for two
- // catch-all/default policies
- var hasDefault bool
- hostSet := make(map[string]int)
- for i, ap := range t.Automation.Policies {
- if len(ap.Subjects) == 0 {
- if hasDefault {
- return fmt.Errorf("automation policy %d is the second policy that acts as default/catch-all, but will never be used", i)
- }
- hasDefault = true
- }
- for _, h := range ap.Subjects {
- if first, ok := hostSet[h]; ok {
- return fmt.Errorf("automation policy %d: cannot apply more than one automation policy to host: %s (first match in policy %d)", i, h, first)
- }
- hostSet[h] = i
- }
- }
- }
- if t.Cache != nil {
- if t.Cache.Capacity < 0 {
- return fmt.Errorf("cache capacity must be >= 0")
- }
- }
- return nil
-}
-
-// Start activates the TLS module.
-func (t *TLS) Start() error {
- // warn if on-demand TLS is enabled but no restrictions are in place
- if t.Automation.OnDemand == nil ||
- (t.Automation.OnDemand.Ask == "" && t.Automation.OnDemand.RateLimit == nil) {
- for _, ap := range t.Automation.Policies {
- if ap.OnDemand {
- t.logger.Warn("YOUR SERVER MAY BE VULNERABLE TO ABUSE: on-demand TLS is enabled, but no protections are in place",
- zap.String("docs", "https://caddyserver.com/docs/automatic-https#on-demand-tls"))
- break
- }
- }
- }
-
- // now that we are running, and all manual certificates have
- // been loaded, time to load the automated/managed certificates
- err := t.Manage(t.automateNames)
- if err != nil {
- return fmt.Errorf("automate: managing %v: %v", t.automateNames, err)
- }
-
- t.keepStorageClean()
-
- return nil
-}
-
-// Stop stops the TLS module and cleans up any allocations.
-func (t *TLS) Stop() error {
- // stop the storage cleaner goroutine and ticker
- if t.storageCleanStop != nil {
- close(t.storageCleanStop)
- }
- if t.storageCleanTicker != nil {
- t.storageCleanTicker.Stop()
- }
- return nil
-}
-
-// Cleanup frees up resources allocated during Provision.
-func (t *TLS) Cleanup() error {
- // stop the certificate cache
- if t.certCache != nil {
- t.certCache.Stop()
- }
-
- // stop the session ticket rotation goroutine
- if t.SessionTickets != nil {
- t.SessionTickets.stop()
- }
-
- return nil
-}
-
-// Manage immediately begins managing names according to the
-// matching automation policy.
-func (t *TLS) Manage(names []string) error {
- // for a large number of names, we can be more memory-efficient
- // by making only one certmagic.Config for all the names that
- // use that config, rather than calling ManageAsync once for
- // every name; so first, bin names by AutomationPolicy
- policyToNames := make(map[*AutomationPolicy][]string)
- for _, name := range names {
- ap := t.getAutomationPolicyForName(name)
- policyToNames[ap] = append(policyToNames[ap], name)
- }
-
- // now that names are grouped by policy, we can simply make one
- // certmagic.Config for each (potentially large) group of names
- // and call ManageAsync just once for the whole batch
- for ap, names := range policyToNames {
- err := ap.magic.ManageAsync(t.ctx.Context, names)
- if err != nil {
- return fmt.Errorf("automate: manage %v: %v", names, err)
- }
- }
-
- return nil
-}
-
-// HandleHTTPChallenge ensures that the HTTP challenge is handled for the
-// certificate named by r.Host, if it is an HTTP challenge request. It
-// requires that the automation policy for r.Host has an issuer of type
-// *certmagic.ACMEManager, or one that is ACME-enabled (GetACMEIssuer()).
-func (t *TLS) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool {
- // no-op if it's not an ACME challenge request
- if !certmagic.LooksLikeHTTPChallenge(r) {
- return false
- }
-
- // try all the issuers until we find the one that initiated the challenge
- ap := t.getAutomationPolicyForName(r.Host)
- type acmeCapable interface{ GetACMEIssuer() *ACMEIssuer }
- for _, iss := range ap.magic.Issuers {
- if am, ok := iss.(acmeCapable); ok {
- iss := am.GetACMEIssuer()
- if certmagic.NewACMEManager(iss.magic, iss.template).HandleHTTPChallenge(w, r) {
- return true
- }
- }
- }
-
- // it's possible another server in this process initiated the challenge;
- // users have requested that Caddy only handle HTTP challenges it initiated,
- // so that users can proxy the others through to their backends; but we
- // might not have an automation policy for all identifiers that are trying
- // to get certificates (e.g. the admin endpoint), so we do this manual check
- if challenge, ok := certmagic.GetACMEChallenge(r.Host); ok {
- return certmagic.SolveHTTPChallenge(t.logger, w, r, challenge.Challenge)
- }
-
- return false
-}
-
-// AddAutomationPolicy provisions and adds ap to the list of the app's
-// automation policies. If an existing automation policy exists that has
-// fewer hosts in its list than ap does, ap will be inserted before that
-// other policy (this helps ensure that ap will be prioritized/chosen
-// over, say, a catch-all policy).
-func (t *TLS) AddAutomationPolicy(ap *AutomationPolicy) error {
- if t.Automation == nil {
- t.Automation = new(AutomationConfig)
- }
- err := ap.Provision(t)
- if err != nil {
- return err
- }
- // sort new automation policies just before any other which is a superset
- // of this one; if we find an existing policy that covers every subject in
- // ap but less specifically (e.g. a catch-all policy, or one with wildcards
- // or with fewer subjects), insert ap just before it, otherwise ap would
- // never be used because the first matching policy is more general
- for i, existing := range t.Automation.Policies {
- // first see if existing is superset of ap for all names
- var otherIsSuperset bool
- outer:
- for _, thisSubj := range ap.Subjects {
- for _, otherSubj := range existing.Subjects {
- if certmagic.MatchWildcard(thisSubj, otherSubj) {
- otherIsSuperset = true
- break outer
- }
- }
- }
- // if existing AP is a superset or if it contains fewer names (i.e. is
- // more general), then new AP is more specific, so insert before it
- if otherIsSuperset || len(existing.Subjects) < len(ap.Subjects) {
- t.Automation.Policies = append(t.Automation.Policies[:i],
- append([]*AutomationPolicy{ap}, t.Automation.Policies[i:]...)...)
- return nil
- }
- }
- // otherwise just append the new one
- t.Automation.Policies = append(t.Automation.Policies, ap)
- return nil
-}
-
-func (t *TLS) getConfigForName(name string) *certmagic.Config {
- ap := t.getAutomationPolicyForName(name)
- return ap.magic
-}
-
-// getAutomationPolicyForName returns the first matching automation policy
-// for the given subject name. If no matching policy can be found, the
-// default policy is used, depending on whether the name qualifies for a
-// public certificate or not.
-func (t *TLS) getAutomationPolicyForName(name string) *AutomationPolicy {
- for _, ap := range t.Automation.Policies {
- if len(ap.Subjects) == 0 {
- return ap // no host filter is an automatic match
- }
- for _, h := range ap.Subjects {
- if certmagic.MatchWildcard(name, h) {
- return ap
- }
- }
- }
- if certmagic.SubjectQualifiesForPublicCert(name) || t.Automation.defaultInternalAutomationPolicy == nil {
- return t.Automation.defaultPublicAutomationPolicy
- }
- return t.Automation.defaultInternalAutomationPolicy
-}
-
-// AllMatchingCertificates returns the list of all certificates in
-// the cache which could be used to satisfy the given SAN.
-func (t *TLS) AllMatchingCertificates(san string) []certmagic.Certificate {
- return t.certCache.AllMatchingCertificates(san)
-}
-
-// keepStorageClean starts a goroutine that immediately cleans up all
-// known storage units if it was not recently done, and then runs the
-// operation at every tick from t.storageCleanTicker.
-func (t *TLS) keepStorageClean() {
- t.storageCleanTicker = time.NewTicker(t.storageCleanInterval())
- t.storageCleanStop = make(chan struct{})
- go func() {
- defer func() {
- if err := recover(); err != nil {
- log.Printf("[PANIC] storage cleaner: %v\n%s", err, debug.Stack())
- }
- }()
- t.cleanStorageUnits()
- for {
- select {
- case <-t.storageCleanStop:
- return
- case <-t.storageCleanTicker.C:
- t.cleanStorageUnits()
- }
- }
- }()
-}
-
-func (t *TLS) cleanStorageUnits() {
- storageCleanMu.Lock()
- defer storageCleanMu.Unlock()
-
- // If storage was cleaned recently, don't do it again for now. Although the ticker
- // drops missed ticks for us, config reloads discard the old ticker and replace it
- // with a new one, possibly causing a cleaning to happen again too soon.
- // (We divide the interval by 2 because the actual cleaning takes non-zero time,
- // and we don't want to skip cleanings if we don't have to; whereas if a cleaning
- // took the entire interval, we'd probably want to skip the next one so we aren't
- // constantly cleaning. This allows cleanings to take up to half the interval's
- // duration before we decide to skip the next one.)
- if !storageClean.IsZero() && time.Since(storageClean) < t.storageCleanInterval()/2 {
- return
- }
-
- // mark when storage cleaning was last initiated
- storageClean = time.Now()
-
- options := certmagic.CleanStorageOptions{
- OCSPStaples: true,
- ExpiredCerts: true,
- ExpiredCertGracePeriod: 24 * time.Hour * 14,
- }
-
- // avoid cleaning same storage more than once per cleaning cycle
- storagesCleaned := make(map[string]struct{})
-
- // start with the default/global storage
- storage := t.ctx.Storage()
- storageStr := fmt.Sprintf("%v", storage)
- t.logger.Info("cleaning storage unit", zap.String("description", storageStr))
- certmagic.CleanStorage(t.ctx, storage, options)
- storagesCleaned[storageStr] = struct{}{}
-
- // then clean each storage defined in ACME automation policies
- if t.Automation != nil {
- for _, ap := range t.Automation.Policies {
- if ap.storage == nil {
- continue
- }
- storageStr := fmt.Sprintf("%v", ap.storage)
- if _, ok := storagesCleaned[storageStr]; ok {
- continue
- }
- t.logger.Info("cleaning storage unit", zap.String("description", storageStr))
- certmagic.CleanStorage(t.ctx, ap.storage, options)
- storagesCleaned[storageStr] = struct{}{}
- }
- }
-
- t.logger.Info("finished cleaning storage units")
-}
-
-func (t *TLS) storageCleanInterval() time.Duration {
- if t.Automation != nil && t.Automation.StorageCleanInterval > 0 {
- return time.Duration(t.Automation.StorageCleanInterval)
- }
- return defaultStorageCleanInterval
-}
-
-// CertificateLoader is a type that can load certificates.
-// Certificates can optionally be associated with tags.
-type CertificateLoader interface {
- LoadCertificates() ([]Certificate, error)
-}
-
-// Certificate is a TLS certificate, optionally
-// associated with arbitrary tags.
-type Certificate struct {
- tls.Certificate
- Tags []string
-}
-
-// AutomateLoader will automatically manage certificates for the names
-// in the list, including obtaining and renewing certificates. Automated
-// certificates are managed according to their matching automation policy,
-// configured elsewhere in this app.
-//
-// This is a no-op certificate loader module that is treated as a special
-// case: it uses this app's automation features to load certificates for the
-// list of hostnames, rather than loading certificates manually.
-type AutomateLoader []string
-
-// CaddyModule returns the Caddy module information.
-func (AutomateLoader) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls.certificates.automate",
- New: func() caddy.Module { return new(AutomateLoader) },
- }
-}
-
-// CertCacheOptions configures the certificate cache.
-type CertCacheOptions struct {
- // Maximum number of certificates to allow in the
- // cache. If reached, certificates will be randomly
- // evicted to make room for new ones. Defaults to
- // 10000 if unset (see Provision above).
- Capacity int `json:"capacity,omitempty"`
-}
-
-// Variables related to storage cleaning.
-var (
- defaultStorageCleanInterval = 24 * time.Hour
-
- storageClean time.Time
- storageCleanMu sync.Mutex
-)
-
-// Interface guards
-var (
- _ caddy.App = (*TLS)(nil)
- _ caddy.Provisioner = (*TLS)(nil)
- _ caddy.Validator = (*TLS)(nil)
- _ caddy.CleanerUpper = (*TLS)(nil)
-)
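
Note: the first-match semantics of `getAutomationPolicyForName` (explicit subjects, then wildcards, then a catch-all with no subjects) is the part of this deleted file most worth remembering. A simplified, self-contained sketch of that selection order follows; `matchWildcard` here is a hypothetical stand-in for `certmagic.MatchWildcard`, matching exactly one extra label:

```go
package main

import (
	"fmt"
	"strings"
)

type policy struct {
	name     string
	subjects []string // empty slice = catch-all
}

// matchWildcard is a simplified stand-in for certmagic.MatchWildcard:
// a "*."-prefixed pattern matches exactly one additional label.
func matchWildcard(subject, pattern string) bool {
	if subject == pattern {
		return true
	}
	if strings.HasPrefix(pattern, "*.") {
		if i := strings.Index(subject, "."); i > 0 {
			return subject[i+1:] == pattern[2:]
		}
	}
	return false
}

// pick returns the first matching policy, as getAutomationPolicyForName
// does above; a policy with no subjects matches everything.
func pick(policies []policy, name string) *policy {
	for i := range policies {
		if len(policies[i].subjects) == 0 {
			return &policies[i]
		}
		for _, s := range policies[i].subjects {
			if matchWildcard(name, s) {
				return &policies[i]
			}
		}
	}
	return nil
}

func main() {
	policies := []policy{
		{name: "specific", subjects: []string{"app.example.com"}},
		{name: "wildcard", subjects: []string{"*.example.com"}},
		{name: "catch-all"},
	}
	for _, n := range []string{"app.example.com", "www.example.com", "other.test"} {
		fmt.Println(n, "->", pick(policies, n).name)
	}
}
```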
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/values.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/values.go
deleted file mode 100644
index 4e8c1adc..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/values.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "crypto/tls"
- "crypto/x509"
- "fmt"
-
- "github.com/caddyserver/certmagic"
- "github.com/klauspost/cpuid/v2"
-)
-
-// CipherSuiteNameSupported returns true if name is
-// a supported cipher suite.
-func CipherSuiteNameSupported(name string) bool {
- return CipherSuiteID(name) != 0
-}
-
-// CipherSuiteID returns the ID of the cipher suite associated with
-// the given name, or 0 if the name is not recognized/supported.
-func CipherSuiteID(name string) uint16 {
- for _, cs := range SupportedCipherSuites() {
- if cs.Name == name {
- return cs.ID
- }
- }
- return 0
-}
-
-// SupportedCipherSuites returns a list of all the cipher suites
-// Caddy supports. The list is NOT ordered by security preference.
-func SupportedCipherSuites() []*tls.CipherSuite {
- return tls.CipherSuites()
-}
-
- // defaultCipherSuitesWithAESNI is the ordered list of all the cipher
-// suites we want to support by default, assuming AES-NI
-// (hardware acceleration for AES).
-var defaultCipherSuitesWithAESNI = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
-}
-
- // defaultCipherSuitesWithoutAESNI is the ordered list of all the cipher
-// suites we want to support by default, assuming lack of
-// AES-NI (NO hardware acceleration for AES).
-var defaultCipherSuitesWithoutAESNI = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-}
-
- // getOptimalDefaultCipherSuites returns an appropriate list of cipher
- // suites to use depending on the hardware support for AES.
-//
-// See https://github.com/caddyserver/caddy/issues/1674
-func getOptimalDefaultCipherSuites() []uint16 {
- if cpuid.CPU.Supports(cpuid.AESNI) {
- return defaultCipherSuitesWithAESNI
- }
- return defaultCipherSuitesWithoutAESNI
-}
-
-// SupportedCurves is the unordered map of supported curves.
-// https://golang.org/pkg/crypto/tls/#CurveID
-var SupportedCurves = map[string]tls.CurveID{
- "x25519": tls.X25519,
- "secp256r1": tls.CurveP256,
- "secp384r1": tls.CurveP384,
- "secp521r1": tls.CurveP521,
-}
-
-// supportedCertKeyTypes is all the key types that are supported
-// for certificates that are obtained through ACME.
-var supportedCertKeyTypes = map[string]certmagic.KeyType{
- "rsa2048": certmagic.RSA2048,
- "rsa4096": certmagic.RSA4096,
- "p256": certmagic.P256,
- "p384": certmagic.P384,
- "ed25519": certmagic.ED25519,
-}
-
-// defaultCurves is the list of only the curves we want to use
-// by default, in descending order of preference.
-//
-// This list should only include curves which are fast by design
-// (e.g. X25519) and those for which an optimized assembly
-// implementation exists (e.g. P256). The latter ones can be
-// found here:
-// https://github.com/golang/go/tree/master/src/crypto/elliptic
-var defaultCurves = []tls.CurveID{
- tls.X25519,
- tls.CurveP256,
-}
-
-// SupportedProtocols is a map of supported protocols.
-var SupportedProtocols = map[string]uint16{
- "tls1.2": tls.VersionTLS12,
- "tls1.3": tls.VersionTLS13,
-}
-
-// unsupportedProtocols is a map of unsupported protocols.
-// Used for logging only, not enforcement.
-var unsupportedProtocols = map[string]uint16{
- //nolint:staticcheck
- "ssl3.0": tls.VersionSSL30,
- "tls1.0": tls.VersionTLS10,
- "tls1.1": tls.VersionTLS11,
-}
-
-// publicKeyAlgorithms is the map of supported public key algorithms.
-var publicKeyAlgorithms = map[string]x509.PublicKeyAlgorithm{
- "rsa": x509.RSA,
- "dsa": x509.DSA,
- "ecdsa": x509.ECDSA,
-}
-
-// ProtocolName returns the standard name for the passed protocol version ID
-// (e.g. "TLS1.3") or a fallback representation of the ID value if the version
-// is not supported.
-func ProtocolName(id uint16) string {
- for k, v := range SupportedProtocols {
- if v == id {
- return k
- }
- }
-
- for k, v := range unsupportedProtocols {
- if v == id {
- return k
- }
- }
-
- return fmt.Sprintf("0x%04x", id)
-}
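
Note: `ProtocolName` above is a plain reverse lookup over the name maps with a hex fallback. A minimal sketch of the same shape, using only the supported-protocols map:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

var names = map[string]uint16{
	"tls1.2": tls.VersionTLS12,
	"tls1.3": tls.VersionTLS13,
}

// protocolName mirrors the lookup-then-fallback shape of ProtocolName
// above, scanning the map and formatting unknown IDs as hex.
func protocolName(id uint16) string {
	for k, v := range names {
		if v == id {
			return k
		}
	}
	return fmt.Sprintf("0x%04x", id)
}

func main() {
	fmt.Println(protocolName(tls.VersionTLS13)) // tls1.3
	fmt.Println(protocolName(0x0300))           // 0x0300 (SSL 3.0, not in the map)
}
```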
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/zerosslissuer.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/zerosslissuer.go
deleted file mode 100644
index a8830a0b..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/zerosslissuer.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddytls
-
-import (
- "context"
- "crypto/x509"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "strings"
- "sync"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/caddyserver/certmagic"
- "github.com/mholt/acmez/acme"
- "go.uber.org/zap"
-)
-
-func init() {
- caddy.RegisterModule(new(ZeroSSLIssuer))
-}
-
- // ZeroSSLIssuer makes an ACME manager for managing
- // certificates through ZeroSSL's ACME endpoint.
-type ZeroSSLIssuer struct {
- *ACMEIssuer
-
- // The API key (or "access key") for using the ZeroSSL API.
- APIKey string `json:"api_key,omitempty"`
-
- mu sync.Mutex
- logger *zap.Logger
-}
-
-// CaddyModule returns the Caddy module information.
-func (*ZeroSSLIssuer) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "tls.issuance.zerossl",
- New: func() caddy.Module { return new(ZeroSSLIssuer) },
- }
-}
-
-// Provision sets up iss.
-func (iss *ZeroSSLIssuer) Provision(ctx caddy.Context) error {
- iss.logger = ctx.Logger(iss)
- if iss.ACMEIssuer == nil {
- iss.ACMEIssuer = new(ACMEIssuer)
- }
- if iss.ACMEIssuer.CA == "" {
- iss.ACMEIssuer.CA = certmagic.ZeroSSLProductionCA
- }
- return iss.ACMEIssuer.Provision(ctx)
-}
-
-// newAccountCallback generates EAB if not already provided. It also sets a valid default contact on the account if not set.
-func (iss *ZeroSSLIssuer) newAccountCallback(ctx context.Context, am *certmagic.ACMEManager, acct acme.Account) (acme.Account, error) {
- if am.ExternalAccount != nil {
- return acct, nil
- }
- var err error
- am.ExternalAccount, acct, err = iss.generateEABCredentials(ctx, acct)
- return acct, err
-}
-
-// generateEABCredentials generates EAB credentials using the API key if provided,
-// otherwise using the primary contact email on the issuer. If an email is not set
-// on the issuer, a default generic email is used.
-func (iss *ZeroSSLIssuer) generateEABCredentials(ctx context.Context, acct acme.Account) (*acme.EAB, acme.Account, error) {
- var endpoint string
- var body io.Reader
-
- // there are two ways to generate EAB credentials: authenticated with
- // their API key, or unauthenticated with their email address
- if iss.APIKey != "" {
- apiKey := caddy.NewReplacer().ReplaceAll(iss.APIKey, "")
- if apiKey == "" {
- return nil, acct, fmt.Errorf("missing API key: '%v'", iss.APIKey)
- }
- qs := url.Values{"access_key": []string{apiKey}}
- endpoint = fmt.Sprintf("%s/eab-credentials?%s", zerosslAPIBase, qs.Encode())
- } else {
- email := iss.Email
- if email == "" {
- iss.logger.Warn("missing email address for ZeroSSL; it is strongly recommended to set one for next time")
- email = "caddy@zerossl.com" // special email address that preserves backwards-compat, but which black-holes dashboard features, oh well
- }
- if len(acct.Contact) == 0 {
- // we borrow the email from config or the default email, so ensure it's saved with the account
- acct.Contact = []string{"mailto:" + email}
- }
- endpoint = zerosslAPIBase + "/eab-credentials-email"
- form := url.Values{"email": []string{email}}
- body = strings.NewReader(form.Encode())
- }
-
- req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, body)
- if err != nil {
- return nil, acct, fmt.Errorf("forming request: %v", err)
- }
- if body != nil {
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- }
- req.Header.Set("User-Agent", certmagic.UserAgent)
-
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return nil, acct, fmt.Errorf("performing EAB credentials request: %v", err)
- }
- defer resp.Body.Close()
-
- var result struct {
- Success bool `json:"success"`
- Error struct {
- Code int `json:"code"`
- Type string `json:"type"`
- } `json:"error"`
- EABKID string `json:"eab_kid"`
- EABHMACKey string `json:"eab_hmac_key"`
- }
- err = json.NewDecoder(resp.Body).Decode(&result)
- if err != nil {
- return nil, acct, fmt.Errorf("decoding API response: %v", err)
- }
- if result.Error.Code != 0 {
- return nil, acct, fmt.Errorf("failed getting EAB credentials: HTTP %d: %s (code %d)",
- resp.StatusCode, result.Error.Type, result.Error.Code)
- }
- if resp.StatusCode != http.StatusOK {
- return nil, acct, fmt.Errorf("failed getting EAB credentials: HTTP %d", resp.StatusCode)
- }
-
- iss.logger.Info("generated EAB credentials", zap.String("key_id", result.EABKID))
-
- return &acme.EAB{
- KeyID: result.EABKID,
- MACKey: result.EABHMACKey,
- }, acct, nil
-}
-
-// initialize modifies the template for the underlying ACMEManager
-// values by setting the CA endpoint to the ZeroSSL directory and
-// setting the NewAccountFunc callback to one which allows us to
-// generate EAB credentials only if a new account is being made.
-// Since it modifies the stored template, its effect should only
-// be needed once, but it is fine to call it repeatedly.
-func (iss *ZeroSSLIssuer) initialize() {
- iss.mu.Lock()
- defer iss.mu.Unlock()
- if iss.template.NewAccountFunc == nil {
- iss.template.NewAccountFunc = iss.newAccountCallback
- }
-}
-
-// PreCheck implements the certmagic.PreChecker interface.
-func (iss *ZeroSSLIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error {
- iss.initialize()
- return iss.ACMEIssuer.PreCheck(ctx, names, interactive)
-}
-
-// Issue obtains a certificate for the given csr.
-func (iss *ZeroSSLIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
- iss.initialize()
- return iss.ACMEIssuer.Issue(ctx, csr)
-}
-
-// IssuerKey returns the unique issuer key for the configured CA endpoint.
-func (iss *ZeroSSLIssuer) IssuerKey() string {
- iss.initialize()
- return iss.ACMEIssuer.IssuerKey()
-}
-
-// Revoke revokes the given certificate.
-func (iss *ZeroSSLIssuer) Revoke(ctx context.Context, cert certmagic.CertificateResource, reason int) error {
- iss.initialize()
- return iss.ACMEIssuer.Revoke(ctx, cert, reason)
-}
-
-// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
-//
-// ... zerossl [] {
-// ...
-// }
-//
-// Any of the subdirectives for the ACME issuer can be used in the block.
-func (iss *ZeroSSLIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if d.NextArg() {
- iss.APIKey = d.Val()
- if d.NextArg() {
- return d.ArgErr()
- }
- }
-
- if iss.ACMEIssuer == nil {
- iss.ACMEIssuer = new(ACMEIssuer)
- }
- err := iss.ACMEIssuer.UnmarshalCaddyfile(d.NewFromNextSegment())
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-const zerosslAPIBase = "https://api.zerossl.com/acme"
-
-// Interface guards
-var (
- _ certmagic.PreChecker = (*ZeroSSLIssuer)(nil)
- _ certmagic.Issuer = (*ZeroSSLIssuer)(nil)
- _ certmagic.Revoker = (*ZeroSSLIssuer)(nil)
- _ caddy.Provisioner = (*ZeroSSLIssuer)(nil)
- _ ConfigSetter = (*ZeroSSLIssuer)(nil)
-
- // a type which properly embeds an ACMEIssuer should implement
- // this interface so it can be treated as an ACMEIssuer
- _ interface{ GetACMEIssuer() *ACMEIssuer } = (*ZeroSSLIssuer)(nil)
-)
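
Note: the unauthenticated branch of `generateEABCredentials` above is a form-encoded POST followed by JSON decoding. The sketch below isolates that request/response shape; the endpoint URL is the one from the deleted file, while the function name, struct, and email are illustrative only:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"time"
)

// eabResult mirrors the response fields decoded in generateEABCredentials.
type eabResult struct {
	Success    bool   `json:"success"`
	EABKID     string `json:"eab_kid"`
	EABHMACKey string `json:"eab_hmac_key"`
}

// fetchEABByEmail form-encodes the email, POSTs it, and decodes the
// JSON body, as the unauthenticated branch above does.
func fetchEABByEmail(ctx context.Context, email string) (*eabResult, error) {
	form := url.Values{"email": []string{email}}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
		"https://api.zerossl.com/acme/eab-credentials-email",
		strings.NewReader(form.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var result eabResult
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("HTTP %d", resp.StatusCode)
	}
	return &result, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	res, err := fetchEABByEmail(ctx, "ops@example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("EAB key ID:", res.EABKID)
}
```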
diff --git a/vendor/github.com/caddyserver/caddy/v2/notify/notify.go b/vendor/github.com/caddyserver/caddy/v2/notify/notify.go
deleted file mode 100644
index bca80c1f..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/notify/notify.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package notify
-
-// NotifyReadiness notifies process manager of readiness.
-func NotifyReadiness() error {
- return notifyReadiness()
-}
-
-// NotifyReloading notifies process manager of reloading.
-func NotifyReloading() error {
- return notifyReloading()
-}
-
-// NotifyStopping notifies process manager of stopping.
-func NotifyStopping() error {
- return notifyStopping()
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/notify/notify_linux.go b/vendor/github.com/caddyserver/caddy/v2/notify/notify_linux.go
deleted file mode 100644
index 8ba49d2c..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/notify/notify_linux.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package notify
-
-import (
- "io"
- "net"
- "os"
- "strings"
-)
-
-// The documentation about this IPC protocol is available here:
-// https://www.freedesktop.org/software/systemd/man/sd_notify.html
-
-func sdNotify(path, payload string) error {
- socketAddr := &net.UnixAddr{
- Name: path,
- Net: "unixgram",
- }
-
- conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
- if err != nil {
- return err
- }
- defer conn.Close()
-
- if _, err := io.Copy(conn, strings.NewReader(payload)); err != nil {
- return err
- }
- return nil
-}
-
-// notifyReadiness notifies systemd that caddy has finished its
-// initialization routines.
-func notifyReadiness() error {
- val, ok := os.LookupEnv("NOTIFY_SOCKET")
- if !ok || val == "" {
- return nil
- }
- if err := sdNotify(val, "READY=1"); err != nil {
- return err
- }
- return nil
-}
-
-// notifyReloading notifies systemd that caddy is reloading its config.
-func notifyReloading() error {
- val, ok := os.LookupEnv("NOTIFY_SOCKET")
- if !ok || val == "" {
- return nil
- }
- if err := sdNotify(val, "RELOADING=1"); err != nil {
- return err
- }
- return nil
-}
-
-// notifyStopping notifies systemd that caddy is stopping.
-func notifyStopping() error {
- val, ok := os.LookupEnv("NOTIFY_SOCKET")
- if !ok || val == "" {
- return nil
- }
- if err := sdNotify(val, "STOPPING=1"); err != nil {
- return err
- }
- return nil
-}
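
Note: all three notification paths above reduce to one datagram written to the socket named by NOTIFY_SOCKET. A minimal self-contained sketch of that protocol, safe to run outside systemd because it no-ops when the variable is unset:

```go
package main

import (
	"fmt"
	"io"
	"net"
	"os"
	"strings"
)

// sdNotify sends one datagram to the socket named by NOTIFY_SOCKET,
// the same shape as the helper in the deleted file.
func sdNotify(payload string) error {
	path := os.Getenv("NOTIFY_SOCKET")
	if path == "" {
		return nil // not running under systemd; nothing to do
	}
	addr := &net.UnixAddr{Name: path, Net: "unixgram"}
	conn, err := net.DialUnix(addr.Net, nil, addr)
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = io.Copy(conn, strings.NewReader(payload))
	return err
}

func main() {
	if err := sdNotify("READY=1"); err != nil {
		fmt.Fprintln(os.Stderr, "notify failed:", err)
	}
}
```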
diff --git a/vendor/github.com/caddyserver/caddy/v2/notify/notify_other.go b/vendor/github.com/caddyserver/caddy/v2/notify/notify_other.go
deleted file mode 100644
index 17f62bab..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/notify/notify_other.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux
-
-package notify
-
-func notifyReadiness() error {
- return nil
-}
-
-func notifyReloading() error {
- return nil
-}
-
-func notifyStopping() error {
- return nil
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/replacer.go b/vendor/github.com/caddyserver/caddy/v2/replacer.go
deleted file mode 100644
index bffc4244..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/replacer.go
+++ /dev/null
@@ -1,328 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddy
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
- "time"
-)
-
-// NewReplacer returns a new Replacer.
-func NewReplacer() *Replacer {
- rep := &Replacer{
- static: make(map[string]interface{}),
- }
- rep.providers = []ReplacerFunc{
- globalDefaultReplacements,
- rep.fromStatic,
- }
- return rep
-}
-
-// NewEmptyReplacer returns a new Replacer,
-// without the global default replacements.
-func NewEmptyReplacer() *Replacer {
- rep := &Replacer{
- static: make(map[string]interface{}),
- }
- rep.providers = []ReplacerFunc{
- rep.fromStatic,
- }
- return rep
-}
-
-// Replacer can replace values in strings.
-// A default/empty Replacer is not valid;
-// use NewReplacer to make one.
-type Replacer struct {
- providers []ReplacerFunc
- static map[string]interface{}
-}
-
-// Map adds mapFunc to the list of value providers.
-// mapFunc will be executed only at replace-time.
-func (r *Replacer) Map(mapFunc ReplacerFunc) {
- r.providers = append(r.providers, mapFunc)
-}
-
-// Set sets a custom variable to a static value.
-func (r *Replacer) Set(variable string, value interface{}) {
- r.static[variable] = value
-}
-
-// Get gets a value from the replacer. It returns
-// the value and whether the variable was known.
-func (r *Replacer) Get(variable string) (interface{}, bool) {
- for _, mapFunc := range r.providers {
- if val, ok := mapFunc(variable); ok {
- return val, true
- }
- }
- return nil, false
-}
-
-// GetString is the same as Get, but coerces the value to a
-// string representation.
-func (r *Replacer) GetString(variable string) (string, bool) {
- s, found := r.Get(variable)
- return toString(s), found
-}
-
-// Delete removes a variable with a static value
-// that was created using Set.
-func (r *Replacer) Delete(variable string) {
- delete(r.static, variable)
-}
-
-// fromStatic provides values from r.static.
-func (r *Replacer) fromStatic(key string) (interface{}, bool) {
- val, ok := r.static[key]
- return val, ok
-}
-
-// ReplaceOrErr is like ReplaceAll, but any placeholders
-// that are empty or not recognized will cause an error to
-// be returned.
-func (r *Replacer) ReplaceOrErr(input string, errOnEmpty, errOnUnknown bool) (string, error) {
- return r.replace(input, "", false, errOnEmpty, errOnUnknown, nil)
-}
-
-// ReplaceKnown is like ReplaceAll but only replaces
-// placeholders that are known (recognized). Unrecognized
-// placeholders will remain in the output.
-func (r *Replacer) ReplaceKnown(input, empty string) string {
- out, _ := r.replace(input, empty, false, false, false, nil)
- return out
-}
-
-// ReplaceAll efficiently replaces placeholders in input with
-// their values. All placeholders are replaced in the output
-// whether they are recognized or not. Values that are empty
-// string will be substituted with empty.
-func (r *Replacer) ReplaceAll(input, empty string) string {
- out, _ := r.replace(input, empty, true, false, false, nil)
- return out
-}
-
-// ReplaceFunc is the same as ReplaceAll, but calls f for every
-// replacement to be made, in case f wants to change or inspect
-// the replacement.
-func (r *Replacer) ReplaceFunc(input string, f ReplacementFunc) (string, error) {
- return r.replace(input, "", true, false, false, f)
-}
-
-func (r *Replacer) replace(input, empty string,
- treatUnknownAsEmpty, errOnEmpty, errOnUnknown bool,
- f ReplacementFunc) (string, error) {
- if !strings.Contains(input, string(phOpen)) {
- return input, nil
- }
-
- var sb strings.Builder
-
- // it is reasonable to assume that the output
- // will be approximately as long as the input
- sb.Grow(len(input))
-
- // iterate the input to find each placeholder
- var lastWriteCursor int
-
-scan:
- for i := 0; i < len(input); i++ {
-
- // check for escaped braces
- if i > 0 && input[i-1] == phEscape && (input[i] == phClose || input[i] == phOpen) {
- sb.WriteString(input[lastWriteCursor : i-1])
- lastWriteCursor = i
- continue
- }
-
- if input[i] != phOpen {
- continue
- }
-
- // find the end of the placeholder
- end := strings.Index(input[i:], string(phClose)) + i
- if end < i {
- continue
- }
-
- // if necessary look for the first closing brace that is not escaped
- for end > 0 && end < len(input)-1 && input[end-1] == phEscape {
- nextEnd := strings.Index(input[end+1:], string(phClose))
- if nextEnd < 0 {
- continue scan
- }
- end += nextEnd + 1
- }
-
- // write the substring from the last cursor to this point
- sb.WriteString(input[lastWriteCursor:i])
-
- // trim opening bracket
- key := input[i+1 : end]
-
- // try to get a value for this key, handle empty values accordingly
- val, found := r.Get(key)
- if !found {
- // placeholder is unknown (unrecognized); handle accordingly
- if errOnUnknown {
- return "", fmt.Errorf("unrecognized placeholder %s%s%s",
- string(phOpen), key, string(phClose))
- } else if !treatUnknownAsEmpty {
- // if treatUnknownAsEmpty is true, we'll handle an empty
- // val later; so only continue otherwise
- lastWriteCursor = i
- continue
- }
- }
-
- // apply any transformations
- if f != nil {
- var err error
- val, err = f(key, val)
- if err != nil {
- return "", err
- }
- }
-
- // convert val to a string as efficiently as possible
- valStr := toString(val)
-
- // write the value; if it's empty, either return
- // an error or write a default value
- if valStr == "" {
- if errOnEmpty {
- return "", fmt.Errorf("evaluated placeholder %s%s%s is empty",
- string(phOpen), key, string(phClose))
- } else if empty != "" {
- sb.WriteString(empty)
- }
- } else {
- sb.WriteString(valStr)
- }
-
- // advance cursor to end of placeholder
- i = end
- lastWriteCursor = i + 1
- }
-
- // flush any unwritten remainder
- sb.WriteString(input[lastWriteCursor:])
-
- return sb.String(), nil
-}
-
-func toString(val interface{}) string {
- switch v := val.(type) {
- case nil:
- return ""
- case string:
- return v
- case fmt.Stringer:
- return v.String()
- case byte:
- return string(v)
- case []byte:
- return string(v)
- case []rune:
- return string(v)
- case int:
- return strconv.Itoa(v)
- case int32:
- return strconv.Itoa(int(v))
- case int64:
- return strconv.Itoa(int(v))
- case uint:
- return strconv.Itoa(int(v))
- case uint32:
- return strconv.Itoa(int(v))
- case uint64:
- return strconv.Itoa(int(v))
- case float32:
- return strconv.FormatFloat(float64(v), 'f', -1, 32)
- case float64:
- return strconv.FormatFloat(v, 'f', -1, 64)
- case bool:
- if v {
- return "true"
- }
- return "false"
- default:
- return fmt.Sprintf("%+v", v)
- }
-}
-
-// ReplacerFunc is a function that returns a replacement
-// for the given key along with true if the function is able
-// to service that key (even if the value is blank). If the
-// function does not recognize the key, false should be
-// returned.
-type ReplacerFunc func(key string) (interface{}, bool)
-
-func globalDefaultReplacements(key string) (interface{}, bool) {
- // check environment variable
- const envPrefix = "env."
- if strings.HasPrefix(key, envPrefix) {
- return os.Getenv(key[len(envPrefix):]), true
- }
-
- switch key {
- case "system.hostname":
- // OK if there is an error; just return empty string
- name, _ := os.Hostname()
- return name, true
- case "system.slash":
- return string(filepath.Separator), true
- case "system.os":
- return runtime.GOOS, true
- case "system.arch":
- return runtime.GOARCH, true
- case "time.now":
- return nowFunc(), true
- case "time.now.common_log":
- return nowFunc().Format("02/Jan/2006:15:04:05 -0700"), true
- case "time.now.year":
- return strconv.Itoa(nowFunc().Year()), true
- case "time.now.unix":
- return strconv.FormatInt(nowFunc().Unix(), 10), true
- case "time.now.unix_ms":
- return strconv.FormatInt(nowFunc().UnixNano()/int64(time.Millisecond), 10), true
- }
-
- return nil, false
-}
-
-// ReplacementFunc is a function that is called when a
-// replacement is being performed. It receives the
-// variable (i.e. placeholder name) and the value that
-// will be the replacement, and returns the value that
-// will actually be the replacement, or an error. Note
-// that errors are sometimes ignored by replacers.
-type ReplacementFunc func(variable string, val interface{}) (interface{}, error)
-
-// nowFunc is a variable so tests can change it
-// in order to obtain a deterministic time.
-var nowFunc = time.Now
-
-// ReplacerCtxKey is the context key for a replacer.
-const ReplacerCtxKey CtxKey = "replacer"
-
-const phOpen, phClose, phEscape = '{', '}', '\\'
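
Note: the replacer's `replace` loop above handles escaping, error modes, and transformation callbacks; the essential mechanism is scanning for `{key}` spans and consulting a provider chain. A much-simplified sketch of just that core, with no escaping and unknown keys left intact:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// replaceAll is a much-simplified take on Replacer.ReplaceAll above:
// {key} lookups against a provider chain, unknown placeholders kept.
func replaceAll(input string, providers ...func(string) (string, bool)) string {
	var sb strings.Builder
	for {
		open := strings.IndexByte(input, '{')
		if open < 0 {
			sb.WriteString(input)
			return sb.String()
		}
		end := strings.IndexByte(input[open:], '}')
		if end < 0 {
			sb.WriteString(input)
			return sb.String()
		}
		end += open
		sb.WriteString(input[:open])
		key := input[open+1 : end]
		replaced := false
		for _, p := range providers {
			if val, ok := p(key); ok {
				sb.WriteString(val)
				replaced = true
				break
			}
		}
		if !replaced {
			sb.WriteString(input[open : end+1]) // keep unknown placeholder
		}
		input = input[end+1:]
	}
}

func main() {
	// env mirrors the "env." prefix handling in globalDefaultReplacements.
	env := func(key string) (string, bool) {
		const prefix = "env."
		if strings.HasPrefix(key, prefix) {
			return os.Getenv(key[len(prefix):]), true
		}
		return "", false
	}
	fmt.Println(replaceAll("home is {env.HOME}, {unknown} stays", env))
}
```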
diff --git a/vendor/github.com/caddyserver/caddy/v2/replacer_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/replacer_fuzz.go
deleted file mode 100644
index 2c27f01a..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/replacer_fuzz.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build gofuzz
-
-package caddy
-
-func FuzzReplacer(data []byte) (score int) {
- NewReplacer().ReplaceAll(string(data), "")
- NewReplacer().ReplaceAll(NewReplacer().ReplaceAll(string(data), ""), "")
- NewReplacer().ReplaceAll(NewReplacer().ReplaceAll(string(data), ""), NewReplacer().ReplaceAll(string(data), ""))
- NewReplacer().ReplaceAll(string(data[:len(data)/2]), string(data[len(data)/2:]))
- return 0
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/sigtrap.go b/vendor/github.com/caddyserver/caddy/v2/sigtrap.go
deleted file mode 100644
index 0fce6d0d..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/sigtrap.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddy
-
-import (
- "os"
- "os/signal"
-
- "go.uber.org/zap"
-)
-
- // TrapSignals creates signal/interrupt handlers as best it can for the
-// current OS. This is a rather invasive function to call in a Go program
-// that captures signals already, so in that case it would be better to
-// implement these handlers yourself.
-func TrapSignals() {
- trapSignalsCrossPlatform()
- trapSignalsPosix()
-}
-
-// trapSignalsCrossPlatform captures SIGINT or interrupt (depending
-// on the OS), which initiates a graceful shutdown. A second SIGINT
-// or interrupt will forcefully exit the process immediately.
-func trapSignalsCrossPlatform() {
- go func() {
- shutdown := make(chan os.Signal, 1)
- signal.Notify(shutdown, os.Interrupt)
-
- for i := 0; true; i++ {
- <-shutdown
-
- if i > 0 {
- Log().Warn("force quit", zap.String("signal", "SIGINT"))
- os.Exit(ExitCodeForceQuit)
- }
-
- Log().Info("shutting down", zap.String("signal", "SIGINT"))
- go exitProcessFromSignal("SIGINT")
- }
- }()
-}
-
-// exitProcessFromSignal exits the process from a system signal.
-func exitProcessFromSignal(sigName string) {
- logger := Log().With(zap.String("signal", sigName))
- exitProcess(logger)
-}
-
-// Exit codes. Generally, you should NOT
-// automatically restart the process if the
-// exit code is ExitCodeFailedStartup (1).
-const (
- ExitCodeSuccess = iota
- ExitCodeFailedStartup
- ExitCodeForceQuit
- ExitCodeFailedQuit
-)
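
Note: the two-stage interrupt handling in `trapSignalsCrossPlatform` above (first SIGINT starts a graceful shutdown, a second one force-quits) is a reusable pattern. A self-contained sketch, where `gracefulShutdown` is a hypothetical stand-in for `exitProcessFromSignal`:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"time"
)

// gracefulShutdown stands in for exitProcessFromSignal: it runs
// cleanup, then exits with success.
func gracefulShutdown() {
	time.Sleep(2 * time.Second) // pretend to stop servers, flush logs, etc.
	os.Exit(0)
}

func main() {
	shutdown := make(chan os.Signal, 1)
	signal.Notify(shutdown, os.Interrupt)

	// The first interrupt begins a graceful shutdown; a second one while
	// cleanup is still running exits immediately, as in the deleted file.
	for i := 0; ; i++ {
		<-shutdown
		if i > 0 {
			fmt.Println("force quit")
			os.Exit(2)
		}
		fmt.Println("shutting down; interrupt again to force quit")
		go gracefulShutdown()
	}
}
```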
diff --git a/vendor/github.com/caddyserver/caddy/v2/sigtrap_nonposix.go b/vendor/github.com/caddyserver/caddy/v2/sigtrap_nonposix.go
deleted file mode 100644
index 3b4595a6..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/sigtrap_nonposix.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build windows plan9 nacl js
-
-package caddy
-
-func trapSignalsPosix() {}
diff --git a/vendor/github.com/caddyserver/caddy/v2/sigtrap_posix.go b/vendor/github.com/caddyserver/caddy/v2/sigtrap_posix.go
deleted file mode 100644
index d5a03a94..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/sigtrap_posix.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows,!plan9,!nacl,!js
-
-package caddy
-
-import (
- "os"
- "os/signal"
- "syscall"
-
- "github.com/caddyserver/certmagic"
- "go.uber.org/zap"
-)
-
-// trapSignalsPosix captures POSIX-only signals.
-func trapSignalsPosix() {
- go func() {
- sigchan := make(chan os.Signal, 1)
- signal.Notify(sigchan, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGUSR1, syscall.SIGUSR2)
-
- for sig := range sigchan {
- switch sig {
- case syscall.SIGQUIT:
- Log().Info("quitting process immediately", zap.String("signal", "SIGQUIT"))
- certmagic.CleanUpOwnLocks(Log()) // try to clean up locks anyway, it's important
- os.Exit(ExitCodeForceQuit)
-
- case syscall.SIGTERM:
- Log().Info("shutting down apps, then terminating", zap.String("signal", "SIGTERM"))
- exitProcessFromSignal("SIGTERM")
-
- case syscall.SIGUSR1:
- Log().Info("not implemented", zap.String("signal", "SIGUSR1"))
-
- case syscall.SIGUSR2:
- Log().Info("not implemented", zap.String("signal", "SIGUSR2"))
-
- case syscall.SIGHUP:
- // ignore; this signal is sometimes sent outside of the user's control
- Log().Info("not implemented", zap.String("signal", "SIGHUP"))
- }
- }
- }()
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/storage.go b/vendor/github.com/caddyserver/caddy/v2/storage.go
deleted file mode 100644
index 62f9b1c6..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/storage.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddy
-
-import (
- "os"
- "path/filepath"
- "runtime"
-
- "github.com/caddyserver/certmagic"
- "go.uber.org/zap"
-)
-
-// StorageConverter is a type that can convert itself
-// to a valid, usable certmagic.Storage value. (The
-// value might be short-lived.) This interface allows
-// us to adapt any CertMagic storage implementation
-// into a consistent API for Caddy configuration.
-type StorageConverter interface {
- CertMagicStorage() (certmagic.Storage, error)
-}
-
-// HomeDir returns the best guess of the current user's home
-// directory from environment variables. If unknown, "." (the
-// current directory) is returned instead, except GOOS=android,
-// which returns "/sdcard".
-func HomeDir() string {
- home := homeDirUnsafe()
- if home == "" && runtime.GOOS == "android" {
- home = "/sdcard"
- }
- if home == "" {
- home = "."
- }
- return home
-}
-
-// homeDirUnsafe is a low-level function that returns
-// the user's home directory from environment
-// variables. Careful: if it cannot be determined, an
-// empty string is returned. If not accounting for
-// that case, use HomeDir() instead; otherwise you
-// may end up using the root of the file system.
-func homeDirUnsafe() string {
- home := os.Getenv("HOME")
- if home == "" && runtime.GOOS == "windows" {
- drive := os.Getenv("HOMEDRIVE")
- path := os.Getenv("HOMEPATH")
- home = drive + path
- if drive == "" || path == "" {
- home = os.Getenv("USERPROFILE")
- }
- }
- if home == "" && runtime.GOOS == "plan9" {
- home = os.Getenv("home")
- }
- return home
-}
-
-// AppConfigDir returns the directory in which to store the user's config.
-//
-// If XDG_CONFIG_HOME is set, it returns: $XDG_CONFIG_HOME/caddy.
-// Otherwise, os.UserConfigDir() is used; if successful, it appends
-// "Caddy" (Windows & Mac) or "caddy" (every other OS) to the path.
-// If it returns an error, the fallback path "./caddy" is returned.
-//
-// The config directory is not guaranteed to be different from
-// AppDataDir().
-//
-// Unlike os.UserConfigDir(), this function prefers the
-// XDG_CONFIG_HOME env var on all platforms, not just Unix.
-//
-// Ref: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
-func AppConfigDir() string {
- if basedir := os.Getenv("XDG_CONFIG_HOME"); basedir != "" {
- return filepath.Join(basedir, "caddy")
- }
- basedir, err := os.UserConfigDir()
- if err != nil {
- Log().Warn("unable to determine directory for user configuration; falling back to current directory", zap.Error(err))
- return "./caddy"
- }
- subdir := "caddy"
- switch runtime.GOOS {
- case "windows", "darwin":
- subdir = "Caddy"
- }
- return filepath.Join(basedir, subdir)
-}
-
-// AppDataDir returns a directory path that is suitable for storing
-// application data on disk. It uses the environment for finding the
-// best place to store data, and appends a "caddy" or "Caddy" (depending
-// on OS and environment) subdirectory.
-//
-// For a base directory path:
-// If XDG_DATA_HOME is set, it returns: $XDG_DATA_HOME/caddy; otherwise,
-// on Windows it returns: %AppData%/Caddy,
-// on Mac: $HOME/Library/Application Support/Caddy,
-// on Plan9: $home/lib/caddy,
-// on Android: $HOME/caddy,
-// and on everything else: $HOME/.local/share/caddy.
-//
-// If a data directory cannot be determined, it returns "./caddy"
-// (this is not ideal, and the environment should be fixed).
-//
-// The data directory is not guaranteed to be different from AppConfigDir().
-//
-// Ref: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
-func AppDataDir() string {
- if basedir := os.Getenv("XDG_DATA_HOME"); basedir != "" {
- return filepath.Join(basedir, "caddy")
- }
- switch runtime.GOOS {
- case "windows":
- appData := os.Getenv("AppData")
- if appData != "" {
- return filepath.Join(appData, "Caddy")
- }
- case "darwin":
- home := homeDirUnsafe()
- if home != "" {
- return filepath.Join(home, "Library", "Application Support", "Caddy")
- }
- case "plan9":
- home := homeDirUnsafe()
- if home != "" {
- return filepath.Join(home, "lib", "caddy")
- }
- case "android":
- home := homeDirUnsafe()
- if home != "" {
- return filepath.Join(home, "caddy")
- }
- default:
- home := homeDirUnsafe()
- if home != "" {
- return filepath.Join(home, ".local", "share", "caddy")
- }
- }
- return "./caddy"
-}
-
-// ConfigAutosavePath is the default path to which the last config will be persisted.
-var ConfigAutosavePath = filepath.Join(AppConfigDir(), "autosave.json")
-
-// DefaultStorage is Caddy's default storage module.
-var DefaultStorage = &certmagic.FileStorage{Path: AppDataDir()}
diff --git a/vendor/github.com/caddyserver/caddy/v2/usagepool.go b/vendor/github.com/caddyserver/caddy/v2/usagepool.go
deleted file mode 100644
index 6fd48f5b..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/usagepool.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddy
-
-import (
- "fmt"
- "sync"
- "sync/atomic"
-)
-
-// UsagePool is a thread-safe map that pools values
-// based on usage (reference counting). Values are
-// only inserted if they do not already exist. There
-// are two ways to add values to the pool:
-//
-// 1) LoadOrStore will increment usage and store the
-// value immediately if it does not already exist.
-// 2) LoadOrNew will atomically check for existence
-// and construct the value immediately if it does
-// not already exist, or increment the usage
-// otherwise, then store that value in the pool.
-// When the constructed value is finally deleted
-// from the pool (when its usage reaches 0), it
-// will be cleaned up by calling Destruct().
-//
-// The use of LoadOrNew allows values to be created
-// and reused and finally cleaned up only once, even
-// though they may have many references throughout
-// their lifespan. This is helpful, for example, when
-// sharing thread-safe io.Writers that you only want
-// to open and close once.
-//
-// There is no way to overwrite existing keys in the
-// pool without first deleting them as many times as they
-// were stored. Deleting too many times will panic.
-//
-// The implementation does not use a sync.Pool because
-// UsagePool needs additional atomicity to run the
-// constructor functions when creating a new value when
-// LoadOrNew is used. (We could probably use sync.Pool
-// but we'd still have to layer our own additional locks
-// on top.)
-//
-// An empty UsagePool is NOT safe to use; always call
-// NewUsagePool() to make a new one.
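-//
-// For illustration, a sketch of sharing one value among many
-// users (sharedFile is a hypothetical type whose Destruct
-// method closes the underlying file):
-//
-//    w, loaded, err := pool.LoadOrNew("access.log", func() (Destructor, error) {
-//        f, err := os.Create("access.log")
-//        return sharedFile{f}, err
-//    })
-//    // ... write through w ...
-//    pool.Delete("access.log") // file closes when the last user deletes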
-type UsagePool struct {
- sync.RWMutex
- pool map[interface{}]*usagePoolVal
-}
-
-// NewUsagePool returns a new usage pool that is ready to use.
-func NewUsagePool() *UsagePool {
- return &UsagePool{
- pool: make(map[interface{}]*usagePoolVal),
- }
-}
-
-// LoadOrNew loads the value associated with key from the pool if it
-// already exists. If the key doesn't exist, it will call construct
-// to create a new value and then stores that in the pool. An error
-// is only returned if the constructor returns an error. The loaded
-// or constructed value is returned. The loaded return value is true
-// if the value already existed and was loaded, or false if it was
-// newly constructed.
-func (up *UsagePool) LoadOrNew(key interface{}, construct Constructor) (value interface{}, loaded bool, err error) {
- var upv *usagePoolVal
- up.Lock()
- upv, loaded = up.pool[key]
- if loaded {
- atomic.AddInt32(&upv.refs, 1)
- up.Unlock()
- upv.RLock()
- value = upv.value
- err = upv.err
- upv.RUnlock()
- } else {
- upv = &usagePoolVal{refs: 1}
- upv.Lock()
- up.pool[key] = upv
- up.Unlock()
- value, err = construct()
- if err == nil {
- upv.value = value
- } else {
- // TODO: remove error'ed entries from map
- upv.err = err
- }
- upv.Unlock()
- }
- return
-}
-
-// LoadOrStore loads the value associated with key from the pool if it
-// already exists, or stores it if it does not exist. It returns the
-// value that was either loaded or stored, and true if the value already
-// existed and was loaded, or false if it was newly stored.
-func (up *UsagePool) LoadOrStore(key, val interface{}) (value interface{}, loaded bool) {
- var upv *usagePoolVal
- up.Lock()
- upv, loaded = up.pool[key]
- if loaded {
- atomic.AddInt32(&upv.refs, 1)
- up.Unlock()
- upv.Lock()
- if upv.err == nil {
- value = upv.value
- } else {
- upv.value = val
- upv.err = nil
- }
- upv.Unlock()
- } else {
- upv = &usagePoolVal{refs: 1, value: val}
- up.pool[key] = upv
- up.Unlock()
- value = val
- }
- return
-}
-
-// Range iterates the pool similarly to how sync.Map.Range() does:
-// it calls f for every key in the pool, and if f returns false,
-// iteration is stopped. Ranging does not affect usage counts.
-//
-// This method is somewhat naive and acquires a read lock on the
-// entire pool during iteration, so do your best to make f() really
-// fast, m'kay?
-func (up *UsagePool) Range(f func(key, value interface{}) bool) {
- up.RLock()
- defer up.RUnlock()
- for key, upv := range up.pool {
- upv.RLock()
- if upv.err != nil {
- upv.RUnlock()
- continue
- }
- val := upv.value
- upv.RUnlock()
- if !f(key, val) {
- break
- }
- }
-}
-
-// Delete decrements the usage count for key and removes the
-// value from the underlying map if the usage is 0. It returns
-// true if the usage count reached 0 and the value was deleted.
-// It panics if the usage count drops below 0; always call
-// Delete precisely as many times as LoadOrStore.
-func (up *UsagePool) Delete(key interface{}) (deleted bool, err error) {
- up.Lock()
- upv, ok := up.pool[key]
- if !ok {
- up.Unlock()
- return false, nil
- }
- refs := atomic.AddInt32(&upv.refs, -1)
- if refs == 0 {
- delete(up.pool, key)
- up.Unlock()
- upv.RLock()
- val := upv.value
- upv.RUnlock()
- if destructor, ok := val.(Destructor); ok {
- err = destructor.Destruct()
- }
- deleted = true
- } else {
- up.Unlock()
- if refs < 0 {
- panic(fmt.Sprintf("deleted more than stored: %#v (usage: %d)",
- upv.value, upv.refs))
- }
- }
- return
-}
-
-// Constructor is a function that returns a new value
-// that can destruct itself when it is no longer needed.
-type Constructor func() (Destructor, error)
-
-// Destructor is a value that can clean itself up when
-// it is deallocated.
-type Destructor interface {
- Destruct() error
-}
-
-type usagePoolVal struct {
- refs int32 // accessed atomically; must be 64-bit aligned for 32-bit systems
- value interface{}
- err error
- sync.RWMutex
-}
diff --git a/vendor/github.com/caddyserver/certmagic/.gitignore b/vendor/github.com/caddyserver/certmagic/.gitignore
deleted file mode 100644
index fbd281d1..00000000
--- a/vendor/github.com/caddyserver/certmagic/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-_gitignore/
diff --git a/vendor/github.com/caddyserver/certmagic/LICENSE.txt b/vendor/github.com/caddyserver/certmagic/LICENSE.txt
deleted file mode 100644
index 8dada3ed..00000000
--- a/vendor/github.com/caddyserver/certmagic/LICENSE.txt
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/caddyserver/certmagic/README.md b/vendor/github.com/caddyserver/certmagic/README.md
deleted file mode 100644
index 015faf84..00000000
--- a/vendor/github.com/caddyserver/certmagic/README.md
+++ /dev/null
@@ -1,522 +0,0 @@
-
-Easy and Powerful TLS Automation
-The same library used by the Caddy Web Server
-
-
-
-Caddy's [automagic TLS features](https://caddyserver.com/docs/automatic-https)—now for your own Go programs—in one powerful and easy-to-use library!
-
-CertMagic is the most mature, robust, and powerful ACME client integration for Go... and perhaps ever.
-
-With CertMagic, you can add one line to your Go application to serve securely over TLS, without ever having to touch certificates.
-
-Instead of:
-
-```go
-// plaintext HTTP, gross 🤢
-http.ListenAndServe(":80", mux)
-```
-
-Use CertMagic:
-
-```go
-// encrypted HTTPS with HTTP->HTTPS redirects - yay! 🔒😍
-certmagic.HTTPS([]string{"example.com"}, mux)
-```
-
-That line of code will serve your HTTP router `mux` over HTTPS, complete with HTTP->HTTPS redirects. It obtains and renews the TLS certificates. It staples OCSP responses for greater privacy and security. As long as your domain name points to your server, CertMagic will keep its connections secure.
-
-Compared to other ACME client libraries for Go, only CertMagic supports the full suite of ACME features, and no other library matches CertMagic's maturity and reliability.
-
-
-
-
-CertMagic - Automatic HTTPS using Let's Encrypt
-===============================================
-
-## Menu
-
-- [Features](#features)
-- [Requirements](#requirements)
-- [Installation](#installation)
-- [Usage](#usage)
- - [Package Overview](#package-overview)
- - [Certificate authority](#certificate-authority)
- - [The `Config` type](#the-config-type)
- - [Defaults](#defaults)
- - [Providing an email address](#providing-an-email-address)
- - [Rate limiting](#rate-limiting)
- - [Development and testing](#development-and-testing)
- - [Examples](#examples)
- - [Serving HTTP handlers with HTTPS](#serving-http-handlers-with-https)
- - [Starting a TLS listener](#starting-a-tls-listener)
- - [Getting a tls.Config](#getting-a-tlsconfig)
- - [Advanced use](#advanced-use)
- - [Wildcard Certificates](#wildcard-certificates)
- - [Behind a load balancer (or in a cluster)](#behind-a-load-balancer-or-in-a-cluster)
- - [The ACME Challenges](#the-acme-challenges)
- - [HTTP Challenge](#http-challenge)
- - [TLS-ALPN Challenge](#tls-alpn-challenge)
- - [DNS Challenge](#dns-challenge)
- - [On-Demand TLS](#on-demand-tls)
- - [Storage](#storage)
- - [Cache](#cache)
-- [Contributing](#contributing)
-- [Project History](#project-history)
-- [Credits and License](#credits-and-license)
-
-
-## Features
-
-- Fully automated certificate management including issuance and renewal
-- One-liner, fully managed HTTPS servers
-- Full control over almost every aspect of the system
-- HTTP->HTTPS redirects
-- Solves all 3 ACME challenges: HTTP, TLS-ALPN, and DNS
-- Most robust error handling of _any_ ACME client
- - Challenges are randomized to avoid accidental dependence
- - Challenges are rotated to overcome certain network blockages
- - Robust retries for up to 30 days
- - Exponential backoff with carefully-tuned intervals
- - Retries with optional test/staging CA endpoint instead of production, to avoid rate limits
-- Written in Go, a language with memory-safety guarantees
-- Powered by [ACMEz](https://github.com/mholt/acmez), _the_ premier ACME client library for Go
-- All [libdns](https://github.com/libdns) DNS providers work out-of-the-box
-- Pluggable storage implementations (default: file system)
-- Wildcard certificates
-- Automatic OCSP stapling ([done right](https://gist.github.com/sleevi/5efe9ef98961ecfb4da8#gistcomment-2336055)) [keeps your sites online!](https://twitter.com/caddyserver/status/1234874273724084226)
- - Will [automatically attempt](https://twitter.com/mholt6/status/1235577699541762048) to replace [revoked certificates](https://community.letsencrypt.org/t/2020-02-29-caa-rechecking-bug/114591/3?u=mholt)!
- - Staples stored to disk in case of responder outages
-- Distributed solving of all challenges (works behind load balancers)
- - Highly efficient, coordinated management in a fleet
- - Active locking
- - Smart queueing
-- Supports "on-demand" issuance of certificates (during TLS handshakes!)
- - Caddy / CertMagic pioneered this technology
- - Custom decision functions to regulate and throttle on-demand behavior
-- Optional event hooks for observation
-- Works with any certificate authority (CA) compliant with the ACME specification
-- Certificate revocation (please, only if private key is compromised)
-- Must-Staple (optional; not default)
-- Cross-platform support! Mac, Windows, Linux, BSD, Android...
-- Scales to hundreds of thousands of names/certificates per instance
-- Use in conjunction with your own certificates
-
-
-## Requirements
-
-0. ACME server (can be a publicly-trusted CA, or your own)
-1. Public DNS name(s) you control
-2. Server reachable from public Internet
- - Or use the DNS challenge to waive this requirement
-3. Control over port 80 (HTTP) and/or 443 (HTTPS)
- - Or they can be forwarded to other ports you control
- - Or use the DNS challenge to waive this requirement
- - (This is a requirement of the ACME protocol, not a library limitation)
-4. Persistent storage
- - Typically the local file system (default)
- - Other integrations available/possible
-
-**_Before using this library, your domain names MUST be pointed (A/AAAA records) at your server (unless you use the DNS challenge)!_**
-
-
-## Installation
-
-```bash
-$ go get github.com/caddyserver/certmagic
-```
-
-
-## Usage
-
-### Package Overview
-
-#### Certificate authority
-
-This library uses Let's Encrypt by default, but you can use any certificate authority that conforms to the ACME specification. Known/common CAs are provided as consts in the package, for example `LetsEncryptStagingCA` and `LetsEncryptProductionCA`.
-
-#### The `Config` type
-
-The `certmagic.Config` struct is how you can wield the power of this fully armed and operational battle station. However, an empty/uninitialized `Config` is _not_ a valid one! In time, you will learn to use the force of `certmagic.NewDefault()` as I have.
-
-#### Defaults
-
-The default `Config` value is called `certmagic.Default`. Change its fields to suit your needs, then call `certmagic.NewDefault()` when you need a valid `Config` value. In other words, `certmagic.Default` is a template and is not valid for use directly.
-
-You can set the default values easily, for example: `certmagic.Default.Issuer = ...`.
-
-Similarly, to configure ACME-specific defaults, use `certmagic.DefaultACME`.
-
-The high-level functions in this package (`HTTPS()`, `Listen()`, `ManageSync()`, and `ManageAsync()`) use the default config exclusively. This is how most of you will interact with the package. This is suitable when all your certificates are managed the same way. However, if you need to manage certificates differently depending on their name, you will need to make your own cache and configs (keep reading).
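-
-As a sketch, you might tune the template and then mint a usable value from it (the field choices here are only examples):
-
-```go
-// adjust the template...
-certmagic.Default.RenewalWindowRatio = 0.2
-certmagic.DefaultACME.Agreed = true
-
-// ...then derive a valid, usable Config from it
-cfg := certmagic.NewDefault()
-```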
-
-
-#### Providing an email address
-
-Although not strictly required, this is highly recommended best practice. It allows you to receive expiration emails if your certificates are expiring for some reason, and also allows the CA's engineers to potentially get in touch with you if something is wrong. I recommend setting `certmagic.DefaultACME.Email` or always setting the `Email` field of a new `Config` struct.
-
-
-#### Rate limiting
-
-To avoid firehosing the CA's servers, CertMagic has built-in rate limiting. Currently, its default limit is up to 10 transactions (obtain or renew) every 1 minute (sliding window). This can be changed by setting the `RateLimitEvents` and `RateLimitEventsWindow` variables, if desired.
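-
-For example, a sketch that loosens the limiter (the values are arbitrary, and the `time` package is assumed to be imported):
-
-```go
-certmagic.RateLimitEvents = 20
-certmagic.RateLimitEventsWindow = 1 * time.Minute
-```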
-
-The CA may still enforce their own rate limits, and there's nothing (well, nothing ethical) CertMagic can do to bypass them for you.
-
-Additionally, CertMagic will retry failed validations with exponential backoff for up to 30 days, with a reasonable maximum interval between attempts (an "attempt" means trying each enabled challenge type once).
-
-
-### Development and Testing
-
-Note that Let's Encrypt imposes [strict rate limits](https://letsencrypt.org/docs/rate-limits/) at its production endpoint, so using it while developing your application may lock you out for a few days if you aren't careful!
-
-While developing your application and testing it, use [their staging endpoint](https://letsencrypt.org/docs/staging-environment/) which has much higher rate limits. Even then, don't hammer it: but it's much safer for when you're testing. When deploying, though, use their production CA because their staging CA doesn't issue trusted certificates.
-
-To use staging, set `certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA` or set `CA` of every `ACMEManager` struct.
-
-
-
-### Examples
-
-There are many ways to use this library. We'll start with the highest-level (simplest) and work down (more control).
-
-All these high-level examples use `certmagic.Default` and `certmagic.DefaultACME` for the config and the default cache and storage for serving up certificates.
-
-First, we'll follow best practices and do the following:
-
-```go
-// read and agree to your CA's legal documents
-certmagic.DefaultACME.Agreed = true
-
-// provide an email address
-certmagic.DefaultACME.Email = "you@yours.com"
-
-// use the staging endpoint while we're developing
-certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA
-```
-
-For fully-functional program examples, check out [this Twitter thread](https://twitter.com/mholt6/status/1073103805112147968) (or read it [unrolled into a single post](https://threadreaderapp.com/thread/1073103805112147968.html)). (Note that the package API has changed slightly since these posts.)
-
-
-#### Serving HTTP handlers with HTTPS
-
-```go
-err := certmagic.HTTPS([]string{"example.com", "www.example.com"}, mux)
-if err != nil {
- return err
-}
-```
-
-This starts HTTP and HTTPS listeners and redirects HTTP to HTTPS!
-
-#### Starting a TLS listener
-
-```go
-ln, err := certmagic.Listen([]string{"example.com"})
-if err != nil {
- return err
-}
-```
-
-
-#### Getting a tls.Config
-
-```go
-tlsConfig, err := certmagic.TLS([]string{"example.com"})
-if err != nil {
- return err
-}
-```
-
-
-#### Advanced use
-
-For more control (particularly, if you need a different way of managing each certificate), you'll make and use a `Cache` and a `Config` like so:
-
-```go
-cache := certmagic.NewCache(certmagic.CacheOptions{
- GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) {
- // do whatever you need to do to get the right
- // configuration for this certificate; keep in
- // mind that this config value is used as a
- // template, and will be completed with any
- // defaults that are set in the Default config
- return &certmagic.Config{
- // ...
- }, nil
- },
- ...
-})
-
-magic := certmagic.New(cache, certmagic.Config{
- // any customizations you need go here
-})
-
-myACME := certmagic.NewACMEManager(magic, certmagic.ACMEManager{
- CA: certmagic.LetsEncryptStagingCA,
- Email: "you@yours.com",
- Agreed: true,
- // plus any other customizations you need
-})
-
-magic.Issuer = myACME
-
-// this obtains certificates or renews them if necessary
-err := magic.ManageSync([]string{"example.com", "sub.example.com"})
-if err != nil {
- return err
-}
-
-// to use its certificates and solve the TLS-ALPN challenge,
-// you can get a TLS config to use in a TLS listener!
-tlsConfig := magic.TLSConfig()
-
-//// OR ////
-
-// if you already have a TLS config you don't want to replace,
-// we can simply set its GetCertificate field and append the
-// TLS-ALPN challenge protocol to the NextProtos
-myTLSConfig.GetCertificate = magic.GetCertificate
-myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol)
-
-// the HTTP challenge has to be handled by your HTTP server;
-// if you don't have one, you should have disabled it earlier
-// when you made the certmagic.Config
-httpMux = myACME.HTTPChallengeHandler(httpMux)
-```
-
-Great! This example grants you much more flexibility for advanced programs. However, _the vast majority of you will only use the high-level functions described earlier_, especially since you can still customize them by setting the package-level `Default` config.
-
-
-### Wildcard certificates
-
-At time of writing (December 2018), Let's Encrypt only issues wildcard certificates with the DNS challenge. You can easily enable the DNS challenge with CertMagic for numerous providers (see the relevant section in the docs).
-
-
-### Behind a load balancer (or in a cluster)
-
-CertMagic runs effectively behind load balancers and/or in cluster/fleet environments. In other words, you can have 10 or 1,000 servers all serving the same domain names, all sharing certificates and OCSP staples.
-
-To do so, simply ensure that each instance is using the same Storage. That is the sole criterion for determining whether an instance is part of a cluster.
-
-The default Storage is implemented using the file system, so mounting the same shared folder is sufficient (see [Storage](#storage) for more on that)! If you need an alternate Storage implementation, feel free to use one, provided that all the instances use the _same_ one. :)
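-
-A minimal sketch, assuming a shared mount at an illustrative path:
-
-```go
-// instances pointing at the same folder form one cluster
-certmagic.Default.Storage = &certmagic.FileStorage{Path: "/mnt/certmagic"}
-```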
-
-See [Storage](#storage) and the associated [pkg.go.dev](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Storage) for more information!
-
-
-## The ACME Challenges
-
-This section describes how to solve the ACME challenges. Challenges are how you demonstrate to the certificate authority some control over your domain name, thus authorizing them to grant you a certificate for that name. [The great innovation of ACME](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) is that verification by CAs can now be automated, rather than having to click links in emails (who ever thought that was a good idea??).
-
-If you're using the high-level convenience functions like `HTTPS()`, `Listen()`, or `TLS()`, the HTTP and/or TLS-ALPN challenges are solved for you because they also start listeners. However, if you're making a `Config` and you start your own server manually, you'll need to be sure the ACME challenges can be solved so certificates can be renewed.
-
-The HTTP and TLS-ALPN challenges are the defaults because they don't require configuration from you, but they require that your server is accessible from external IPs on low ports. If that is not possible in your situation, you can enable the DNS challenge, which will disable the HTTP and TLS-ALPN challenges and use the DNS challenge exclusively.
-
-Technically, only one challenge needs to be enabled for things to work, but using multiple is good for reliability in case a challenge is discontinued by the CA. This happened to the TLS-SNI challenge in early 2018—many popular ACME clients such as Traefik and Autocert broke, resulting in downtime for some sites, until new releases were made and patches deployed, because they used only one challenge; Caddy, however—this library's forerunner—was unaffected because it also used the HTTP challenge. If multiple challenges are enabled, they are chosen randomly to help prevent false reliance on a single challenge type. And if one fails, any remaining enabled challenges are tried before giving up.
-
-
-### HTTP Challenge
-
-Per the ACME spec, the HTTP challenge requires port 80, or at least packet forwarding from port 80. It works by serving, at a special endpoint, a specific HTTP response that only the genuine server would have in answer to a normal HTTP request.
-
-If you are running an HTTP server, solving this challenge is very easy: just wrap your handler in `HTTPChallengeHandler` _or_ call `SolveHTTPChallenge()` inside your own `ServeHTTP()` method.
-
-For example, if you're using the standard library:
-
-```go
-mux := http.NewServeMux()
-mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
- fmt.Fprintf(w, "Lookit my cool website over HTTPS!")
-})
-
-http.ListenAndServe(":80", myACME.HTTPChallengeHandler(mux))
-```
-
-If wrapping your handler is not a good solution, try this inside your `ServeHTTP()` instead:
-
-```go
-magic := certmagic.NewDefault()
-myACME := certmagic.NewACMEManager(magic, certmagic.DefaultACME)
-
-func ServeHTTP(w http.ResponseWriter, req *http.Request) {
- if myACME.HandleHTTPChallenge(w, req) {
- return // challenge handled; nothing else to do
- }
- ...
-}
-```
-
-If you are not running an HTTP server, you should disable the HTTP challenge _or_ run an HTTP server whose sole job it is to solve the HTTP challenge.
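-
-For instance, a sketch of a listener whose only job is the challenge (every other request gets a 404 from the empty mux):
-
-```go
-http.ListenAndServe(":80", myACME.HTTPChallengeHandler(http.NewServeMux()))
-```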
-
-
-### TLS-ALPN Challenge
-
-Per the ACME spec, the TLS-ALPN challenge requires port 443, or at least packet forwarding from port 443. It works by providing a special certificate using a standard TLS extension, Application Layer Protocol Negotiation (ALPN), having a special value. This is the most convenient challenge type because it usually requires no extra configuration and uses the standard TLS port, which is also where the certificates are used.
-
-This challenge is easy to solve: just use the provided `tls.Config` when you make your TLS listener:
-
-```go
-// use this to configure a TLS listener
-tlsConfig := magic.TLSConfig()
-```
-
-Or make two simple changes to an existing `tls.Config`:
-
-```go
-myTLSConfig.GetCertificate = magic.GetCertificate
-myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol)
-```
-
-Then just make sure your TLS listener is listening on port 443:
-
-```go
-ln, err := tls.Listen("tcp", ":443", myTLSConfig)
-```
-
-
-### DNS Challenge
-
-The DNS challenge is perhaps the most useful challenge because it allows you to obtain certificates without your server needing to be publicly accessible on the Internet, and it's the only challenge by which Let's Encrypt will issue wildcard certificates.
-
-This challenge works by setting a special record in the domain's zone. To do this automatically, your DNS provider needs to offer an API by which changes can be made to domain names, and the changes need to take effect immediately for best results. CertMagic supports [all DNS providers with `libdns` implementations](https://github.com/libdns)! It always cleans up the temporary record after the challenge completes.
-
-To enable it, just set the `DNS01Solver` field on a `certmagic.ACMEManager` struct, or set the default `certmagic.DefaultACME.DNS01Solver` variable. For example, if my domains' DNS was served by Cloudflare:
-
-```go
-import "github.com/libdns/cloudflare"
-
-certmagic.DefaultACME.DNS01Solver = &certmagic.DNS01Solver{
- DNSProvider: &cloudflare.Provider{
- APIToken: "topsecret",
- },
-}
-```
-
-Now the DNS challenge will be used by default, and I can obtain certificates for wildcard domains, too. Enabling the DNS challenge disables the other challenges for that `certmagic.ACMEManager` instance.
-
-
-## On-Demand TLS
-
-Normally, certificates are obtained and renewed before a listener starts serving, and then those certificates are maintained throughout the lifetime of the program. In other words, the certificate names are static. But sometimes you don't know all the names ahead of time, or you don't want to manage all the certificates up front. This is where On-Demand TLS shines.
-
-Originally invented for use in Caddy (which was the first program to use such technology), On-Demand TLS makes it possible and easy to serve certificates for arbitrary or specific names during the lifetime of the server. When a TLS handshake is received, CertMagic will read the Server Name Indication (SNI) value and either load and present that certificate in the ServerHello, or, if one does not exist, obtain it from a CA right then and there.
-
-Of course, this has some obvious security implications. You don't want to DoS a CA or allow arbitrary clients to fill your storage with spammy TLS handshakes. That's why, when you enable On-Demand issuance, you should set limits or policy to allow getting certificates. CertMagic has an implicit whitelist built-in which is sufficient for nearly everyone, but also has a more advanced way to control on-demand issuance.
-
-The simplest way to enable on-demand issuance is to set the OnDemand field of a Config (or the default package-level value):
-
-```go
-certmagic.Default.OnDemand = new(certmagic.OnDemandConfig)
-```
-
-By setting this to a non-nil value, on-demand TLS is enabled for that config. For convenient security, CertMagic's high-level abstraction functions such as `HTTPS()`, `TLS()`, `ManageSync()`, `ManageAsync()`, and `Listen()` (which all accept a list of domain names) will whitelist those names automatically so only certificates for those names can be obtained when using the Default config. Usually this is sufficient for most users.
-
-However, if you require advanced control over which domains can be issued certificates on-demand (for example, if you do not know which domain names you are managing, or just need to defer their operations until later), you should implement your own DecisionFunc:
-
-```go
-// if the decision function returns an error, a certificate
-// may not be obtained for that name at that time
-certmagic.Default.OnDemand = &certmagic.OnDemandConfig{
- DecisionFunc: func(name string) error {
- if name != "example.com" {
- return fmt.Errorf("not allowed")
- }
- return nil
- },
-}
-```
-
-The [pkg.go.dev](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#OnDemandConfig) describes how to use this in full detail, so please check it out!
-
-
-## Storage
-
-CertMagic relies on storage to store certificates and other TLS assets (OCSP staple cache, coordinating locks, etc). Persistent storage is a requirement when using CertMagic: ephemeral storage will likely lead to rate limiting on the CA-side as CertMagic will always have to get new certificates.
-
-By default, CertMagic stores assets on the local file system in `$HOME/.local/share/certmagic` (and honors `$XDG_DATA_HOME` if set). CertMagic will create the directory if it does not exist. If writes are denied, things will not be happy, so make sure CertMagic can write to it!
-
-The notion of a "cluster" or "fleet" of instances that may be serving the same site and sharing certificates, etc, is tied to storage. Simply, any instances that use the same storage facilities are considered part of the cluster. So if you deploy 100 instances of CertMagic behind a load balancer, they are all part of the same cluster if they share the same storage configuration. Sharing storage could be mounting a shared folder, or implementing some other distributed storage system such as a database server or KV store.
-
-The easiest way to change the storage being used is to set `certmagic.DefaultStorage` to a value that satisfies the [Storage interface](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Storage). Keep in mind that a valid `Storage` must be able to implement some operations atomically in order to provide locking and synchronization.
-
-If you write a Storage implementation, please add it to the [project wiki](https://github.com/caddyserver/certmagic/wiki/Storage-Implementations) so people can find it!
-
-
-## Cache
-
-All of the certificates in use are de-duplicated and cached in memory for optimal performance at handshake-time. This cache must be backed by persistent storage as described above.
-
-Most applications will not need to interact with certificate caches directly. Usually, the closest you will come is to set the package-wide `certmagic.DefaultStorage` variable (before attempting to create any Configs). However, if your use case requires using different storage facilities for different Configs (that's highly unlikely and NOT recommended! Even Caddy doesn't get that crazy), you will need to call `certmagic.NewCache()` and pass in the storage you want to use, then get new `Config` structs with `certmagic.NewWithCache()` and pass in the cache.
-
-Again, if you're needing to do this, you've probably over-complicated your application design.
-
-
-## FAQ
-
-### Can I use some of my own certificates while using CertMagic?
-
-Yes, just call the relevant method on the `Config` to add your own certificate to the cache:
-
-- [`CacheUnmanagedCertificatePEMBytes()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedCertificatePEMBytes)
-- [`CacheUnmanagedCertificatePEMFile()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedCertificatePEMFile)
-- [`CacheUnmanagedTLSCertificate()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedTLSCertificate)
-
-Keep in mind that unmanaged certificates are (obviously) not renewed for you, so you'll have to replace them when you do. However, OCSP stapling is performed even for unmanaged certificates that qualify.
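-
-For example, caching a certificate from PEM files with the second method above (the paths are illustrative):
-
-```go
-err := magic.CacheUnmanagedCertificatePEMFile("/etc/ssl/site.crt", "/etc/ssl/site.key", nil)
-if err != nil {
-    return err
-}
-```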
-
-
-### Does CertMagic obtain SAN certificates?
-
-Technically all certificates these days are SAN certificates because CommonName is deprecated. But if you're asking whether CertMagic issues and manages certificates with multiple SANs, the answer is no. But it does support serving them, if you provide your own.
-
-
-### How can I listen on ports 80 and 443? Do I have to run as root?
-
-On Linux, you can use `setcap` to grant your binary the permission to bind low ports:
-
-```bash
-$ sudo setcap cap_net_bind_service=+ep /path/to/your/binary
-```
-
-and then you will not need to run with root privileges.
-
-
-## Contributing
-
-We welcome your contributions! Please see our **[contributing guidelines](https://github.com/caddyserver/certmagic/blob/master/.github/CONTRIBUTING.md)** for instructions.
-
-
-## Project History
-
-CertMagic is the core of Caddy's advanced TLS automation code, extracted into a library. The underlying ACME client implementation is [ACMEz](https://github.com/mholt/acmez). CertMagic's code was originally a central part of Caddy even before Let's Encrypt entered public beta in 2015.
-
-In the years since then, Caddy's TLS automation techniques have been widely adopted, tried and tested in production, and served millions of sites and secured trillions of connections.
-
-Now, CertMagic is _the actual library used by Caddy_. It's incredibly powerful and feature-rich, but also easy to use for simple Go programs: one line of code can enable fully-automated HTTPS applications with HTTP->HTTPS redirects.
-
-Caddy is known for its robust HTTPS+ACME features. When ACME certificate authorities have had outages, in some cases Caddy was the only major client that didn't experience any downtime. Caddy can weather OCSP outages lasting days, or CA outages lasting weeks, without taking your sites offline.
-
-Caddy was also the first to sport "on-demand" issuance technology, which obtains certificates during the first TLS handshake for an allowed SNI name.
-
-Consequently, CertMagic brings all these (and more) features and capabilities right into your own Go programs.
-
-You can [watch a 2016 dotGo talk](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) by the author of this library about using ACME to automate certificate management in Go programs:
-
-[![Matthew Holt speaking at dotGo 2016 about ACME in Go](https://user-images.githubusercontent.com/1128849/49921557-2d506780-fe6b-11e8-97bf-6053b6b4eb48.png)](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme)
-
-
-
-## Credits and License
-
-CertMagic is a project by [Matthew Holt](https://twitter.com/mholt6), who is the author; and various contributors, who are credited in the commit history of either CertMagic or Caddy.
-
-CertMagic is licensed under Apache 2.0, an open source license. For convenience, its main points are summarized as follows (but this is no replacement for the actual license text):
-
-- The author owns the copyright to this code
-- Use, distribute, and modify the software freely
-- Private and internal use is allowed
-- License text and copyright notices must stay intact and be included with distributions
-- Any and all changes to the code must be documented
diff --git a/vendor/github.com/caddyserver/certmagic/account.go b/vendor/github.com/caddyserver/certmagic/account.go
deleted file mode 100644
index 8633f92f..00000000
--- a/vendor/github.com/caddyserver/certmagic/account.go
+++ /dev/null
@@ -1,419 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "encoding/json"
- "fmt"
- "io"
- "os"
- "path"
- "sort"
- "strings"
- "sync"
-
- "github.com/mholt/acmez/acme"
-)
-
-// getAccount either loads or creates a new account, depending on if
-// an account can be found in storage for the given CA + email combo.
-func (am *ACMEManager) getAccount(ca, email string) (acme.Account, error) {
- acct, err := am.loadAccount(ca, email)
- if err != nil {
- if _, ok := err.(ErrNotExist); ok {
- return am.newAccount(email)
- }
- return acct, err
- }
- return acct, err
-}
-
-// loadAccount loads an account from storage, but does not create a new one.
-func (am *ACMEManager) loadAccount(ca, email string) (acme.Account, error) {
- regBytes, err := am.config.Storage.Load(am.storageKeyUserReg(ca, email))
- if err != nil {
- return acme.Account{}, err
- }
- keyBytes, err := am.config.Storage.Load(am.storageKeyUserPrivateKey(ca, email))
- if err != nil {
- return acme.Account{}, err
- }
-
- var acct acme.Account
- err = json.Unmarshal(regBytes, &acct)
- if err != nil {
- return acct, err
- }
- acct.PrivateKey, err = decodePrivateKey(keyBytes)
- if err != nil {
- return acct, fmt.Errorf("could not decode account's private key: %v", err)
- }
-
- return acct, nil
-}
-
-// newAccount generates a new private key for a new ACME account, but
-// it does not register or save the account.
-func (*ACMEManager) newAccount(email string) (acme.Account, error) {
- var acct acme.Account
- if email != "" {
- acct.Contact = []string{"mailto:" + email} // TODO: should we abstract the contact scheme?
- }
- privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- if err != nil {
- return acct, fmt.Errorf("generating private key: %v", err)
- }
- acct.PrivateKey = privateKey
- return acct, nil
-}
-
-// GetAccount first tries loading the account with the associated private key from storage.
-// If it does not exist in storage, it will be retrieved from the ACME server and added to storage.
-// The account must already exist; it does not create a new account.
-func (am *ACMEManager) GetAccount(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
- account, err := am.loadAccountByKey(ctx, privateKeyPEM)
- if err != nil {
- if _, ok := err.(ErrNotExist); ok {
- account, err = am.lookUpAccount(ctx, privateKeyPEM)
- } else {
- return account, err
- }
- }
- return account, err
-}
-
-// loadAccountByKey loads the account with the given private key from storage, if it exists.
-// If it does not exist, an error of type ErrNotExist is returned. This is not very efficient
-// for lots of accounts.
-func (am *ACMEManager) loadAccountByKey(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
- accountList, err := am.config.Storage.List(am.storageKeyUsersPrefix(am.CA), false)
- if err != nil {
- return acme.Account{}, err
- }
- for _, accountFolderKey := range accountList {
- email := path.Base(accountFolderKey)
- keyBytes, err := am.config.Storage.Load(am.storageKeyUserPrivateKey(am.CA, email))
- if err != nil {
- return acme.Account{}, err
- }
- if bytes.Equal(bytes.TrimSpace(keyBytes), bytes.TrimSpace(privateKeyPEM)) {
- return am.loadAccount(am.CA, email)
- }
- }
- return acme.Account{}, ErrNotExist(fmt.Errorf("no account found with that key"))
-}
-
-// lookUpAccount looks up the account associated with privateKeyPEM from the ACME server.
-// If the account is found by the server, it will be saved to storage and returned.
-func (am *ACMEManager) lookUpAccount(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
- client, err := am.newACMEClient(false)
- if err != nil {
- return acme.Account{}, fmt.Errorf("creating ACME client: %v", err)
- }
-
- privateKey, err := decodePrivateKey([]byte(privateKeyPEM))
- if err != nil {
- return acme.Account{}, fmt.Errorf("decoding private key: %v", err)
- }
-
- // look up the account
- account := acme.Account{PrivateKey: privateKey}
- account, err = client.GetAccount(ctx, account)
- if err != nil {
- return acme.Account{}, fmt.Errorf("looking up account with server: %v", err)
- }
-
- // save the account details to storage
- err = am.saveAccount(client.Directory, account)
- if err != nil {
- return account, fmt.Errorf("could not save account to storage: %v", err)
- }
-
- return account, nil
-}
-
-// saveAccount persists an ACME account's info and private key to storage.
-// It does NOT register the account via ACME or prompt the user.
-func (am *ACMEManager) saveAccount(ca string, account acme.Account) error {
- regBytes, err := json.MarshalIndent(account, "", "\t")
- if err != nil {
- return err
- }
- keyBytes, err := encodePrivateKey(account.PrivateKey)
- if err != nil {
- return err
- }
- // extract primary contact (email), without scheme (e.g. "mailto:")
- primaryContact := getPrimaryContact(account)
- all := []keyValue{
- {
- key: am.storageKeyUserReg(ca, primaryContact),
- value: regBytes,
- },
- {
- key: am.storageKeyUserPrivateKey(ca, primaryContact),
- value: keyBytes,
- },
- }
- return storeTx(am.config.Storage, all)
-}
-
-// getEmail does everything it can to obtain an email address
-// from the user within the scope of memory and storage to use
-// for ACME TLS. If it cannot get an email address, it does nothing
-// (if the user is prompted, it will warn them of
-// the consequences of an empty email). This function MAY prompt
-// the user for input. If allowPrompts is false, the user
-// will NOT be prompted and an empty email may be returned.
-func (am *ACMEManager) getEmail(allowPrompts bool) error {
- leEmail := am.Email
-
- // First try package default email, or a discovered email address
- if leEmail == "" {
- leEmail = DefaultACME.Email
- }
- if leEmail == "" {
- discoveredEmailMu.Lock()
- leEmail = discoveredEmail
- discoveredEmailMu.Unlock()
- }
-
- // Then try to get most recent user email from storage
- var gotRecentEmail bool
- if leEmail == "" {
- leEmail, gotRecentEmail = am.mostRecentAccountEmail(am.CA)
- }
- if !gotRecentEmail && leEmail == "" && allowPrompts {
- // Looks like there is no email address readily available,
- // so we will have to ask the user if we can.
- var err error
- leEmail, err = am.promptUserForEmail()
- if err != nil {
- return err
- }
-
- // User might have just signified their agreement
- am.Agreed = DefaultACME.Agreed
- }
-
- // save the email for later and ensure it is consistent
- // for repeated use; then update cfg with the email
- leEmail = strings.TrimSpace(strings.ToLower(leEmail))
- discoveredEmailMu.Lock()
- if discoveredEmail == "" {
- discoveredEmail = leEmail
- }
- discoveredEmailMu.Unlock()
- am.Email = leEmail
-
- return nil
-}
-
-// promptUserForEmail prompts the user for an email address
-// and returns the email address they entered (which could
-// be the empty string). If no error is returned, then Agreed
-// will also be set to true, since continuing through the
-// prompt signifies agreement.
-func (am *ACMEManager) promptUserForEmail() (string, error) {
- // prompt the user for an email address and terms agreement
- reader := bufio.NewReader(stdin)
- am.promptUserAgreement("")
- fmt.Println("Please enter your email address to signify agreement and to be notified")
- fmt.Println("in case of issues. You can leave it blank, but we don't recommend it.")
- fmt.Print(" Email address: ")
- leEmail, err := reader.ReadString('\n')
- if err != nil && err != io.EOF {
- return "", fmt.Errorf("reading email address: %v", err)
- }
- leEmail = strings.TrimSpace(leEmail)
- DefaultACME.Agreed = true
- return leEmail, nil
-}
-
-// promptUserAgreement simply outputs the standard user
-// agreement prompt with the given agreement URL.
-// It outputs a newline after the message.
-func (am *ACMEManager) promptUserAgreement(agreementURL string) {
- userAgreementPrompt := `Your sites will be served over HTTPS automatically using an automated CA.
-By continuing, you agree to the CA's terms of service`
- if agreementURL == "" {
- fmt.Printf("\n\n%s.\n", userAgreementPrompt)
- return
- }
- fmt.Printf("\n\n%s at:\n %s\n", userAgreementPrompt, agreementURL)
-}
-
-// askUserAgreement prompts the user to agree to the agreement
-// at the given agreement URL via stdin. It returns whether the
-// user agreed or not.
-func (am *ACMEManager) askUserAgreement(agreementURL string) bool {
- am.promptUserAgreement(agreementURL)
- fmt.Print("Do you agree to the terms? (y/n): ")
-
- reader := bufio.NewReader(stdin)
- answer, err := reader.ReadString('\n')
- if err != nil {
- return false
- }
- answer = strings.ToLower(strings.TrimSpace(answer))
-
- return answer == "y" || answer == "yes"
-}
-
-func storageKeyACMECAPrefix(issuerKey string) string {
- return path.Join(prefixACME, StorageKeys.Safe(issuerKey))
-}
-
-func (am *ACMEManager) storageKeyCAPrefix(caURL string) string {
- return storageKeyACMECAPrefix(am.issuerKey(caURL))
-}
-
-func (am *ACMEManager) storageKeyUsersPrefix(caURL string) string {
- return path.Join(am.storageKeyCAPrefix(caURL), "users")
-}
-
-func (am *ACMEManager) storageKeyUserPrefix(caURL, email string) string {
- if email == "" {
- email = emptyEmail
- }
- return path.Join(am.storageKeyUsersPrefix(caURL), StorageKeys.Safe(email))
-}
-
-func (am *ACMEManager) storageKeyUserReg(caURL, email string) string {
- return am.storageSafeUserKey(caURL, email, "registration", ".json")
-}
-
-func (am *ACMEManager) storageKeyUserPrivateKey(caURL, email string) string {
- return am.storageSafeUserKey(caURL, email, "private", ".key")
-}
-
-// storageSafeUserKey returns a storage key for the given email address,
-// using defaultFilename if a filename cannot be derived from the email,
-// and ending the filename with the given extension.
-func (am *ACMEManager) storageSafeUserKey(ca, email, defaultFilename, extension string) string {
- if email == "" {
- email = emptyEmail
- }
- email = strings.ToLower(email)
- filename := am.emailUsername(email)
- if filename == "" {
- filename = defaultFilename
- }
- filename = StorageKeys.Safe(filename)
- return path.Join(am.storageKeyUserPrefix(ca, email), filename+extension)
-}
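-
-// For example (illustrative): with CA
-// "https://acme-v02.api.letsencrypt.org/directory" and email
-// "me@example.com", the account's private key lands at roughly
-//
-//    acme/<issuer key>/users/<safe email>/me.key
-//
-// where <issuer key> and <safe email> are the sanitized forms produced
-// by issuerKey and StorageKeys.Safe.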
-
-// emailUsername returns the username portion of an email address (part before
-// '@') or the original input if it can't find the "@" symbol.
-func (*ACMEManager) emailUsername(email string) string {
- at := strings.Index(email, "@")
- if at == -1 {
- return email
- } else if at == 0 {
- return email[1:]
- }
- return email[:at]
-}
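-
-// For example (illustrative):
-//
-//    emailUsername("me@example.com") // => "me"
-//    emailUsername("@example.com")   // => "example.com"
-//    emailUsername("no-at-sign")     // => "no-at-sign"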
-
-// mostRecentAccountEmail finds the most recently-written account file
-// in storage. Since this is part of a complex sequence to get a user
-// account, errors here are discarded to simplify code flow in
-// the caller; they are not important here anyway.
-func (am *ACMEManager) mostRecentAccountEmail(caURL string) (string, bool) {
- accountList, err := am.config.Storage.List(am.storageKeyUsersPrefix(caURL), false)
- if err != nil || len(accountList) == 0 {
- return "", false
- }
-
- // get all the key infos ahead of sorting, because
- // we might filter some out
- stats := make(map[string]KeyInfo)
- for i := 0; i < len(accountList); i++ {
- u := accountList[i]
- keyInfo, err := am.config.Storage.Stat(u)
- if err != nil {
- continue
- }
- if keyInfo.IsTerminal {
- // I found a bug when macOS created a .DS_Store file in
- // the users folder, and CertMagic tried to use that as
- // the user email because it was newer than the other one
- // which existed... sure, this isn't a perfect fix but
- // frankly one's OS shouldn't mess with the data folder
- // in the first place.
- accountList = append(accountList[:i], accountList[i+1:]...)
- i--
- continue
- }
- stats[u] = keyInfo
- }
-
- sort.Slice(accountList, func(i, j int) bool {
- iInfo := stats[accountList[i]]
- jInfo := stats[accountList[j]]
- return jInfo.Modified.Before(iInfo.Modified)
- })
-
- if len(accountList) == 0 {
- return "", false
- }
-
- account, err := am.getAccount(caURL, path.Base(accountList[0]))
- if err != nil {
- return "", false
- }
-
- return getPrimaryContact(account), true
-}
-
-// getPrimaryContact returns the first contact on the account (if any)
-// without the scheme. (The contact is assumed to be an email address.)
-func getPrimaryContact(account acme.Account) string {
- // TODO: should this be abstracted with some lower-level helper?
- var primaryContact string
- if len(account.Contact) > 0 {
- primaryContact = account.Contact[0]
- if idx := strings.Index(primaryContact, ":"); idx >= 0 {
- primaryContact = primaryContact[idx+1:]
- }
- }
- return primaryContact
-}
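-
-// For example (illustrative): if account.Contact is
-// []string{"mailto:me@example.com"}, getPrimaryContact returns
-// "me@example.com".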
-
-// When an email address is not explicitly specified, we can remember
-// the last one we discovered to avoid having to ask again later.
-// (We used to store this in DefaultACME.Email but it was racy; see #127)
-var (
- discoveredEmail string
- discoveredEmailMu sync.Mutex
-)
-
-// agreementTestURL is set during tests to skip requiring
-// setting up an entire ACME CA endpoint.
-var agreementTestURL string
-
-// stdin is used to read the user's input if prompted;
-// this is changed by tests during tests.
-var stdin = io.ReadWriter(os.Stdin)
-
-// emptyEmail is the name of the folder for accounts where the
-// email address was not provided; a default "username" of sorts,
-// but only for local/storage use, never sent to the CA.
-const emptyEmail = "default"
diff --git a/vendor/github.com/caddyserver/certmagic/acmeclient.go b/vendor/github.com/caddyserver/certmagic/acmeclient.go
deleted file mode 100644
index cc876b92..00000000
--- a/vendor/github.com/caddyserver/certmagic/acmeclient.go
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- weakrand "math/rand"
- "net"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/mholt/acmez"
- "github.com/mholt/acmez/acme"
- "go.uber.org/zap"
-)
-
-func init() {
- weakrand.Seed(time.Now().UnixNano())
-}
-
-// acmeClient holds state necessary to perform ACME operations
-// for certificate management with an ACME account. Call
-// ACMEManager.newACMEClientWithAccount() to get a valid one.
-type acmeClient struct {
- mgr *ACMEManager
- acmeClient *acmez.Client
- account acme.Account
-}
-
-// newACMEClientWithAccount creates an ACME client ready to use with an account, including
-// loading one from storage or registering a new account with the CA if necessary. If
-// useTestCA is true, am.TestCA will be used if set; otherwise, the primary CA will be used.
-func (am *ACMEManager) newACMEClientWithAccount(ctx context.Context, useTestCA, interactive bool) (*acmeClient, error) {
- // first, get underlying ACME client
- client, err := am.newACMEClient(useTestCA)
- if err != nil {
- return nil, err
- }
-
- // look up or create the ACME account
- var account acme.Account
- if am.AccountKeyPEM != "" {
- account, err = am.GetAccount(ctx, []byte(am.AccountKeyPEM))
- } else {
- account, err = am.getAccount(client.Directory, am.Email)
- }
- if err != nil {
- return nil, fmt.Errorf("getting ACME account: %v", err)
- }
-
- // register account if it is new
- if account.Status == "" {
- if am.NewAccountFunc != nil {
- account, err = am.NewAccountFunc(ctx, am, account)
- if err != nil {
- return nil, fmt.Errorf("account pre-registration callback: %v", err)
- }
- }
-
- // agree to terms
- if interactive {
- if !am.Agreed {
- var termsURL string
- dir, err := client.GetDirectory(ctx)
- if err != nil {
- return nil, fmt.Errorf("getting directory: %w", err)
- }
- if dir.Meta != nil {
- termsURL = dir.Meta.TermsOfService
- }
- if termsURL != "" {
- am.Agreed = am.askUserAgreement(termsURL)
- if !am.Agreed {
- return nil, fmt.Errorf("user must agree to CA terms")
- }
- }
- }
- } else {
- // can't prompt a user who isn't there; they should
- // have reviewed the terms beforehand
- am.Agreed = true
- }
- account.TermsOfServiceAgreed = am.Agreed
-
- // associate account with external binding, if configured
- if am.ExternalAccount != nil {
- err := account.SetExternalAccountBinding(ctx, client.Client, *am.ExternalAccount)
- if err != nil {
- return nil, err
- }
- }
-
- // create account
- account, err = client.NewAccount(ctx, account)
- if err != nil {
- return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err)
- }
-
- // persist the account to storage
- err = am.saveAccount(client.Directory, account)
- if err != nil {
- return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err)
- }
- }
-
- c := &acmeClient{
- mgr: am,
- acmeClient: client,
- account: account,
- }
-
- return c, nil
-}
-
-// newACMEClient creates a new underlying ACME client using the settings in am,
-// independent of any particular ACME account. If useTestCA is true, am.TestCA
-// will be used if it is set; otherwise, the primary CA will be used.
-func (am *ACMEManager) newACMEClient(useTestCA bool) (*acmez.Client, error) {
- // ensure defaults are filled in
- var caURL string
- if useTestCA {
- caURL = am.TestCA
- }
- if caURL == "" {
- caURL = am.CA
- }
- if caURL == "" {
- caURL = DefaultACME.CA
- }
- certObtainTimeout := am.CertObtainTimeout
- if certObtainTimeout == 0 {
- certObtainTimeout = DefaultACME.CertObtainTimeout
- }
-
- // ensure endpoint is secure (assume HTTPS if scheme is missing)
- if !strings.Contains(caURL, "://") {
- caURL = "https://" + caURL
- }
- u, err := url.Parse(caURL)
- if err != nil {
- return nil, err
- }
- if u.Scheme != "https" && !isLoopback(u.Host) && !isInternal(u.Host) {
- return nil, fmt.Errorf("%s: insecure CA URL (HTTPS required)", caURL)
- }
-
- // set up the dialers and resolver for the ACME client's HTTP client
- dialer := &net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 2 * time.Minute,
- }
- if am.Resolver != "" {
- dialer.Resolver = &net.Resolver{
- PreferGo: true,
- Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
- return (&net.Dialer{
- Timeout: 15 * time.Second,
- }).DialContext(ctx, network, am.Resolver)
- },
- }
- }
-
- // TODO: we could potentially reuse the HTTP transport and client
- hc := am.httpClient // TODO: is this racy?
- if am.httpClient == nil {
- transport := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: dialer.DialContext,
- TLSHandshakeTimeout: 15 * time.Second,
- ResponseHeaderTimeout: 15 * time.Second,
- ExpectContinueTimeout: 2 * time.Second,
- ForceAttemptHTTP2: true,
- }
- if am.TrustedRoots != nil {
- transport.TLSClientConfig = &tls.Config{
- RootCAs: am.TrustedRoots,
- }
- }
-
- hc = &http.Client{
- Transport: transport,
- Timeout: HTTPTimeout,
- }
-
- am.httpClient = hc
- }
-
- client := &acmez.Client{
- Client: &acme.Client{
- Directory: caURL,
- PollTimeout: certObtainTimeout,
- UserAgent: buildUAString(),
- HTTPClient: hc,
- },
- ChallengeSolvers: make(map[string]acmez.Solver),
- }
- if am.Logger != nil {
- l := am.Logger.Named("acme_client")
- client.Client.Logger, client.Logger = l, l
- }
-
- // configure challenges (most of the time, the DNS challenge is
- // exclusive of the others because it is usually only used
- // in situations where the default challenges would fail)
- if am.DNS01Solver == nil {
- // enable HTTP-01 challenge
- if !am.DisableHTTPChallenge {
- useHTTPPort := HTTPChallengePort
- if HTTPPort > 0 && HTTPPort != HTTPChallengePort {
- useHTTPPort = HTTPPort
- }
- if am.AltHTTPPort > 0 {
- useHTTPPort = am.AltHTTPPort
- }
- client.ChallengeSolvers[acme.ChallengeTypeHTTP01] = distributedSolver{
- storage: am.config.Storage,
- storageKeyIssuerPrefix: am.storageKeyCAPrefix(client.Directory),
- solver: &httpSolver{
- acmeManager: am,
- address: net.JoinHostPort(am.ListenHost, strconv.Itoa(useHTTPPort)),
- },
- }
- }
-
- // enable TLS-ALPN-01 challenge
- if !am.DisableTLSALPNChallenge {
- useTLSALPNPort := TLSALPNChallengePort
- if HTTPSPort > 0 && HTTPSPort != TLSALPNChallengePort {
- useTLSALPNPort = HTTPSPort
- }
- if am.AltTLSALPNPort > 0 {
- useTLSALPNPort = am.AltTLSALPNPort
- }
- client.ChallengeSolvers[acme.ChallengeTypeTLSALPN01] = distributedSolver{
- storage: am.config.Storage,
- storageKeyIssuerPrefix: am.storageKeyCAPrefix(client.Directory),
- solver: &tlsALPNSolver{
- config: am.config,
- address: net.JoinHostPort(am.ListenHost, strconv.Itoa(useTLSALPNPort)),
- },
- }
- }
- } else {
- // use DNS challenge exclusively
- client.ChallengeSolvers[acme.ChallengeTypeDNS01] = am.DNS01Solver
- }
-
- // Wrap solvers in our wrapper so that we can keep track of challenge
- // info. This is useful for solving challenges globally as a process:
- // usually there is only one process that can solve the HTTP and
- // TLS-ALPN challenges, and only one server in that process that can
- // bind the necessary port(s). If a server listening on a different
- // port needed a certificate, it would have to know about the other
- // server listening on that port and somehow convey its challenge info
- // or share its config, which isn't always feasible. The wrapper
- // instead accesses a global challenge memory, so unrelated servers in
- // this process can all solve each others' challenges without having
- // to know about each other. Caddy's admin endpoint uses this
- // functionality, since it and the HTTP/TLS modules do not know about
- // each other.
- // (Doing this in a separate loop ensures that even if we expose
- // solver config to users later, we will still wrap their solvers.)
- for name, solver := range client.ChallengeSolvers {
- client.ChallengeSolvers[name] = solverWrapper{solver}
- }
-
- return client, nil
-}
-
-func (c *acmeClient) throttle(ctx context.Context, names []string) error {
- // throttling is scoped to CA + account email
- rateLimiterKey := c.acmeClient.Directory + "," + c.mgr.Email
- rateLimitersMu.Lock()
- rl, ok := rateLimiters[rateLimiterKey]
- if !ok {
- rl = NewRateLimiter(RateLimitEvents, RateLimitEventsWindow)
- rateLimiters[rateLimiterKey] = rl
- // TODO: stop rate limiter when it is garbage-collected...
- }
- rateLimitersMu.Unlock()
- if c.mgr.Logger != nil {
- c.mgr.Logger.Info("waiting on internal rate limiter",
- zap.Strings("identifiers", names),
- zap.String("ca", c.acmeClient.Directory),
- zap.String("account", c.mgr.Email),
- )
- }
- err := rl.Wait(ctx)
- if err != nil {
- return err
- }
- if c.mgr.Logger != nil {
- c.mgr.Logger.Info("done waiting on internal rate limiter",
- zap.Strings("identifiers", names),
- zap.String("ca", c.acmeClient.Directory),
- zap.String("account", c.mgr.Email),
- )
- }
- return nil
-}
-
-func (c *acmeClient) usingTestCA() bool {
- return c.mgr.TestCA != "" && c.acmeClient.Directory == c.mgr.TestCA
-}
-
-func (c *acmeClient) revoke(ctx context.Context, cert *x509.Certificate, reason int) error {
- return c.acmeClient.RevokeCertificate(ctx, c.account,
- cert, c.account.PrivateKey, reason)
-}
-
-func buildUAString() string {
- ua := "CertMagic"
- if UserAgent != "" {
- ua = UserAgent + " " + ua
- }
- return ua
-}
-
-// These internal rate limits are designed to prevent accidentally
-// firehosing a CA's ACME endpoints. They are not intended to
-// replace or replicate the CA's actual rate limits.
-//
-// Let's Encrypt's rate limits can be found here:
-// https://letsencrypt.org/docs/rate-limits/
-//
-// Currently (as of December 2019), Let's Encrypt's most relevant
-// rate limit for large deployments is 300 new orders per account
-// per 3 hours (on average, or best case, that's about 1 every 36
-// seconds, or 2 every 72 seconds, etc.); but it's not reasonable
-// to try to assume that our internal state is the same as the CA's
-// (due to process restarts, config changes, failed validations,
-// etc.) and ultimately, only the CA's actual rate limiter is the
-// authority. Thus, our own rate limiters do not attempt to enforce
-// external rate limits. Doing so causes problems when the domains
-// are not in our control (i.e. serving customer sites) and/or lots
-// of domains fail validation: they clog our internal rate limiter
-// and nearly starve out (or at least slow down) the other domains
-// that need certificates. Failed transactions are already retried
-// with exponential backoff, so adding in rate limiting can slow
-// things down even more.
-//
-// Instead, the point of our internal rate limiter is to avoid
-// hammering the CA's endpoint when there are thousands or even
-// millions of certificates under management. Our goal is to
-// allow small bursts in a relatively short timeframe so as to
-// not block any one domain for too long, without unleashing
-// thousands of requests to the CA at once.
-var (
- rateLimiters = make(map[string]*RingBufferRateLimiter)
- rateLimitersMu sync.RWMutex
-
- // RateLimitEvents is how many new events can be allowed
- // in RateLimitEventsWindow.
- RateLimitEvents = 20
-
- // RateLimitEventsWindow is the size of the sliding
- // window that throttles events.
- RateLimitEventsWindow = 1 * time.Minute
-)
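-
-// Worked example (illustrative) with the defaults above: a single
-// (CA, account) pair may begin at most 20 certificate transactions in
-// any sliding 1-minute window, so a burst of 100 orders takes roughly
-// 5 minutes to clear this internal limiter, before any rate limits
-// the CA itself enforces.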
-
-// Some default values passed down to the underlying ACME client.
-var (
- UserAgent string
- HTTPTimeout = 30 * time.Second
-)
diff --git a/vendor/github.com/caddyserver/certmagic/acmemanager.go b/vendor/github.com/caddyserver/certmagic/acmemanager.go
deleted file mode 100644
index 82b6cc12..00000000
--- a/vendor/github.com/caddyserver/certmagic/acmemanager.go
+++ /dev/null
@@ -1,466 +0,0 @@
-package certmagic
-
-import (
- "context"
- "crypto/x509"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "sort"
- "strings"
- "time"
-
- "github.com/mholt/acmez"
- "github.com/mholt/acmez/acme"
- "go.uber.org/zap"
-)
-
-// ACMEManager gets certificates using ACME. It implements the PreChecker,
-// Issuer, and Revoker interfaces.
-//
-// It is NOT VALID to use an ACMEManager without calling NewACMEManager().
-// It fills in any default values from DefaultACME as well as setting up
-// internal state that is necessary for valid use. Always call
-// NewACMEManager() to get a valid ACMEManager value.
-type ACMEManager struct {
- // The endpoint of the directory for the ACME
- // CA we are to use
- CA string
-
- // TestCA is the endpoint of the directory for
- // an ACME CA to use to test domain validation,
- // but any certs obtained from this CA are
- // discarded
- TestCA string
-
- // The email address to use when creating or
- // selecting an existing ACME server account
- Email string
-
- // The PEM-encoded private key of the ACME
- // account to use; only needed if the account
- // is already created on the server and
- // can be looked up with the ACME protocol
- AccountKeyPEM string
-
- // Set to true if agreed to the CA's
- // subscriber agreement
- Agreed bool
-
- // An optional external account to associate
- // with this ACME account
- ExternalAccount *acme.EAB
-
- // Disable all HTTP challenges
- DisableHTTPChallenge bool
-
- // Disable all TLS-ALPN challenges
- DisableTLSALPNChallenge bool
-
- // The host (ONLY the host, not port) to listen
- // on if necessary to start a listener to solve
- // an ACME challenge
- ListenHost string
-
- // The alternate port to use for the ACME HTTP
- // challenge; if non-empty, this port will be
- // used instead of HTTPChallengePort to spin up
- // a listener for the HTTP challenge
- AltHTTPPort int
-
- // The alternate port to use for the ACME
- // TLS-ALPN challenge; the system must forward
- // TLSALPNChallengePort to this port for the
- // challenge to succeed
- AltTLSALPNPort int
-
- // The solver for the dns-01 challenge;
- // usually this is a DNS01Solver value
- // from this package
- DNS01Solver acmez.Solver
-
- // TrustedRoots specifies a pool of root CA
- // certificates to trust when communicating
- // over a network to a peer.
- TrustedRoots *x509.CertPool
-
- // The maximum amount of time to allow for
- // obtaining a certificate. If empty, the
- // default from the underlying ACME lib is
- // used. If set, it must not be too low so
- // as to cancel challenges too early.
- CertObtainTimeout time.Duration
-
- // Address of custom DNS resolver to be used
- // when communicating with ACME server
- Resolver string
-
- // Callback function that is called before a
- // new ACME account is registered with the CA;
- // it allows for last-second config changes
- // of the ACMEManager and the Account.
- // (TODO: this feature is still EXPERIMENTAL and subject to change)
- NewAccountFunc func(context.Context, *ACMEManager, acme.Account) (acme.Account, error)
-
- // Preferences for selecting alternate
- // certificate chains
- PreferredChains ChainPreference
-
- // Set a logger to enable logging
- Logger *zap.Logger
-
- config *Config
- httpClient *http.Client
-}
-
-// NewACMEManager constructs a valid ACMEManager based on a template
-// configuration; any empty values will be filled in by defaults in
-// DefaultACME, and if any required values are still empty, sensible
-// defaults will be used.
-//
-// Typically, you'll create the Config first with New() or NewDefault(),
-// then call NewACMEManager(), then assign the return value to the Issuers
-// field of the Config.
-func NewACMEManager(cfg *Config, template ACMEManager) *ACMEManager {
- if cfg == nil {
- panic("cannot make valid ACMEManager without an associated CertMagic config")
- }
- if template.CA == "" {
- template.CA = DefaultACME.CA
- }
- if template.TestCA == "" && template.CA == DefaultACME.CA {
- // only use the default test CA if the CA is also
- // the default CA; no point in testing against
- // Let's Encrypt's staging server if we are not
- // using their production server too
- template.TestCA = DefaultACME.TestCA
- }
- if template.Email == "" {
- template.Email = DefaultACME.Email
- }
- if template.AccountKeyPEM == "" {
- template.AccountKeyPEM = DefaultACME.AccountKeyPEM
- }
- if !template.Agreed {
- template.Agreed = DefaultACME.Agreed
- }
- if template.ExternalAccount == nil {
- template.ExternalAccount = DefaultACME.ExternalAccount
- }
- if !template.DisableHTTPChallenge {
- template.DisableHTTPChallenge = DefaultACME.DisableHTTPChallenge
- }
- if !template.DisableTLSALPNChallenge {
- template.DisableTLSALPNChallenge = DefaultACME.DisableTLSALPNChallenge
- }
- if template.ListenHost == "" {
- template.ListenHost = DefaultACME.ListenHost
- }
- if template.AltHTTPPort == 0 {
- template.AltHTTPPort = DefaultACME.AltHTTPPort
- }
- if template.AltTLSALPNPort == 0 {
- template.AltTLSALPNPort = DefaultACME.AltTLSALPNPort
- }
- if template.DNS01Solver == nil {
- template.DNS01Solver = DefaultACME.DNS01Solver
- }
- if template.TrustedRoots == nil {
- template.TrustedRoots = DefaultACME.TrustedRoots
- }
- if template.CertObtainTimeout == 0 {
- template.CertObtainTimeout = DefaultACME.CertObtainTimeout
- }
- if template.Resolver == "" {
- template.Resolver = DefaultACME.Resolver
- }
- if template.NewAccountFunc == nil {
- template.NewAccountFunc = DefaultACME.NewAccountFunc
- }
- if template.Logger == nil {
- template.Logger = DefaultACME.Logger
- }
- template.config = cfg
- return &template
-}
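-
-// Usage sketch (illustrative; the email address is hypothetical),
-// following the doc comment above:
-//
-//    cfg := NewDefault()
-//    am := NewACMEManager(cfg, ACMEManager{
-//        Email:  "admin@example.com",
-//        Agreed: true,
-//    })
-//    cfg.Issuers = []Issuer{am}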
-
-// IssuerKey returns the unique issuer key for the
-// configured CA endpoint.
-func (am *ACMEManager) IssuerKey() string {
- return am.issuerKey(am.CA)
-}
-
-func (*ACMEManager) issuerKey(ca string) string {
- key := ca
- if caURL, err := url.Parse(key); err == nil {
- key = caURL.Host
- if caURL.Path != "" {
- // keep the path, but make sure it's a single
- // component (i.e. no forward slashes, and for
- // good measure, no backward slashes either)
- const hyphen = "-"
- repl := strings.NewReplacer(
- "/", hyphen,
- "\\", hyphen,
- )
- path := strings.Trim(repl.Replace(caURL.Path), hyphen)
- if path != "" {
- key += hyphen + path
- }
- }
- }
- return key
-}
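-
-// For example (illustrative):
-//
-//    issuerKey("https://acme-v02.api.letsencrypt.org/directory")
-//    // => "acme-v02.api.letsencrypt.org-directory"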
-
-// PreCheck performs a few simple checks before obtaining or
-// renewing a certificate with ACME. If a public CA such as
-// Let's Encrypt or ZeroSSL is configured, it returns an error
-// for any subject that does not qualify for a public certificate.
-// It also ensures that an email address is available.
-func (am *ACMEManager) PreCheck(_ context.Context, names []string, interactive bool) error {
- publicCA := strings.Contains(am.CA, "api.letsencrypt.org") || strings.Contains(am.CA, "acme.zerossl.com")
- if publicCA {
- for _, name := range names {
- if !SubjectQualifiesForPublicCert(name) {
- return fmt.Errorf("subject does not qualify for a public certificate: %s", name)
- }
- }
- }
- return am.getEmail(interactive)
-}
-
-// Issue implements the Issuer interface. It obtains a certificate for the given csr using
-// the ACME configuration am.
-func (am *ACMEManager) Issue(ctx context.Context, csr *x509.CertificateRequest) (*IssuedCertificate, error) {
- if am.config == nil {
- panic("missing config pointer (must use NewACMEManager)")
- }
-
- var isRetry bool
- if attempts, ok := ctx.Value(AttemptsCtxKey).(*int); ok {
- isRetry = *attempts > 0
- }
-
- cert, usedTestCA, err := am.doIssue(ctx, csr, isRetry)
- if err != nil {
- return nil, err
- }
-
- // important to note that usedTestCA is not necessarily the same as isRetry
- // (usedTestCA can be true if the main CA and the test CA happen to be the same)
- if isRetry && usedTestCA && am.CA != am.TestCA {
- // succeeded with testing endpoint, so try again with production endpoint
- // (only if the production endpoint is different from the testing endpoint)
- // TODO: This logic is imperfect and could benefit from some refinement.
- // The two CA endpoints likely have different states, which could cause one
- // to succeed and the other to fail, even if it's not a validation error.
- // Two common cases would be:
- // 1) Rate limiter state. This is more likely to cause prod to fail while
- // staging succeeds, since prod usually has tighter rate limits. Thus, if
- // initial attempt failed in prod due to rate limit, first retry (on staging)
- // might succeed, and then trying prod again right away would probably still
- // fail; normally this would terminate retries but the right thing to do in
- // this case is to back off and retry again later. We could refine this logic
- // to stick with the production endpoint on retries unless the error changes.
- // 2) Cached authorizations state. If a domain validates successfully with
- // one endpoint, but then the other endpoint is used, it might fail, e.g. if
- // DNS was just changed or is still propagating. In this case, the second CA
- // should continue to be retried with backoff, without switching back to the
- // other endpoint. This is more likely to happen if a user is testing with
- // the staging CA as the main CA, then changes their configuration once they
- // think they are ready for the production endpoint.
- cert, _, err = am.doIssue(ctx, csr, false)
- if err != nil {
- // succeeded with test CA but failed just now with the production CA;
- // either we are observing differing internal states of each CA that will
- // work out with time, or there is a bug/misconfiguration somewhere
- // externally; it is hard to tell which. One easy cue is whether the
- // error is specifically a 429 (Too Many Requests); if so, we should
- // probably keep retrying
- var problem acme.Problem
- if errors.As(err, &problem) {
- if problem.Status == http.StatusTooManyRequests {
- // DON'T abort retries; the test CA succeeded (even
- // if it's cached, it recently succeeded!) so we just
- // need to keep trying (with backoff) until this CA's
- // rate limits expire...
- // TODO: as mentioned in comment above, we would benefit
- // by pinning the main CA at this point instead of
- // needlessly retrying with the test CA first each time
- return nil, err
- }
- }
- return nil, ErrNoRetry{err}
- }
- }
-
- return cert, err
-}
-
-func (am *ACMEManager) doIssue(ctx context.Context, csr *x509.CertificateRequest, useTestCA bool) (*IssuedCertificate, bool, error) {
- client, err := am.newACMEClientWithAccount(ctx, useTestCA, false)
- if err != nil {
- return nil, false, err
- }
- usingTestCA := client.usingTestCA()
-
- nameSet := namesFromCSR(csr)
-
- if !useTestCA {
- if err := client.throttle(ctx, nameSet); err != nil {
- return nil, usingTestCA, err
- }
- }
-
- certChains, err := client.acmeClient.ObtainCertificateUsingCSR(ctx, client.account, csr)
- if err != nil {
- return nil, usingTestCA, fmt.Errorf("%v %w (ca=%s)", nameSet, err, client.acmeClient.Directory)
- }
- if len(certChains) == 0 {
- return nil, usingTestCA, fmt.Errorf("no certificate chains")
- }
-
- preferredChain := am.selectPreferredChain(certChains)
-
- ic := &IssuedCertificate{
- Certificate: preferredChain.ChainPEM,
- Metadata: preferredChain,
- }
-
- return ic, usingTestCA, nil
-}
-
-// selectPreferredChain sorts and then filters the certificate chains to find the optimal
-// chain preferred by the client. If there's only one chain, that is returned without any
-// processing. If there are no matches, the first chain is returned.
-func (am *ACMEManager) selectPreferredChain(certChains []acme.Certificate) acme.Certificate {
- if len(certChains) == 1 {
- if am.Logger != nil && (len(am.PreferredChains.AnyCommonName) > 0 || len(am.PreferredChains.RootCommonName) > 0) {
- am.Logger.Debug("there is only one chain offered; selecting it regardless of preferences",
- zap.String("chain_url", certChains[0].URL))
- }
- return certChains[0]
- }
-
- if am.PreferredChains.Smallest != nil {
- if *am.PreferredChains.Smallest {
- sort.Slice(certChains, func(i, j int) bool {
- return len(certChains[i].ChainPEM) < len(certChains[j].ChainPEM)
- })
- } else {
- sort.Slice(certChains, func(i, j int) bool {
- return len(certChains[i].ChainPEM) > len(certChains[j].ChainPEM)
- })
- }
- }
-
- if len(am.PreferredChains.AnyCommonName) > 0 || len(am.PreferredChains.RootCommonName) > 0 {
- // in order to inspect, we need to decode their PEM contents
- decodedChains := make([][]*x509.Certificate, len(certChains))
- for i, chain := range certChains {
- certs, err := parseCertsFromPEMBundle(chain.ChainPEM)
- if err != nil {
- if am.Logger != nil {
- am.Logger.Error("unable to parse PEM certificate chain",
- zap.Int("chain", i),
- zap.Error(err))
- }
- continue
- }
- decodedChains[i] = certs
- }
-
- if len(am.PreferredChains.AnyCommonName) > 0 {
- for _, prefAnyCN := range am.PreferredChains.AnyCommonName {
- for i, chain := range decodedChains {
- for _, cert := range chain {
- if cert.Issuer.CommonName == prefAnyCN {
- if am.Logger != nil {
- am.Logger.Debug("found preferred certificate chain by issuer common name",
- zap.String("preference", prefAnyCN),
- zap.Int("chain", i))
- }
- return certChains[i]
- }
- }
- }
- }
- }
-
- if len(am.PreferredChains.RootCommonName) > 0 {
- for _, prefRootCN := range am.PreferredChains.RootCommonName {
- for i, chain := range decodedChains {
- if chain[len(chain)-1].Issuer.CommonName == prefRootCN {
- if am.Logger != nil {
- am.Logger.Debug("found preferred certificate chain by root common name",
- zap.String("preference", prefRootCN),
- zap.Int("chain", i))
- }
- return certChains[i]
- }
- }
- }
- }
-
- if am.Logger != nil {
- am.Logger.Warn("did not find chain matching preferences; using first")
- }
- }
-
- return certChains[0]
-}
-
-// Revoke implements the Revoker interface. It revokes the given certificate.
-func (am *ACMEManager) Revoke(ctx context.Context, cert CertificateResource, reason int) error {
- client, err := am.newACMEClientWithAccount(ctx, false, false)
- if err != nil {
- return err
- }
-
- certs, err := parseCertsFromPEMBundle(cert.CertificatePEM)
- if err != nil {
- return err
- }
-
- return client.revoke(ctx, certs[0], reason)
-}
-
-// ChainPreference describes the client's preferred certificate chain,
-// useful if the CA offers alternate chains. The first matching chain
-// will be selected.
-type ChainPreference struct {
- // Prefer chains with the fewest number of bytes.
- Smallest *bool
-
- // Select first chain having a root with one of
- // these common names.
- RootCommonName []string
-
- // Select first chain that has any issuer with one
- // of these common names.
- AnyCommonName []string
-}
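-
-// For example (illustrative): to prefer any chain rooted in
-// "ISRG Root X1" and otherwise fall back to the first chain offered:
-//
-//    am.PreferredChains = ChainPreference{
-//        RootCommonName: []string{"ISRG Root X1"},
-//    }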
-
-// DefaultACME specifies default settings to use for ACMEManagers.
-// Using this value is optional but can be convenient.
-var DefaultACME = ACMEManager{
- CA: LetsEncryptProductionCA,
- TestCA: LetsEncryptStagingCA,
-}
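-
-// Usage sketch (illustrative; the address is hypothetical): package
-// users typically fill in DefaultACME before obtaining certificates:
-//
-//    DefaultACME.Email = "admin@example.com"
-//    DefaultACME.Agreed = true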
-
-// Some well-known CA endpoints available to use.
-const (
- LetsEncryptStagingCA = "https://acme-staging-v02.api.letsencrypt.org/directory"
- LetsEncryptProductionCA = "https://acme-v02.api.letsencrypt.org/directory"
- ZeroSSLProductionCA = "https://acme.zerossl.com/v2/DV90"
-)
-
-// prefixACME is the storage key prefix used for ACME-specific assets.
-const prefixACME = "acme"
-
-// Interface guards
-var (
- _ PreChecker = (*ACMEManager)(nil)
- _ Issuer = (*ACMEManager)(nil)
- _ Revoker = (*ACMEManager)(nil)
-)
diff --git a/vendor/github.com/caddyserver/certmagic/async.go b/vendor/github.com/caddyserver/certmagic/async.go
deleted file mode 100644
index 67627b25..00000000
--- a/vendor/github.com/caddyserver/certmagic/async.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package certmagic
-
-import (
- "context"
- "errors"
- "log"
- "runtime"
- "sync"
- "time"
-
- "go.uber.org/zap"
-)
-
-var jm = &jobManager{maxConcurrentJobs: 1000}
-
-type jobManager struct {
- mu sync.Mutex
- maxConcurrentJobs int
- activeWorkers int
- queue []namedJob
- names map[string]struct{}
-}
-
-type namedJob struct {
- name string
- job func() error
- logger *zap.Logger
-}
-
-// Submit enqueues the given job with the given name. If name is non-empty
-// and a job with the same name is already enqueued or running, this is a
-// no-op. If name is empty, no duplicate prevention will occur. The job
-// manager will then run this job as soon as it is able.
-func (jm *jobManager) Submit(logger *zap.Logger, name string, job func() error) {
- jm.mu.Lock()
- defer jm.mu.Unlock()
- if jm.names == nil {
- jm.names = make(map[string]struct{})
- }
- if name != "" {
- // prevent duplicate jobs
- if _, ok := jm.names[name]; ok {
- return
- }
- jm.names[name] = struct{}{}
- }
- jm.queue = append(jm.queue, namedJob{name, job, logger})
- if jm.activeWorkers < jm.maxConcurrentJobs {
- jm.activeWorkers++
- go jm.worker()
- }
-}
-
-func (jm *jobManager) worker() {
- defer func() {
- if err := recover(); err != nil {
- buf := make([]byte, stackTraceBufferSize)
- buf = buf[:runtime.Stack(buf, false)]
- log.Printf("panic: certificate worker: %v\n%s", err, buf)
- }
- }()
-
- for {
- jm.mu.Lock()
- if len(jm.queue) == 0 {
- jm.activeWorkers--
- jm.mu.Unlock()
- return
- }
- next := jm.queue[0]
- jm.queue = jm.queue[1:]
- jm.mu.Unlock()
- if err := next.job(); err != nil {
- if next.logger != nil {
- next.logger.Error("job failed", zap.Error(err))
- }
- }
- if next.name != "" {
- jm.mu.Lock()
- delete(jm.names, next.name)
- jm.mu.Unlock()
- }
- }
-}
-
-func doWithRetry(ctx context.Context, log *zap.Logger, f func(context.Context) error) error {
- var attempts int
- ctx = context.WithValue(ctx, AttemptsCtxKey, &attempts)
-
- // the initial intervalIndex is -1, signaling
- // that we should not wait for the first attempt
- start, intervalIndex := time.Now(), -1
- var err error
-
- for time.Since(start) < maxRetryDuration {
- var wait time.Duration
- if intervalIndex >= 0 {
- wait = retryIntervals[intervalIndex]
- }
- timer := time.NewTimer(wait)
- select {
- case <-ctx.Done():
- timer.Stop()
- return context.Canceled
- case <-timer.C:
- err = f(ctx)
- attempts++
- if err == nil || errors.Is(err, context.Canceled) {
- return err
- }
- var errNoRetry ErrNoRetry
- if errors.As(err, &errNoRetry) {
- return err
- }
- if intervalIndex < len(retryIntervals)-1 {
- intervalIndex++
- }
- if time.Since(start) < maxRetryDuration {
- if log != nil {
- log.Error("will retry",
- zap.Error(err),
- zap.Int("attempt", attempts),
- zap.Duration("retrying_in", retryIntervals[intervalIndex]),
- zap.Duration("elapsed", time.Since(start)),
- zap.Duration("max_duration", maxRetryDuration))
- }
- } else {
- if log != nil {
- log.Error("final attempt; giving up",
- zap.Error(err),
- zap.Int("attempt", attempts),
- zap.Duration("elapsed", time.Since(start)),
- zap.Duration("max_duration", maxRetryDuration))
- }
- return nil
- }
- }
- }
- return err
-}
-
-// ErrNoRetry is an error type which signals
-// to stop retries early.
-type ErrNoRetry struct{ Err error }
-
-// Unwrap makes it so that e wraps e.Err.
-func (e ErrNoRetry) Unwrap() error { return e.Err }
-func (e ErrNoRetry) Error() string { return e.Err.Error() }
-
-type retryStateCtxKey struct{}
-
-// AttemptsCtxKey is the context key for the value
-// that holds the attempt counter. The value counts
-// how many times the operation has been attempted.
-// A value of 0 means first attempt.
-var AttemptsCtxKey retryStateCtxKey
-
-// retryIntervals are based on the idea of exponential
-// backoff, but weighted a little more heavily toward the
-// front. We figure that intermittent errors would be
-// resolved after the first retry, but any errors after
-// that would probably require at least a few minutes
-// to clear up: either for DNS to propagate, for the
-// administrator to fix their DNS or network properties,
-// or for some other external factor to change. We
-// chose intervals that we think will be most useful
-// without introducing unnecessary delay. The last
-// interval in this list will be used until the time
-// of maxRetryDuration has elapsed.
-var retryIntervals = []time.Duration{
- 1 * time.Minute,
- 2 * time.Minute,
- 2 * time.Minute,
- 5 * time.Minute, // elapsed: 10 min
- 10 * time.Minute,
- 20 * time.Minute,
- 20 * time.Minute, // elapsed: 1 hr
- 30 * time.Minute,
- 30 * time.Minute, // elapsed: 2 hr
- 1 * time.Hour,
- 3 * time.Hour, // elapsed: 6 hr
- 6 * time.Hour, // for up to maxRetryDuration
-}
-
-// maxRetryDuration is the maximum duration to try
-// doing retries using the above intervals.
-const maxRetryDuration = 24 * time.Hour * 30
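-
-// Usage sketch (illustrative; obtainCert is hypothetical): the attempt
-// counter travels through the context under AttemptsCtxKey, which is
-// how ACMEManager.Issue detects retries:
-//
-//    err := doWithRetry(ctx, logger, func(ctx context.Context) error {
-//        attempts, _ := ctx.Value(AttemptsCtxKey).(*int)
-//        isRetry := attempts != nil && *attempts > 0
-//        return obtainCert(ctx, isRetry)
-//    })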
diff --git a/vendor/github.com/caddyserver/certmagic/cache.go b/vendor/github.com/caddyserver/certmagic/cache.go
deleted file mode 100644
index 673379af..00000000
--- a/vendor/github.com/caddyserver/certmagic/cache.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "fmt"
- weakrand "math/rand" // seeded elsewhere
- "strings"
- "sync"
- "time"
-
- "go.uber.org/zap"
-)
-
-// Cache is a structure that stores certificates in memory.
-// A Cache indexes certificates by name for quick access
-// during TLS handshakes, and avoids duplicating certificates
-// in memory. Generally, there should only be one per process.
-// That is not a strict requirement, but using more
-// than one is a code smell and may indicate an
-// over-engineered design.
-//
-// An empty cache is INVALID and must not be used. Be sure
-// to call NewCache to get a valid value.
-//
-// These should be very long-lived values and must not be
-// copied. Before all references leave scope to be garbage
-// collected, ensure you call Stop() to stop maintenance on
-// the certificates stored in this cache and release locks.
-//
-// Caches are not usually manipulated directly; create a
-// Config value with a pointer to a Cache, and then use
-// the Config to interact with the cache. Caches are
-// agnostic of any particular storage or ACME config,
-// since each certificate may be managed and stored
-// differently.
-type Cache struct {
- // User configuration of the cache
- options CacheOptions
-
- // The cache is keyed by certificate hash
- cache map[string]Certificate
-
- // cacheIndex is a map of SAN to cache key (cert hash)
- cacheIndex map[string][]string
-
- // Protects the cache and index maps
- mu sync.RWMutex
-
- // Close this channel to cancel asset maintenance
- stopChan chan struct{}
-
- // Used to signal when stopping is completed
- doneChan chan struct{}
-
- logger *zap.Logger
-}
-
-// NewCache returns a new, valid Cache for efficiently
-// accessing certificates in memory. It also begins a
-// maintenance goroutine to tend to the certificates
-// in the cache. Call Stop() when you are done with the
-// cache so it can clean up locks and other resources.
-//
-// Most users of this package will not need to call this
-// because a default certificate cache is created for you.
-// Only advanced use cases require creating a new cache.
-//
-// This function panics if opts.GetConfigForCert is not
-// set. The reason is that a cache absolutely needs to
-// be able to get a Config with which to manage TLS
-// assets, and it is not safe to assume that the Default
-// config is always the correct one, since you have
-// created the cache yourself.
-//
-// See the godoc for Cache to use it properly. When
-// no longer needed, caches should be stopped with
-// Stop() to clean up resources, even if the process
-// is being terminated, so that any locks are released
-// and other processes are unblocked.
-func NewCache(opts CacheOptions) *Cache {
- // assume default options if necessary
- if opts.OCSPCheckInterval <= 0 {
- opts.OCSPCheckInterval = DefaultOCSPCheckInterval
- }
- if opts.RenewCheckInterval <= 0 {
- opts.RenewCheckInterval = DefaultRenewCheckInterval
- }
- if opts.Capacity < 0 {
- opts.Capacity = 0
- }
-
- // this must be set, because we cannot
- // safely assume that the Default Config
- // is always the correct one to use
- if opts.GetConfigForCert == nil {
- panic("cache must be initialized with a GetConfigForCert callback")
- }
-
- c := &Cache{
- options: opts,
- cache: make(map[string]Certificate),
- cacheIndex: make(map[string][]string),
- stopChan: make(chan struct{}),
- doneChan: make(chan struct{}),
- logger: opts.Logger,
- }
-
- go c.maintainAssets(0)
-
- return c
-}
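-
-// Usage sketch (illustrative): constructing and stopping a cache; the
-// single shared config returned here is a simplifying assumption:
-//
-//    cache := NewCache(CacheOptions{
-//        GetConfigForCert: func(Certificate) (*Config, error) {
-//            return NewDefault(), nil
-//        },
-//    })
-//    defer cache.Stop()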
-
-// Stop stops the maintenance goroutine for
-// certificates in certCache. It blocks until
-// stopping is complete. Once a cache is
-// stopped, it cannot be reused.
-func (certCache *Cache) Stop() {
- close(certCache.stopChan) // signal to stop
- <-certCache.doneChan // wait for stop to complete
-}
-
-// CacheOptions is used to configure certificate caches.
-// Once a cache has been created with certain options,
-// those settings cannot be changed.
-type CacheOptions struct {
- // REQUIRED. A function that returns a configuration
- // used for managing a certificate, or for accessing
- // that certificate's asset storage (e.g. for
- // OCSP staples, etc). The returned Config MUST
- // be associated with the same Cache as the caller.
- //
- // The reason this is a callback function, dynamically
- // returning a Config (instead of attaching a static
- // pointer to a Config on each certificate) is because
- // the config for how to manage a domain's certificate
- // might change from maintenance to maintenance. The
- // cache is so long-lived, we cannot assume that the
- // host's situation will always be the same; e.g. the
- // certificate might switch DNS providers, so the DNS
- // challenge (if used) would need to be adjusted from
- // the last time it was run ~8 weeks ago.
- GetConfigForCert ConfigGetter
-
- // How often to check and update OCSP staples;
- // if unset, DefaultOCSPCheckInterval will be used.
- OCSPCheckInterval time.Duration
-
- // How often to check certificates for renewal;
- // if unset, DefaultRenewCheckInterval will be used.
- RenewCheckInterval time.Duration
-
- // Maximum number of certificates to allow in the cache.
- // If reached, certificates will be randomly evicted to
- // make room for new ones. 0 means unlimited.
- Capacity int
-
- // Set a logger to enable logging
- Logger *zap.Logger
-}
-
-// ConfigGetter is a function that returns a prepared,
-// valid config that should be used when managing the
-// given certificate or its assets.
-type ConfigGetter func(Certificate) (*Config, error)
-
-// cacheCertificate calls unsyncedCacheCertificate with a write lock.
-//
-// This function is safe for concurrent use.
-func (certCache *Cache) cacheCertificate(cert Certificate) {
- certCache.mu.Lock()
- certCache.unsyncedCacheCertificate(cert)
- certCache.mu.Unlock()
-}
-
-// unsyncedCacheCertificate adds cert to the in-memory cache unless
-// it already exists in the cache (according to cert.Hash). It
-// updates the name index.
-//
-// This function is NOT safe for concurrent use. Callers MUST acquire
-// a write lock on certCache.mu first.
-func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
- // no-op if this certificate already exists in the cache
- if _, ok := certCache.cache[cert.hash]; ok {
- if certCache.logger != nil {
- certCache.logger.Debug("certificate already cached",
- zap.Strings("subjects", cert.Names),
- zap.Time("expiration", cert.Leaf.NotAfter),
- zap.Bool("managed", cert.managed),
- zap.String("issuer_key", cert.issuerKey),
- zap.String("hash", cert.hash))
- }
- return
- }
-
- // if the cache is at capacity, make room for new cert
- cacheSize := len(certCache.cache)
- if certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity {
- // Go maps are "nondeterministic" but not actually random,
- // so although we could just chop off the "front" of the
- // map with less code, that is a heavily skewed eviction
- // strategy; generating random numbers is cheap and
- // ensures a much better distribution.
- rnd := weakrand.Intn(cacheSize)
- i := 0
- for _, randomCert := range certCache.cache {
- if i == rnd {
- if certCache.logger != nil {
- certCache.logger.Debug("cache full; evicting random certificate",
- zap.Strings("removing_subjects", randomCert.Names),
- zap.String("removing_hash", randomCert.hash),
- zap.Strings("inserting_subjects", cert.Names),
- zap.String("inserting_hash", cert.hash))
- }
- certCache.removeCertificate(randomCert)
- break
- }
- i++
- }
- }
-
- // store the certificate
- certCache.cache[cert.hash] = cert
-
- // update the index so we can access it by name
- for _, name := range cert.Names {
- certCache.cacheIndex[name] = append(certCache.cacheIndex[name], cert.hash)
- }
-
- if certCache.logger != nil {
- certCache.logger.Debug("added certificate to cache",
- zap.Strings("subjects", cert.Names),
- zap.Time("expiration", cert.Leaf.NotAfter),
- zap.Bool("managed", cert.managed),
- zap.String("issuer_key", cert.issuerKey),
- zap.String("hash", cert.hash))
- }
-}
-
-// removeCertificate removes cert from the cache.
-//
-// This function is NOT safe for concurrent use; callers
-// MUST first acquire a write lock on certCache.mu.
-func (certCache *Cache) removeCertificate(cert Certificate) {
- // delete all mentions of this cert from the name index
- for _, name := range cert.Names {
- keyList := certCache.cacheIndex[name]
- for i := 0; i < len(keyList); i++ {
- if keyList[i] == cert.hash {
- keyList = append(keyList[:i], keyList[i+1:]...)
- i--
- }
- }
- if len(keyList) == 0 {
- delete(certCache.cacheIndex, name)
- } else {
- certCache.cacheIndex[name] = keyList
- }
- }
-
- // delete the actual cert from the cache
- delete(certCache.cache, cert.hash)
-
- if certCache.logger != nil {
- certCache.logger.Debug("removed certificate from cache",
- zap.Strings("subjects", cert.Names),
- zap.Time("expiration", cert.Leaf.NotAfter),
- zap.Bool("managed", cert.managed),
- zap.String("issuer_key", cert.issuerKey),
- zap.String("hash", cert.hash))
- }
-}
-
-// replaceCertificate atomically replaces oldCert with newCert in
-// the cache.
-//
-// This method is safe for concurrent use.
-func (certCache *Cache) replaceCertificate(oldCert, newCert Certificate) {
- certCache.mu.Lock()
- certCache.removeCertificate(oldCert)
- certCache.unsyncedCacheCertificate(newCert)
- certCache.mu.Unlock()
- if certCache.logger != nil {
- certCache.logger.Info("replaced certificate in cache",
- zap.Strings("subjects", newCert.Names),
- zap.Time("new_expiration", newCert.Leaf.NotAfter))
- }
-}
-
-func (certCache *Cache) getAllMatchingCerts(name string) []Certificate {
- certCache.mu.RLock()
- defer certCache.mu.RUnlock()
-
- allCertKeys := certCache.cacheIndex[name]
-
- certs := make([]Certificate, len(allCertKeys))
- for i := range allCertKeys {
- certs[i] = certCache.cache[allCertKeys[i]]
- }
-
- return certs
-}
-
-func (certCache *Cache) getAllCerts() []Certificate {
- certCache.mu.RLock()
- defer certCache.mu.RUnlock()
- certs := make([]Certificate, 0, len(certCache.cache))
- for _, cert := range certCache.cache {
- certs = append(certs, cert)
- }
- return certs
-}
-
-func (certCache *Cache) getConfig(cert Certificate) (*Config, error) {
- cfg, err := certCache.options.GetConfigForCert(cert)
- if err != nil {
- return nil, err
- }
- if cfg.certCache != nil && cfg.certCache != certCache {
- return nil, fmt.Errorf("config returned for certificate %v is not nil and points to different cache; got %p, expected %p (this one)",
- cert.Names, cfg.certCache, certCache)
- }
- return cfg, nil
-}
-
-// AllMatchingCertificates returns a list of all certificates that could
-// be used to serve the given SNI name, including exact SAN matches and
-// wildcard matches.
-func (certCache *Cache) AllMatchingCertificates(name string) []Certificate {
- // get exact matches first
- certs := certCache.getAllMatchingCerts(name)
-
- // then look for wildcard matches by replacing each
- // label of the domain name with wildcards
- labels := strings.Split(name, ".")
- for i := range labels {
- labels[i] = "*"
- candidate := strings.Join(labels, ".")
- certs = append(certs, certCache.getAllMatchingCerts(candidate)...)
- }
-
- return certs
-}
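-
-// For example (illustrative): for the SNI name "a.b.example.com",
-// candidates are checked in this order:
-//
-//    a.b.example.com   (exact match)
-//    *.b.example.com
-//    *.*.example.com
-//    *.*.*.com
-//    *.*.*.*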
-
-var (
- defaultCache *Cache
- defaultCacheMu sync.Mutex
-)
diff --git a/vendor/github.com/caddyserver/certmagic/certificates.go b/vendor/github.com/caddyserver/certmagic/certificates.go
deleted file mode 100644
index 067bfc50..00000000
--- a/vendor/github.com/caddyserver/certmagic/certificates.go
+++ /dev/null
@@ -1,414 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "net"
- "strings"
- "time"
-
- "go.uber.org/zap"
- "golang.org/x/crypto/ocsp"
-)
-
-// Certificate is a tls.Certificate with associated metadata tacked on.
-// Even though the metadata can be obtained by parsing the certificate,
-// extracting it onto this struct ahead of time is more efficient,
-// at the cost of slightly higher memory use.
-type Certificate struct {
- tls.Certificate
-
- // Names is the list of subject names this
- // certificate is signed for.
- Names []string
-
- // Optional; user-provided, and arbitrary.
- Tags []string
-
- // OCSP contains the certificate's parsed OCSP response.
- ocsp *ocsp.Response
-
- // The hex-encoded hash of this cert's chain's bytes.
- hash string
-
- // Whether this certificate is under our management.
- managed bool
-
- // The unique string identifying the issuer of this certificate.
- issuerKey string
-}
-
-// NeedsRenewal returns true if the certificate is
-// expiring soon (according to cfg) or has expired.
-func (cert Certificate) NeedsRenewal(cfg *Config) bool {
- return currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio)
-}
-
-// Expired returns true if the certificate has expired.
-func (cert Certificate) Expired() bool {
- if cert.Leaf == nil {
- // ideally cert.Leaf would never be nil, but this can happen for
- // "synthetic" certs like those made to solve the TLS-ALPN challenge
- // which adds a special cert directly to the cache, since
- // tls.X509KeyPair() discards the leaf; oh well
- return false
- }
- return time.Now().After(cert.Leaf.NotAfter)
-}
-
-// currentlyInRenewalWindow returns true if the current time is
-// within the renewal window, according to the given start/end
-// dates and the ratio of the renewal window. If true is returned,
-// the certificate being considered is due for renewal.
-func currentlyInRenewalWindow(notBefore, notAfter time.Time, renewalWindowRatio float64) bool {
- if notAfter.IsZero() {
- return false
- }
- lifetime := notAfter.Sub(notBefore)
- if renewalWindowRatio == 0 {
- renewalWindowRatio = DefaultRenewalWindowRatio
- }
- renewalWindow := time.Duration(float64(lifetime) * renewalWindowRatio)
- renewalWindowStart := notAfter.Add(-renewalWindow)
- return time.Now().After(renewalWindowStart)
-}
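-
-// Worked example (illustrative): a 90-day certificate with the default
-// renewal window ratio of 1/3 has a 30-day renewal window, so it is
-// considered due for renewal beginning 30 days before it expires.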
-
-// HasTag returns true if cert.Tags has tag.
-func (cert Certificate) HasTag(tag string) bool {
- for _, t := range cert.Tags {
- if t == tag {
- return true
- }
- }
- return false
-}
-
-// CacheManagedCertificate loads the certificate for domain into the
-// cache, from the TLS storage for managed certificates. It returns a
-// copy of the Certificate that was put into the cache.
-//
-// This is a lower-level method; normally you'll call Manage() instead.
-//
-// This method is safe for concurrent use.
-func (cfg *Config) CacheManagedCertificate(domain string) (Certificate, error) {
- cert, err := cfg.loadManagedCertificate(domain)
- if err != nil {
- return cert, err
- }
- cfg.certCache.cacheCertificate(cert)
- cfg.emit("cached_managed_cert", cert.Names)
- return cert, nil
-}
-
-// loadManagedCertificate loads the managed certificate for domain from any
-// of the configured issuers' storage locations, but it does not add it to
-// the cache. It just loads from storage and returns it.
-func (cfg *Config) loadManagedCertificate(domain string) (Certificate, error) {
- certRes, err := cfg.loadCertResourceAnyIssuer(domain)
- if err != nil {
- return Certificate{}, err
- }
- cert, err := cfg.makeCertificateWithOCSP(certRes.CertificatePEM, certRes.PrivateKeyPEM)
- if err != nil {
- return cert, err
- }
- cert.managed = true
- cert.issuerKey = certRes.issuerKey
- return cert, nil
-}
-
-// CacheUnmanagedCertificatePEMFile loads a certificate for host using certFile
-// and keyFile, which must be in PEM format. It stores the certificate in
-// the in-memory cache.
-//
-// This method is safe for concurrent use.
-func (cfg *Config) CacheUnmanagedCertificatePEMFile(certFile, keyFile string, tags []string) error {
- cert, err := cfg.makeCertificateFromDiskWithOCSP(cfg.Storage, certFile, keyFile)
- if err != nil {
- return err
- }
- cert.Tags = tags
- cfg.certCache.cacheCertificate(cert)
- cfg.emit("cached_unmanaged_cert", cert.Names)
- return nil
-}
-
-// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache.
-// It staples OCSP if possible.
-//
-// This method is safe for concurrent use.
-func (cfg *Config) CacheUnmanagedTLSCertificate(tlsCert tls.Certificate, tags []string) error {
- var cert Certificate
- err := fillCertFromLeaf(&cert, tlsCert)
- if err != nil {
- return err
- }
- _, err = stapleOCSP(cfg.OCSP, cfg.Storage, &cert, nil)
- if err != nil && cfg.Logger != nil {
- cfg.Logger.Warn("stapling OCSP", zap.Error(err))
- }
- cfg.emit("cached_unmanaged_cert", cert.Names)
- cert.Tags = tags
- cfg.certCache.cacheCertificate(cert)
- return nil
-}
-
-// CacheUnmanagedCertificatePEMBytes makes a certificate out of the PEM bytes
-// of the certificate and key, then caches it in memory.
-//
-// This method is safe for concurrent use.
-func (cfg *Config) CacheUnmanagedCertificatePEMBytes(certBytes, keyBytes []byte, tags []string) error {
- cert, err := cfg.makeCertificateWithOCSP(certBytes, keyBytes)
- if err != nil {
- return err
- }
- cert.Tags = tags
- cfg.certCache.cacheCertificate(cert)
- cfg.emit("cached_unmanaged_cert", cert.Names)
- return nil
-}
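
For context, a minimal sketch of loading a static, unmanaged certificate with one of these helpers; the file paths and tag are hypothetical:

package main

import (
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	cfg := certmagic.NewDefault()
	// Serve a pre-provisioned certificate alongside any managed ones.
	err := cfg.CacheUnmanagedCertificatePEMFile("/etc/ssl/site.crt", "/etc/ssl/site.key", []string{"static"})
	if err != nil {
		log.Fatal(err)
	}
}
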
-
-// makeCertificateFromDiskWithOCSP makes a Certificate by loading the
-// certificate and key files. It fills out all the fields in
-// the certificate except for the Managed and OnDemand flags.
-// (It is up to the caller to set those.) It staples OCSP.
-func (cfg Config) makeCertificateFromDiskWithOCSP(storage Storage, certFile, keyFile string) (Certificate, error) {
- certPEMBlock, err := ioutil.ReadFile(certFile)
- if err != nil {
- return Certificate{}, err
- }
- keyPEMBlock, err := ioutil.ReadFile(keyFile)
- if err != nil {
- return Certificate{}, err
- }
- return cfg.makeCertificateWithOCSP(certPEMBlock, keyPEMBlock)
-}
-
-// makeCertificateWithOCSP is the same as makeCertificate except that it also
-// staples OCSP to the certificate.
-func (cfg Config) makeCertificateWithOCSP(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
- cert, err := makeCertificate(certPEMBlock, keyPEMBlock)
- if err != nil {
- return cert, err
- }
- _, err = stapleOCSP(cfg.OCSP, cfg.Storage, &cert, certPEMBlock)
- if err != nil && cfg.Logger != nil {
- cfg.Logger.Warn("stapling OCSP", zap.Error(err))
- }
- return cert, nil
-}
-
-// makeCertificate turns a certificate PEM bundle and a key PEM block into
-// a Certificate with necessary metadata from parsing its bytes filled into
-// its struct fields for convenience (except for the OnDemand and Managed
-// flags; it is up to the caller to set those properties!). This function
-// does NOT staple OCSP.
-func makeCertificate(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
- var cert Certificate
-
- // Convert to a tls.Certificate
- tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
- if err != nil {
- return cert, err
- }
-
- // Extract necessary metadata
- err = fillCertFromLeaf(&cert, tlsCert)
- if err != nil {
- return cert, err
- }
-
- return cert, nil
-}
-
-// fillCertFromLeaf populates cert from tlsCert. If it succeeds, it
-// guarantees that cert.Leaf is non-nil.
-func fillCertFromLeaf(cert *Certificate, tlsCert tls.Certificate) error {
- if len(tlsCert.Certificate) == 0 {
- return fmt.Errorf("certificate is empty")
- }
- cert.Certificate = tlsCert
-
- // the leaf cert should be the one for the site; we must set
- // the tls.Certificate.Leaf field so that TLS handshakes are
- // more efficient
- leaf, err := x509.ParseCertificate(tlsCert.Certificate[0])
- if err != nil {
- return err
- }
- cert.Certificate.Leaf = leaf
-
- // for convenience, we do want to assemble all the
- // subjects on the certificate into one list
- if leaf.Subject.CommonName != "" { // TODO: CommonName is deprecated
- cert.Names = []string{strings.ToLower(leaf.Subject.CommonName)}
- }
- for _, name := range leaf.DNSNames {
- if name != leaf.Subject.CommonName { // TODO: CommonName is deprecated
- cert.Names = append(cert.Names, strings.ToLower(name))
- }
- }
- for _, ip := range leaf.IPAddresses {
- if ipStr := ip.String(); ipStr != leaf.Subject.CommonName { // TODO: CommonName is deprecated
- cert.Names = append(cert.Names, strings.ToLower(ipStr))
- }
- }
- for _, email := range leaf.EmailAddresses {
- if email != leaf.Subject.CommonName { // TODO: CommonName is deprecated
- cert.Names = append(cert.Names, strings.ToLower(email))
- }
- }
- for _, u := range leaf.URIs {
- if u.String() != leaf.Subject.CommonName { // TODO: CommonName is deprecated
- cert.Names = append(cert.Names, u.String())
- }
- }
- if len(cert.Names) == 0 {
- return fmt.Errorf("certificate has no names")
- }
-
- cert.hash = hashCertificateChain(cert.Certificate.Certificate)
-
- return nil
-}
-
-// managedCertInStorageExpiresSoon returns true if cert (being a
-// managed certificate) is expiring within its renewal window.
-// It returns false if there was an error checking the expiration
-// of the certificate as found in storage, or if the certificate
-// in storage is NOT expiring soon. A certificate that is expiring
-// soon in our cache but is not expiring soon in storage probably
-// means that another instance renewed the certificate in the
-// meantime, and it would be a good idea to simply load the cert
-// into our cache rather than repeat the renewal process.
-func (cfg *Config) managedCertInStorageExpiresSoon(cert Certificate) (bool, error) {
- certRes, err := cfg.loadCertResourceAnyIssuer(cert.Names[0])
- if err != nil {
- return false, err
- }
- _, needsRenew := cfg.managedCertNeedsRenewal(certRes)
- return needsRenew, nil
-}
-
-// reloadManagedCertificate reloads the certificate corresponding to the name(s)
-// on oldCert into the cache, from storage. This also replaces the old certificate
-// with the new one, so that all configurations that used the old cert now point
-// to the new cert. It assumes that the new certificate for oldCert.Names[0] is
-// already in storage.
-func (cfg *Config) reloadManagedCertificate(oldCert Certificate) error {
- if cfg.Logger != nil {
- cfg.Logger.Info("reloading managed certificate", zap.Strings("identifiers", oldCert.Names))
- }
- newCert, err := cfg.loadManagedCertificate(oldCert.Names[0])
- if err != nil {
- return fmt.Errorf("loading managed certificate for %v from storage: %v", oldCert.Names, err)
- }
- cfg.certCache.replaceCertificate(oldCert, newCert)
- return nil
-}
-
-// SubjectQualifiesForCert returns true if subj is a name which,
-// as a quick sanity check, looks like it could be the subject
-// of a certificate. Requirements are:
-// - must not be empty
-// - must not start or end with a dot (RFC 1034)
-// - must not contain common accidental special characters
-func SubjectQualifiesForCert(subj string) bool {
- // must not be empty
- return strings.TrimSpace(subj) != "" &&
-
- // must not start or end with a dot
- !strings.HasPrefix(subj, ".") &&
- !strings.HasSuffix(subj, ".") &&
-
- // if it has a wildcard, must be a left-most label (or exactly "*"
- // which won't be trusted by browsers but still technically works)
- (!strings.Contains(subj, "*") || strings.HasPrefix(subj, "*.") || subj == "*") &&
-
- // must not contain other common special characters
- !strings.ContainsAny(subj, "()[]{}<> \t\n\"\\!@#$%^&|;'+=")
-}
-
-// SubjectQualifiesForPublicCert returns true if the subject
-// name appears eligible for automagic TLS with a public
-// CA such as Let's Encrypt. For example: localhost and IP
-// addresses are not eligible because we cannot obtain certs
-// for those names with a public CA. Wildcard names are
-// allowed, as long as they conform to CABF requirements (only
-// one wildcard label, and it must be the left-most label).
-func SubjectQualifiesForPublicCert(subj string) bool {
- // must at least qualify for a certificate
- return SubjectQualifiesForCert(subj) &&
-
- // localhost, .localhost TLD, and .local TLD are ineligible
- !SubjectIsInternal(subj) &&
-
- // cannot be an IP address (as of yet), see
- // https://community.letsencrypt.org/t/certificate-for-static-ip/84/2?u=mholt
- !SubjectIsIP(subj) &&
-
- // only one wildcard label allowed, and it must be left-most, with 3+ labels
- (!strings.Contains(subj, "*") ||
- (strings.Count(subj, "*") == 1 &&
- strings.Count(subj, ".") > 1 &&
- len(subj) > 2 &&
- strings.HasPrefix(subj, "*.")))
-}
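
A few sample inputs may help; this sketch exercises the two qualification checks as defined above:

package main

import (
	"fmt"

	"github.com/caddyserver/certmagic"
)

func main() {
	fmt.Println(certmagic.SubjectQualifiesForCert("example.com"))         // true
	fmt.Println(certmagic.SubjectQualifiesForCert(".example.com"))        // false: leading dot
	fmt.Println(certmagic.SubjectQualifiesForPublicCert("*.example.com")) // true: one left-most wildcard, 3+ labels
	fmt.Println(certmagic.SubjectQualifiesForPublicCert("*.com"))         // false: too few labels for a wildcard
	fmt.Println(certmagic.SubjectQualifiesForPublicCert("localhost"))     // false: internal name
	fmt.Println(certmagic.SubjectQualifiesForPublicCert("192.168.0.10"))  // false: IP address
}
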
-
-// SubjectIsIP returns true if subj is an IP address.
-func SubjectIsIP(subj string) bool {
- return net.ParseIP(subj) != nil
-}
-
-// SubjectIsInternal returns true if subj is an internal-facing
-// hostname or address.
-func SubjectIsInternal(subj string) bool {
- return subj == "localhost" ||
- strings.HasSuffix(subj, ".localhost") ||
- strings.HasSuffix(subj, ".local")
-}
-
-// MatchWildcard returns true if subject (a candidate DNS name)
-// matches wildcard (a reference DNS name), mostly according to
-// RFC 6125-compliant wildcard rules. See also RFC 2818 which
-// states that IP addresses must match exactly, but this function
-// does not attempt to distinguish IP addresses from internal or
-// external DNS names that happen to look like IP addresses.
-// It uses DNS wildcard matching logic; see
-// https://tools.ietf.org/html/rfc2818#section-3.1.
-func MatchWildcard(subject, wildcard string) bool {
- if subject == wildcard {
- return true
- }
- if !strings.Contains(wildcard, "*") {
- return false
- }
- labels := strings.Split(subject, ".")
- for i := range labels {
- if labels[i] == "" {
- continue // invalid label
- }
- labels[i] = "*"
- candidate := strings.Join(labels, ".")
- if candidate == wildcard {
- return true
- }
- }
- return false
-}
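
Since the matching substitutes exactly one label at a time, deeper subdomains do not match a single-level wildcard; a small sketch:

package main

import (
	"fmt"

	"github.com/caddyserver/certmagic"
)

func main() {
	fmt.Println(certmagic.MatchWildcard("sub.example.com", "*.example.com"))   // true
	fmt.Println(certmagic.MatchWildcard("sub.example.com", "sub.example.com")) // true: exact match
	fmt.Println(certmagic.MatchWildcard("a.b.example.com", "*.example.com"))   // false: only one label is substituted
}
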
diff --git a/vendor/github.com/caddyserver/certmagic/certmagic.go b/vendor/github.com/caddyserver/certmagic/certmagic.go
deleted file mode 100644
index bb33b90c..00000000
--- a/vendor/github.com/caddyserver/certmagic/certmagic.go
+++ /dev/null
@@ -1,490 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package certmagic automates the obtaining and renewal of TLS certificates,
-// including TLS & HTTPS best practices such as robust OCSP stapling, caching,
-// HTTP->HTTPS redirects, and more.
-//
-// Its high-level API serves your HTTP handlers over HTTPS if you simply give
-// the domain name(s) and the http.Handler; CertMagic will create and run
-// the HTTPS server for you, fully managing certificates during the lifetime
-// of the server. Similarly, it can be used to start TLS listeners or return
-// a ready-to-use tls.Config -- whatever layer you need TLS for, CertMagic
-// makes it easy. See the HTTPS, Listen, and TLS functions for that.
-//
-// If you need more control, create a Cache using NewCache() and then make
-// a Config using New(). You can then call Manage() on the config. But if
-// you use this lower-level API, you'll have to be sure to solve the HTTP
-// and TLS-ALPN challenges yourself (unless you disabled them or use the
-// DNS challenge) by using the provided Config.GetCertificate function
-// in your tls.Config and/or Config.HTTPChallengeHandler in your HTTP
-// handler.
-//
-// See the package's README for more instruction.
-package certmagic
-
-import (
- "context"
- "crypto"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "log"
- "net"
- "net/http"
- "sort"
- "strings"
- "sync"
- "time"
-)
-
-// HTTPS serves mux for all domainNames using the HTTP
-// and HTTPS ports, redirecting all HTTP requests to HTTPS.
-// It uses the Default config.
-//
-// This high-level convenience function is opinionated and
-// applies sane defaults for production use, including
-// timeouts for HTTP requests and responses. To allow very
-// long-lived connections, you should make your own
-// http.Server values and use this package's Listen(), TLS(),
-// or Config.TLSConfig() functions to customize to your needs.
-// For example, servers that need to support large uploads or
-// downloads with slow clients may need longer timeouts; this
-// function is therefore not suitable for them.
-//
-// Calling this function signifies your acceptance of
-// the CA's Subscriber Agreement and/or Terms of Service.
-func HTTPS(domainNames []string, mux http.Handler) error {
- if mux == nil {
- mux = http.DefaultServeMux
- }
-
- DefaultACME.Agreed = true
- cfg := NewDefault()
-
- err := cfg.ManageSync(domainNames)
- if err != nil {
- return err
- }
-
- httpWg.Add(1)
- defer httpWg.Done()
-
- // if we haven't made listeners yet, do so now,
- // and clean them up when all servers are done
- lnMu.Lock()
- if httpLn == nil && httpsLn == nil {
- httpLn, err = net.Listen("tcp", fmt.Sprintf(":%d", HTTPPort))
- if err != nil {
- lnMu.Unlock()
- return err
- }
-
- tlsConfig := cfg.TLSConfig()
- tlsConfig.NextProtos = append([]string{"h2", "http/1.1"}, tlsConfig.NextProtos...)
-
- httpsLn, err = tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), tlsConfig)
- if err != nil {
- httpLn.Close()
- httpLn = nil
- lnMu.Unlock()
- return err
- }
-
- go func() {
- httpWg.Wait()
- lnMu.Lock()
- httpLn.Close()
- httpsLn.Close()
- lnMu.Unlock()
- }()
- }
- hln, hsln := httpLn, httpsLn
- lnMu.Unlock()
-
- // create HTTP/S servers that are configured
- // with sane default timeouts and appropriate
- // handlers (the HTTP server solves the HTTP
- // challenge and issues redirects to HTTPS,
- // while the HTTPS server simply serves the
- // user's handler)
- httpServer := &http.Server{
- ReadHeaderTimeout: 5 * time.Second,
- ReadTimeout: 5 * time.Second,
- WriteTimeout: 5 * time.Second,
- IdleTimeout: 5 * time.Second,
- }
- if len(cfg.Issuers) > 0 {
- if am, ok := cfg.Issuers[0].(*ACMEManager); ok {
- httpServer.Handler = am.HTTPChallengeHandler(http.HandlerFunc(httpRedirectHandler))
- }
- }
- httpsServer := &http.Server{
- ReadHeaderTimeout: 10 * time.Second,
- ReadTimeout: 30 * time.Second,
- WriteTimeout: 2 * time.Minute,
- IdleTimeout: 5 * time.Minute,
- Handler: mux,
- }
-
- log.Printf("%v Serving HTTP->HTTPS on %s and %s",
- domainNames, hln.Addr(), hsln.Addr())
-
- go httpServer.Serve(hln)
- return httpsServer.Serve(hsln)
-}
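
For context, a caller of this convenience function might look like the following sketch; the domain name is hypothetical, and calling HTTPS implies agreement to the CA's terms:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/caddyserver/certmagic"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello over HTTPS")
	})
	// Blocks, serving HTTP->HTTPS redirects and the managed TLS site.
	log.Fatal(certmagic.HTTPS([]string{"example.com"}, mux))
}
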
-
-func httpRedirectHandler(w http.ResponseWriter, r *http.Request) {
- toURL := "https://"
-
- // since we redirect to the standard HTTPS port, we
- // do not need to include it in the redirect URL
- requestHost := hostOnly(r.Host)
-
- toURL += requestHost
- toURL += r.URL.RequestURI()
-
- // get rid of this disgusting unencrypted HTTP connection 🤢
- w.Header().Set("Connection", "close")
-
- http.Redirect(w, r, toURL, http.StatusMovedPermanently)
-}
-
-// TLS enables management of certificates for domainNames
-// and returns a valid tls.Config. It uses the Default
-// config.
-//
-// Because this is a convenience function that returns
-// only a tls.Config, it does not assume HTTP is being
-// served on the HTTP port, so the HTTP challenge is
-// disabled (no HTTPChallengeHandler is necessary). The
-// package variable Default is modified so that the
-// HTTP challenge is disabled.
-//
-// Calling this function signifies your acceptance of
-// the CA's Subscriber Agreement and/or Terms of Service.
-func TLS(domainNames []string) (*tls.Config, error) {
- DefaultACME.Agreed = true
- DefaultACME.DisableHTTPChallenge = true
- cfg := NewDefault()
- return cfg.TLSConfig(), cfg.ManageSync(domainNames)
-}
-
-// Listen manages certificates for domainName and returns a
-// TLS listener. It uses the Default config.
-//
-// Because this convenience function returns only a TLS-enabled
-// listener and does not presume HTTP is also being served,
-// the HTTP challenge will be disabled. The package variable
-// Default is modified so that the HTTP challenge is disabled.
-//
-// Calling this function signifies your acceptance of
-// the CA's Subscriber Agreement and/or Terms of Service.
-func Listen(domainNames []string) (net.Listener, error) {
- DefaultACME.Agreed = true
- DefaultACME.DisableHTTPChallenge = true
- cfg := NewDefault()
- err := cfg.ManageSync(domainNames)
- if err != nil {
- return nil, err
- }
- return tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), cfg.TLSConfig())
-}
-
-// ManageSync obtains certificates for domainNames and keeps them
-// renewed using the Default config.
-//
-// This is a slightly lower-level function; you will need to
-// wire up support for the ACME challenges yourself. You can
-// obtain a Config to help you do that by calling NewDefault().
-//
-// You will need to ensure that you use a TLS config that gets
-// certificates from this Config and that the HTTP and TLS-ALPN
-// challenges can be solved. The easiest way to do this is to
-// use NewDefault().TLSConfig() as your TLS config and to wrap
-// your HTTP handler with NewDefault().HTTPChallengeHandler().
-// If you don't have an HTTP server, you will need to disable
-// the HTTP challenge.
-//
-// If you already have a TLS config you want to use, you can
-// simply set its GetCertificate field to
-// NewDefault().GetCertificate.
-//
-// Calling this function signifies your acceptance of
-// the CA's Subscriber Agreement and/or Terms of Service.
-func ManageSync(domainNames []string) error {
- DefaultACME.Agreed = true
- return NewDefault().ManageSync(domainNames)
-}
-
-// ManageAsync is the same as ManageSync, except that
-// certificates are managed asynchronously. This means
-// that the function will return before certificates
-// are ready, and errors that occur during certificate
-// obtain or renew operations are only logged. It is
-// vital that you monitor the logs if using this method,
-// which is only recommended for automated/non-interactive
-// environments.
-func ManageAsync(ctx context.Context, domainNames []string) error {
- DefaultACME.Agreed = true
- return NewDefault().ManageAsync(ctx, domainNames)
-}
-
-// OnDemandConfig configures on-demand TLS (certificate
-// operations as-needed, like during TLS handshakes,
-// rather than immediately).
-//
-// When this package's high-level convenience functions
-// are used (HTTPS, Manage, etc., where the Default
-// config is used as a template), this struct regulates
-// certificate operations using an implicit whitelist
-// containing the names passed into those functions if
-// no DecisionFunc is set. This ensures some degree of
-// control by default to avoid certificate operations for
-// arbitrary domain names. To override this whitelist,
-// manually specify a DecisionFunc. To impose rate limits,
-// specify your own DecisionFunc.
-type OnDemandConfig struct {
- // If set, this function will be called to determine
- // whether a certificate can be obtained or renewed
- // for the given name. If an error is returned, the
- // request will be denied.
- DecisionFunc func(name string) error
-
- // List of whitelisted hostnames (SNI values) for
- // deferred (on-demand) obtaining of certificates.
- // Used only by higher-level functions in this
- // package to persist the list of hostnames that
- // the config is supposed to manage. This is done
- // because it seems reasonable that if you say
- // "Manage [domain names...]", then only those
- // domain names should be able to have certs;
- // we don't NEED this feature, but it makes sense
-// for higher-level convenience functions to
-// retain their convenience (the alternative is
-// for the user to manually create a DecisionFunc
-// that whitelists the same names already passed
-// into Manage) without letting clients obtain
-// certs for any domain names they want.
- // Only enforced if len > 0.
- hostWhitelist []string
-}
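
As the comments above describe, supplying a DecisionFunc overrides the implicit whitelist; a minimal sketch, where the allowed name is hypothetical:

package main

import (
	"fmt"

	"github.com/caddyserver/certmagic"
)

func main() {
	cfg := certmagic.NewDefault()
	cfg.OnDemand = &certmagic.OnDemandConfig{
		DecisionFunc: func(name string) error {
			if name == "allowed.example.com" { // hypothetical allow-list of one
				return nil
			}
			return fmt.Errorf("on-demand TLS not permitted for %s", name)
		},
	}
	// Use cfg.TLSConfig() in a server to defer cert operations to handshakes.
	_ = cfg
}
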
-
-func (o *OnDemandConfig) whitelistContains(name string) bool {
- for _, n := range o.hostWhitelist {
- if strings.EqualFold(n, name) {
- return true
- }
- }
- return false
-}
-
-// isLoopback returns true if the hostname of addr looks
-// explicitly like a common local hostname. addr must only
-// be a host or a host:port combination.
-func isLoopback(addr string) bool {
- host := hostOnly(addr)
- return host == "localhost" ||
- strings.Trim(host, "[]") == "::1" ||
- strings.HasPrefix(host, "127.")
-}
-
-// isInternal returns true if the IP of addr
-// belongs to a private network IP range. addr
-// must only be an IP or an IP:port combination.
-// Loopback addresses are considered false.
-func isInternal(addr string) bool {
- privateNetworks := []string{
- "10.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "fc00::/7",
- }
- host := hostOnly(addr)
- ip := net.ParseIP(host)
- if ip == nil {
- return false
- }
- for _, privateNetwork := range privateNetworks {
- _, ipnet, _ := net.ParseCIDR(privateNetwork)
- if ipnet.Contains(ip) {
- return true
- }
- }
- return false
-}
-
-// hostOnly returns only the host portion of hostport.
-// If there is no port or if there is an error splitting
-// the port off, the whole input string is returned.
-func hostOnly(hostport string) string {
- host, _, err := net.SplitHostPort(hostport)
- if err != nil {
- return hostport // OK; probably had no port to begin with
- }
- return host
-}
-
-// PreChecker is an interface that can be optionally implemented by
-// Issuers. Pre-checks are performed before each call (or batch of
-// identical calls) to Issue(), giving the issuer the option to ensure
-// it has all the necessary information/state.
-type PreChecker interface {
- PreCheck(ctx context.Context, names []string, interactive bool) error
-}
-
-// Issuer is a type that can issue certificates.
-type Issuer interface {
- // Issue obtains a certificate for the given CSR. It
- // must honor context cancellation if it is long-running.
- // It can also use the context to find out if the current
- // call is part of a retry, via AttemptsCtxKey.
- Issue(ctx context.Context, request *x509.CertificateRequest) (*IssuedCertificate, error)
-
- // IssuerKey must return a string that uniquely identifies
- // this particular configuration of the Issuer such that
- // any certificates obtained by this Issuer will be treated
- // as identical if they have the same SANs.
- //
- // Certificates obtained from Issuers with the same IssuerKey
- // will overwrite others with the same SANs. For example, an
- // Issuer might be able to obtain certificates from different
- // CAs, say A and B. It is likely that the CAs have different
- // use cases and purposes (e.g. testing and production), so
- // their respective certificates should not overwrite each
- // other.
- IssuerKey() string
-}
-
-// Revoker can revoke certificates. Reason codes are defined
-// by RFC 5280 §5.3.1: https://tools.ietf.org/html/rfc5280#section-5.3.1
-// and are available as constants in our ACME library.
-type Revoker interface {
- Revoke(ctx context.Context, cert CertificateResource, reason int) error
-}
-
-// KeyGenerator can generate a private key.
-type KeyGenerator interface {
- // GenerateKey generates a private key. The returned
- // PrivateKey must be able to expose its associated
- // public key.
- GenerateKey() (crypto.PrivateKey, error)
-}
-
-// IssuedCertificate represents a certificate that was just issued.
-type IssuedCertificate struct {
- // The PEM-encoding of DER-encoded ASN.1 data.
- Certificate []byte
-
- // Any extra information to serialize alongside the
- // certificate in storage.
- Metadata interface{}
-}
-
-// CertificateResource associates a certificate with its private
-// key and other useful information, for use in maintaining the
-// certificate.
-type CertificateResource struct {
- // The list of names on the certificate;
- // for convenience only.
- SANs []string `json:"sans,omitempty"`
-
- // The PEM-encoding of DER-encoded ASN.1 data
- // for the cert or chain.
- CertificatePEM []byte `json:"-"`
-
- // The PEM-encoding of the certificate's private key.
- PrivateKeyPEM []byte `json:"-"`
-
- // Any extra information associated with the certificate,
- // usually provided by the issuer implementation.
- IssuerData interface{} `json:"issuer_data,omitempty"`
-
- // The unique string identifying the issuer of the
- // certificate; internally useful for storage access.
- issuerKey string `json:"-"`
-}
-
-// NamesKey returns the list of SANs as a single string,
-// truncated to some ridiculously long size limit. It
-// can act as a key for the set of names on the resource.
-func (cr *CertificateResource) NamesKey() string {
- sort.Strings(cr.SANs)
- result := strings.Join(cr.SANs, ",")
- if len(result) > 1024 {
- const trunc = "_trunc"
- result = result[:1024-len(trunc)] + trunc
- }
- return result
-}
-
-// Default contains the package defaults for the
-// various Config fields. This is used as a template
-// when creating your own Configs with New() or
-// NewDefault(), and it is also used as the Config
-// by all the high-level functions in this package
-// that abstract away most configuration (HTTPS(),
-// TLS(), Listen(), etc).
-//
-// The fields of this value will be used for Config
-// fields which are unset. Feel free to modify these
-// defaults, but do not use this Config by itself: it
-// is only a template. Valid configurations can be
-// obtained by calling New() (if you have your own
-// certificate cache) or NewDefault() (if you only
-// need a single config and want to use the default
-// cache).
-//
-// Even if the Issuers or Storage fields are not set,
-// defaults will be applied in the call to New().
-var Default = Config{
- RenewalWindowRatio: DefaultRenewalWindowRatio,
- Storage: defaultFileStorage,
- KeySource: DefaultKeyGenerator,
-}
-
-const (
- // HTTPChallengePort is the officially-designated port for
- // the HTTP challenge according to the ACME spec.
- HTTPChallengePort = 80
-
- // TLSALPNChallengePort is the officially-designated port for
- // the TLS-ALPN challenge according to the ACME spec.
- TLSALPNChallengePort = 443
-)
-
-// Port variables must remain their defaults unless you
-// forward packets from the defaults to whatever these
-// are set to; otherwise ACME challenges will fail.
-var (
- // HTTPPort is the port on which to serve HTTP
- // and, as such, the HTTP challenge (unless
- // Default.AltHTTPPort is set).
- HTTPPort = 80
-
- // HTTPSPort is the port on which to serve HTTPS
- // and, as such, the TLS-ALPN challenge
- // (unless Default.AltTLSALPNPort is set).
- HTTPSPort = 443
-)
-
-// Variables for conveniently serving HTTPS.
-var (
- httpLn, httpsLn net.Listener
- lnMu sync.Mutex
- httpWg sync.WaitGroup
-)
-
-// Maximum size for the stack trace when recovering from panics.
-const stackTraceBufferSize = 1024 * 128
diff --git a/vendor/github.com/caddyserver/certmagic/config.go b/vendor/github.com/caddyserver/certmagic/config.go
deleted file mode 100644
index d408418f..00000000
--- a/vendor/github.com/caddyserver/certmagic/config.go
+++ /dev/null
@@ -1,1086 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "bytes"
- "context"
- "crypto"
- "crypto/rand"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/asn1"
- "encoding/json"
- "errors"
- "fmt"
- weakrand "math/rand"
- "net"
- "net/url"
- "strings"
- "time"
-
- "github.com/mholt/acmez"
- "github.com/mholt/acmez/acme"
- "go.uber.org/zap"
- "golang.org/x/net/idna"
-)
-
-// Config configures a certificate manager instance.
-// An empty Config is not valid: use New() to obtain
-// a valid Config.
-type Config struct {
- // How much of a certificate's lifetime becomes the
- // renewal window, which is the span of time at the
- // end of the certificate's validity period in which
- // it should be renewed; for most certificates, the
- // global default is good, but for extremely short-
- // lived certs, you may want to raise this to ~0.5.
- RenewalWindowRatio float64
-
- // An optional event callback clients can set
- // to subscribe to certain things happening
- // internally by this config; invocations are
- // synchronous, so make them return quickly!
- OnEvent func(event string, data interface{})
-
- // DefaultServerName specifies a server name
- // to use when choosing a certificate if the
- // ClientHello's ServerName field is empty.
- DefaultServerName string
-
- // The state needed to operate on-demand TLS;
- // if non-nil, on-demand TLS is enabled and
- // certificate operations are deferred to
- // TLS handshakes (or as-needed).
- // TODO: Can we call this feature "Reactive/Lazy/Passive TLS" instead?
- OnDemand *OnDemandConfig
-
- // Adds the must staple TLS extension to the CSR.
- MustStaple bool
-
- // The source for getting new certificates; the
- // default Issuer is ACMEManager. If multiple
- // issuers are specified, they will be tried in
- // turn until one succeeds.
- Issuers []Issuer
-
- // The source of new private keys for certificates;
- // the default KeySource is StandardKeyGenerator.
- KeySource KeyGenerator
-
- // CertSelection chooses one of the certificates
- // with which the ClientHello will be completed;
- // if not set, DefaultCertificateSelector will
- // be used.
- CertSelection CertificateSelector
-
- // OCSP configures how OCSP is handled. By default,
- // OCSP responses are fetched for every certificate
- // with a responder URL, and cached on disk. Changing
- // these defaults is STRONGLY discouraged unless you
- // have a compelling reason to put clients at greater
- // risk and reduce their privacy.
- OCSP OCSPConfig
-
- // The storage to access when storing or loading
- // TLS assets. Default is the local file system.
- Storage Storage
-
- // Set a logger to enable logging.
- Logger *zap.Logger
-
- // required pointer to the in-memory cert cache
- certCache *Cache
-}
-
-// NewDefault makes a valid config based on the package
-// Default config. Most users will call this function
-// instead of New() since most use cases require only a
-// single config for any and all certificates.
-//
-// If your requirements are more advanced (for example,
-// multiple configs depending on the certificate), then use
-// New() instead. (You will need to make your own Cache
-// first.) If you only need a single Config to manage your
-// certs (even if that config changes, as long as it is the
-// only one), customize the Default package variable before
-// calling NewDefault().
-//
-// All calls to NewDefault() will return configs that use the
-// same, default certificate cache. All configs returned
-// by NewDefault() are based on the values of the fields of
-// Default at the time it is called.
-//
-// This is the only way to get a config that uses the
-// default certificate cache.
-func NewDefault() *Config {
- defaultCacheMu.Lock()
- if defaultCache == nil {
- defaultCache = NewCache(CacheOptions{
- // the cache will likely need to renew certificates,
- // so it will need to know how to do that, which
- // depends on the certificate being managed and which
- // can change during the lifetime of the cache; this
- // callback makes it possible to get the latest and
- // correct config with which to manage the cert,
- // but if the user does not provide one, we can only
- // assume that we are to use the default config
- GetConfigForCert: func(Certificate) (*Config, error) {
- return NewDefault(), nil
- },
- })
- }
- certCache := defaultCache
- defaultCacheMu.Unlock()
-
- return newWithCache(certCache, Default)
-}
-
-// New makes a new, valid config based on cfg and
-// uses the provided certificate cache. certCache
-// MUST NOT be nil or this function will panic.
-//
-// Use this method when you have an advanced use case
-// that requires a custom certificate cache and config
-// that may differ from the Default. For example, if
-// not all certificates are managed/renewed the same
-// way, you need to make your own Cache value with a
-// GetConfigForCert callback that returns the correct
-// configuration for each certificate. However, for
-// the vast majority of cases, there will be only a
-// single Config, thus the default cache (which always
-// uses the default Config) and default config will
-// suffice, and you should use NewDefault() instead.
-func New(certCache *Cache, cfg Config) *Config {
- if certCache == nil {
- panic("a certificate cache is required")
- }
- if certCache.options.GetConfigForCert == nil {
- panic("cache must have GetConfigForCert set in its options")
- }
- return newWithCache(certCache, cfg)
-}
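
A sketch of the advanced path the comment describes: building a custom Cache whose callback returns the appropriate Config per certificate (here, trivially, a single default-based config):

package main

import "github.com/caddyserver/certmagic"

func main() {
	cache := certmagic.NewCache(certmagic.CacheOptions{
		// Return the Config that should manage the given certificate.
		GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
			return certmagic.NewDefault(), nil
		},
	})
	cfg := certmagic.New(cache, certmagic.Config{RenewalWindowRatio: 0.5})
	_ = cfg
}
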
-
-// newWithCache ensures that cfg is a valid config by populating
-// zero-value fields from the Default Config. If certCache is
-// nil, this function panics.
-func newWithCache(certCache *Cache, cfg Config) *Config {
- if certCache == nil {
- panic("cannot make a valid config without a pointer to a certificate cache")
- }
-
- if cfg.OnDemand == nil {
- cfg.OnDemand = Default.OnDemand
- }
- if cfg.RenewalWindowRatio == 0 {
- cfg.RenewalWindowRatio = Default.RenewalWindowRatio
- }
- if cfg.OnEvent == nil {
- cfg.OnEvent = Default.OnEvent
- }
- if cfg.KeySource == nil {
- cfg.KeySource = Default.KeySource
- }
- if cfg.DefaultServerName == "" {
- cfg.DefaultServerName = Default.DefaultServerName
- }
- if !cfg.MustStaple {
- cfg.MustStaple = Default.MustStaple
- }
- if cfg.Storage == nil {
- cfg.Storage = Default.Storage
- }
- if len(cfg.Issuers) == 0 {
- cfg.Issuers = Default.Issuers
- if len(cfg.Issuers) == 0 {
- // at least one issuer is absolutely required
- cfg.Issuers = []Issuer{NewACMEManager(&cfg, DefaultACME)}
- }
- }
-
- // absolutely don't allow a nil storage,
- // because that would make almost anything
- // a config can do pointless
- if cfg.Storage == nil {
- cfg.Storage = defaultFileStorage
- }
-
- cfg.certCache = certCache
-
- return &cfg
-}
-
-// ManageSync causes the certificates for domainNames to be managed
-// according to cfg. If cfg.OnDemand is not nil, then this simply
-// whitelists the domain names and defers the certificate operations
-// to when they are needed. Otherwise, the certificates for each
-// name are loaded from storage or obtained from the CA. If loaded
-// from storage, they are renewed if they are expiring or expired.
-// It then caches the certificate in memory and is prepared to serve
-// them up during TLS handshakes.
-//
-// Note that name whitelisting for on-demand management only takes
-// effect if cfg.OnDemand.DecisionFunc is not set (is nil); it will
-// not overwrite an existing DecisionFunc, nor will it overwrite
-// its decision; i.e. the implicit whitelist is only used if no
-// DecisionFunc is set.
-//
-// This method is synchronous, meaning that certificates for all
-// domainNames must be successfully obtained (or renewed) before
-// it returns. It returns immediately on the first error for any
-// of the given domainNames. This behavior is recommended for
-// interactive use (i.e. when an administrator is present) so
-// that errors can be reported and fixed immediately.
-func (cfg *Config) ManageSync(domainNames []string) error {
- return cfg.manageAll(context.Background(), domainNames, false)
-}
-
-// ClientCredentials returns a list of TLS client certificate chains for the given identifiers.
-// The return value can be used in a tls.Config to enable client authentication using managed certificates.
-// Any certificates that need to be obtained or renewed for these identifiers will be managed accordingly.
-func (cfg *Config) ClientCredentials(ctx context.Context, identifiers []string) ([]tls.Certificate, error) {
- err := cfg.manageAll(ctx, identifiers, false)
- if err != nil {
- return nil, err
- }
- var chains []tls.Certificate
- for _, id := range identifiers {
- certRes, err := cfg.loadCertResourceAnyIssuer(id)
- if err != nil {
- return chains, err
- }
- chain, err := tls.X509KeyPair(certRes.CertificatePEM, certRes.PrivateKeyPEM)
- if err != nil {
- return chains, err
- }
- chains = append(chains, chain)
- }
- return chains, nil
-}
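
A sketch of wiring the returned chains into a client-side tls.Config; the identifier is hypothetical:

package main

import (
	"context"
	"crypto/tls"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	cfg := certmagic.NewDefault()
	chains, err := cfg.ClientCredentials(context.Background(), []string{"client.example.com"})
	if err != nil {
		log.Fatal(err)
	}
	// Present the managed certificate during client authentication.
	tlsCfg := &tls.Config{Certificates: chains}
	_ = tlsCfg
}
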
-
-// ManageAsync is the same as ManageSync, except that ACME
-// operations are performed asynchronously (in the background).
-// This method returns before certificates are ready. It is
-// crucial that the administrator monitors the logs and is
-// notified of any errors so that corrective action can be
-// taken as soon as possible. Any errors returned from this
-// method occurred before ACME transactions started.
-//
-// As long as logs are monitored, this method is typically
-// recommended for non-interactive environments.
-//
-// If there are failures loading, obtaining, or renewing a
-// certificate, it will be retried with exponential backoff
-// for up to about 30 days, with a maximum interval of about
-// 24 hours. Cancelling ctx will cancel retries and shut down
-// any goroutines spawned by ManageAsync.
-func (cfg *Config) ManageAsync(ctx context.Context, domainNames []string) error {
- return cfg.manageAll(ctx, domainNames, true)
-}
-
-func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bool) error {
- if ctx == nil {
- ctx = context.Background()
- }
-
- for _, domainName := range domainNames {
- // if on-demand is configured, defer obtain and renew operations
- if cfg.OnDemand != nil {
- if !cfg.OnDemand.whitelistContains(domainName) {
- cfg.OnDemand.hostWhitelist = append(cfg.OnDemand.hostWhitelist, domainName)
- }
- continue
- }
-
- // otherwise, begin management immediately
- err := cfg.manageOne(ctx, domainName, async)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (cfg *Config) manageOne(ctx context.Context, domainName string, async bool) error {
- // first try loading existing certificate from storage
- cert, err := cfg.CacheManagedCertificate(domainName)
- if err != nil {
- if _, ok := err.(ErrNotExist); !ok {
- return fmt.Errorf("%s: caching certificate: %v", domainName, err)
- }
- // if we don't have one in storage, obtain one
- obtain := func() error {
- var err error
- if async {
- err = cfg.ObtainCertAsync(ctx, domainName)
- } else {
- err = cfg.ObtainCertSync(ctx, domainName)
- }
- if err != nil {
- return fmt.Errorf("%s: obtaining certificate: %w", domainName, err)
- }
- cert, err = cfg.CacheManagedCertificate(domainName)
- if err != nil {
- return fmt.Errorf("%s: caching certificate after obtaining it: %v", domainName, err)
- }
- return nil
- }
- if async {
- // Leave the job name empty so as to allow duplicate 'obtain'
- // jobs; this is because Caddy calls ManageAsync() before the
- // previous config is stopped (and before its context is
- // canceled), which means that if an obtain job is still
- // running for the same domain, Submit() would not queue the
- // new one because it is still running, even though it is
- // (probably) about to be canceled (it might not if the new
- // config fails to finish loading, however). In any case, we
- // presume it is safe to enqueue a duplicate obtain job because
- // either the old one (or sometimes the new one) is about to be
- // canceled. This seems like reasonable logic for any consumer
- // of this lib. See https://github.com/caddyserver/caddy/issues/3202
- jm.Submit(cfg.Logger, "", obtain)
- return nil
- }
- return obtain()
- }
-
- // for an existing certificate, make sure it is renewed
- renew := func() error {
- var err error
- if async {
- err = cfg.RenewCertAsync(ctx, domainName, false)
- } else {
- err = cfg.RenewCertSync(ctx, domainName, false)
- }
- if err != nil {
- return fmt.Errorf("%s: renewing certificate: %w", domainName, err)
- }
- // successful renewal, so update in-memory cache
- err = cfg.reloadManagedCertificate(cert)
- if err != nil {
- return fmt.Errorf("%s: reloading renewed certificate into memory: %v", domainName, err)
- }
- return nil
- }
- if cert.NeedsRenewal(cfg) {
- if async {
- jm.Submit(cfg.Logger, "renew_"+domainName, renew)
- return nil
- }
- return renew()
- }
-
- return nil
-}
-
-// Unmanage causes the certificates for domainNames to stop being managed.
-// If there are certificates for the supplied domain names in the cache, they
-// are evicted from the cache.
-func (cfg *Config) Unmanage(domainNames []string) {
- var deleteQueue []Certificate
- for _, domainName := range domainNames {
- certs := cfg.certCache.AllMatchingCertificates(domainName)
- for _, cert := range certs {
- if !cert.managed {
- continue
- }
- deleteQueue = append(deleteQueue, cert)
- }
- }
-
- cfg.certCache.mu.Lock()
- for _, cert := range deleteQueue {
- cfg.certCache.removeCertificate(cert)
- }
- cfg.certCache.mu.Unlock()
-}
-
-// ObtainCertSync generates a new private key and obtains a certificate for
-// name using cfg in the foreground; i.e. interactively and without retries.
-// It stows the renewed certificate and its assets in storage if successful.
-// It DOES NOT load the certificate into the in-memory cache. This method
-// is a no-op if storage already has a certificate for name.
-func (cfg *Config) ObtainCertSync(ctx context.Context, name string) error {
- return cfg.obtainCert(ctx, name, true)
-}
-
-// ObtainCertAsync is the same as ObtainCertSync(), except it runs in the
-// background; i.e. non-interactively, and with retries if it fails.
-func (cfg *Config) ObtainCertAsync(ctx context.Context, name string) error {
- return cfg.obtainCert(ctx, name, false)
-}
-
-func (cfg *Config) obtainCert(ctx context.Context, name string, interactive bool) error {
- if len(cfg.Issuers) == 0 {
- return fmt.Errorf("no issuers configured; impossible to obtain or check for existing certificate in storage")
- }
-
- // if storage has all resources for this certificate, obtain is a no-op
- if cfg.storageHasCertResourcesAnyIssuer(name) {
- return nil
- }
-
- // ensure storage is writeable and readable
- // TODO: this is not necessary every time; should only perform check once every so often for each storage, which may require some global state...
- err := cfg.checkStorage()
- if err != nil {
- return fmt.Errorf("failed storage check: %v - storage is probably misconfigured", err)
- }
-
- log := loggerNamed(cfg.Logger, "obtain")
-
- if log != nil {
- log.Info("acquiring lock", zap.String("identifier", name))
- }
-
- // ensure idempotency of the obtain operation for this name
- lockKey := cfg.lockKey(certIssueLockOp, name)
- err = acquireLock(ctx, cfg.Storage, lockKey)
- if err != nil {
- return fmt.Errorf("unable to acquire lock '%s': %v", lockKey, err)
- }
- defer func() {
- if log != nil {
- log.Info("releasing lock", zap.String("identifier", name))
- }
- if err := releaseLock(cfg.Storage, lockKey); err != nil {
- if log != nil {
- log.Error("unable to unlock",
- zap.String("identifier", name),
- zap.String("lock_key", lockKey),
- zap.Error(err))
- }
- }
- }()
- if log != nil {
- log.Info("lock acquired", zap.String("identifier", name))
- }
-
- f := func(ctx context.Context) error {
- // check if obtain is still needed -- might have been obtained during lock
- if cfg.storageHasCertResourcesAnyIssuer(name) {
- if log != nil {
- log.Info("certificate already exists in storage", zap.String("identifier", name))
- }
- return nil
- }
-
- // if storage has a private key already, use it; otherwise,
- // we'll generate our own
- privKey, privKeyPEM, issuers, err := cfg.reusePrivateKey(name)
- if err != nil {
- return err
- }
- if privKey == nil {
- privKey, err = cfg.KeySource.GenerateKey()
- if err != nil {
- return err
- }
- privKeyPEM, err = encodePrivateKey(privKey)
- if err != nil {
- return err
- }
- }
-
- csr, err := cfg.generateCSR(privKey, []string{name})
- if err != nil {
- return err
- }
-
- // try to obtain from each issuer until we succeed
- var issuedCert *IssuedCertificate
- var issuerUsed Issuer
- for i, issuer := range issuers {
- if log != nil {
- log.Debug(fmt.Sprintf("trying issuer %d/%d", i+1, len(cfg.Issuers)),
- zap.String("issuer", issuer.IssuerKey()))
- }
-
- if prechecker, ok := issuer.(PreChecker); ok {
- err = prechecker.PreCheck(ctx, []string{name}, interactive)
- if err != nil {
- continue
- }
- }
-
- issuedCert, err = issuer.Issue(ctx, csr)
- if err == nil {
- issuerUsed = issuer
- break
- }
-
- // err is usually wrapped, which is nice for simply printing it, but
- // with our structured error logs we only need the problem string
- errToLog := err
- var problem acme.Problem
- if errors.As(err, &problem) {
- errToLog = problem
- }
- if log != nil {
- log.Error("could not get certificate from issuer",
- zap.String("identifier", name),
- zap.String("issuer", issuer.IssuerKey()),
- zap.Error(errToLog))
- }
- }
- if err != nil {
- // only the error from the last issuer will be returned, but we logged the others
- return fmt.Errorf("[%s] Obtain: %w", name, err)
- }
-
- // success - immediately save the certificate resource
- certRes := CertificateResource{
- SANs: namesFromCSR(csr),
- CertificatePEM: issuedCert.Certificate,
- PrivateKeyPEM: privKeyPEM,
- IssuerData: issuedCert.Metadata,
- }
- err = cfg.saveCertResource(issuerUsed, certRes)
- if err != nil {
- return fmt.Errorf("[%s] Obtain: saving assets: %v", name, err)
- }
-
- cfg.emit("cert_obtained", name)
-
- if log != nil {
- log.Info("certificate obtained successfully", zap.String("identifier", name))
- }
-
- return nil
- }
-
- if interactive {
- err = f(ctx)
- } else {
- err = doWithRetry(ctx, log, f)
- }
-
- return err
-}
-
-// reusePrivateKey looks for a private key for domain in storage in the configured issuers
-// paths. For the first private key it finds, it returns that key both decoded and PEM-encoded,
-// as well as the reordered list of issuers to use instead of cfg.Issuers (because if a key
-// is found, that issuer should be tried first, so it is moved to the front in a copy of
-// cfg.Issuers).
-func (cfg *Config) reusePrivateKey(domain string) (privKey crypto.PrivateKey, privKeyPEM []byte, issuers []Issuer, err error) {
- // make a copy of cfg.Issuers so that if we have to reorder elements, we don't
- // inadvertently mutate the configured issuers (see append calls below)
- issuers = make([]Issuer, len(cfg.Issuers))
- copy(issuers, cfg.Issuers)
-
- for i, issuer := range issuers {
- // see if this issuer location in storage has a private key for the domain
- privateKeyStorageKey := StorageKeys.SitePrivateKey(issuer.IssuerKey(), domain)
- privKeyPEM, err = cfg.Storage.Load(privateKeyStorageKey)
- if _, ok := err.(ErrNotExist); ok {
- err = nil // obviously, it's OK to not have a private key; so don't prevent obtaining a cert
- continue
- }
- if err != nil {
- return nil, nil, nil, fmt.Errorf("loading existing private key for reuse with issuer %s: %v", issuer.IssuerKey(), err)
- }
-
- // we loaded a private key; try decoding it so we can use it
- privKey, err = decodePrivateKey(privKeyPEM)
- if err != nil {
- return nil, nil, nil, err
- }
-
- // since the private key was found in storage for this issuer, move it
- // to the front of the list so we prefer this issuer first
- issuers = append([]Issuer{issuer}, append(issuers[:i], issuers[i+1:]...)...)
- break
- }
-
- return
-}
-
-// storageHasCertResourcesAnyIssuer returns true if storage has all the
-// certificate resources in storage from any configured issuer. It checks
-// all configured issuers in order.
-func (cfg *Config) storageHasCertResourcesAnyIssuer(name string) bool {
- for _, iss := range cfg.Issuers {
- if cfg.storageHasCertResources(iss, name) {
- return true
- }
- }
- return false
-}
-
-// RenewCertSync renews the certificate for name using cfg in the foreground;
-// i.e. interactively and without retries. It stows the renewed certificate
-// and its assets in storage if successful. It DOES NOT update the in-memory
-// cache with the new certificate. The certificate will not be renewed if it
-// is not close to expiring unless force is true.
-//
-// Renewing a certificate is the same as obtaining a certificate, except that
-// the existing private key already in storage is reused.
-func (cfg *Config) RenewCertSync(ctx context.Context, name string, force bool) error {
- return cfg.renewCert(ctx, name, force, true)
-}
-
-// RenewCertAsync is the same as RenewCertSync(), except it runs in the
-// background; i.e. non-interactively, and with retries if it fails.
-func (cfg *Config) RenewCertAsync(ctx context.Context, name string, force bool) error {
- return cfg.renewCert(ctx, name, force, false)
-}
-
-func (cfg *Config) renewCert(ctx context.Context, name string, force, interactive bool) error {
- if len(cfg.Issuers) == 0 {
- return fmt.Errorf("no issuers configured; impossible to renew or check existing certificate in storage")
- }
-
- // ensure storage is writeable and readable
- // TODO: this is not necessary every time; should only perform check once every so often for each storage, which may require some global state...
- err := cfg.checkStorage()
- if err != nil {
- return fmt.Errorf("failed storage check: %v - storage is probably misconfigured", err)
- }
-
- log := loggerNamed(cfg.Logger, "renew")
-
- if log != nil {
- log.Info("acquiring lock", zap.String("identifier", name))
- }
-
- // ensure idempotency of the renew operation for this name
- lockKey := cfg.lockKey(certIssueLockOp, name)
- err = acquireLock(ctx, cfg.Storage, lockKey)
- if err != nil {
- return fmt.Errorf("unable to acquire lock '%s': %v", lockKey, err)
- }
- defer func() {
- if log != nil {
- log.Info("releasing lock", zap.String("identifier", name))
- }
- if err := releaseLock(cfg.Storage, lockKey); err != nil {
- if log != nil {
- log.Error("unable to unlock",
- zap.String("identifier", name),
- zap.String("lock_key", lockKey),
- zap.Error(err))
- }
- }
- }()
- if log != nil {
- log.Info("lock acquired", zap.String("identifier", name))
- }
-
- f := func(ctx context.Context) error {
- // prepare for renewal (load PEM cert, key, and meta)
- certRes, err := cfg.loadCertResourceAnyIssuer(name)
- if err != nil {
- return err
- }
-
- // check if renew is still needed - might have been renewed while waiting for lock
- timeLeft, needsRenew := cfg.managedCertNeedsRenewal(certRes)
- if !needsRenew {
- if force {
- if log != nil {
- log.Info("certificate does not need to be renewed, but renewal is being forced",
- zap.String("identifier", name),
- zap.Duration("remaining", timeLeft))
- }
- } else {
- if log != nil {
- log.Info("certificate appears to have been renewed already",
- zap.String("identifier", name),
- zap.Duration("remaining", timeLeft))
- }
- return nil
- }
- }
-
- if log != nil {
- log.Info("renewing certificate",
- zap.String("identifier", name),
- zap.Duration("remaining", timeLeft))
- }
-
- privateKey, err := decodePrivateKey(certRes.PrivateKeyPEM)
- if err != nil {
- return err
- }
- csr, err := cfg.generateCSR(privateKey, []string{name})
- if err != nil {
- return err
- }
-
- // try to obtain from each issuer until we succeed
- var issuedCert *IssuedCertificate
- var issuerUsed Issuer
- for _, issuer := range cfg.Issuers {
- if prechecker, ok := issuer.(PreChecker); ok {
- err = prechecker.PreCheck(ctx, []string{name}, interactive)
- if err != nil {
- continue
- }
- }
-
- issuedCert, err = issuer.Issue(ctx, csr)
- if err == nil {
- issuerUsed = issuer
- break
- }
-
- // err is usually wrapped, which is nice for simply printing it, but
- // with our structured error logs we only need the problem string
- errToLog := err
- var problem acme.Problem
- if errors.As(err, &problem) {
- errToLog = problem
- }
- if log != nil {
- log.Error("could not get certificate from issuer",
- zap.String("identifier", name),
- zap.String("issuer", issuer.IssuerKey()),
- zap.Error(errToLog))
- }
- }
- if err != nil {
- // only the error from the last issuer will be returned, but we logged the others
- return fmt.Errorf("[%s] Renew: %w", name, err)
- }
-
- // success - immediately save the renewed certificate resource
- newCertRes := CertificateResource{
- SANs: namesFromCSR(csr),
- CertificatePEM: issuedCert.Certificate,
- PrivateKeyPEM: certRes.PrivateKeyPEM,
- IssuerData: issuedCert.Metadata,
- }
- err = cfg.saveCertResource(issuerUsed, newCertRes)
- if err != nil {
- return fmt.Errorf("[%s] Renew: saving assets: %v", name, err)
- }
-
- cfg.emit("cert_renewed", name)
-
- if log != nil {
- log.Info("certificate renewed successfully", zap.String("identifier", name))
- }
-
- return nil
- }
-
- if interactive {
- err = f(ctx)
- } else {
- err = doWithRetry(ctx, log, f)
- }
-
- return err
-}
-
-func (cfg *Config) generateCSR(privateKey crypto.PrivateKey, sans []string) (*x509.CertificateRequest, error) {
- csrTemplate := new(x509.CertificateRequest)
-
- for _, name := range sans {
- if ip := net.ParseIP(name); ip != nil {
- csrTemplate.IPAddresses = append(csrTemplate.IPAddresses, ip)
- } else if strings.Contains(name, "@") {
- csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, name)
- } else if u, err := url.Parse(name); err == nil && strings.Contains(name, "/") {
- csrTemplate.URIs = append(csrTemplate.URIs, u)
- } else {
- // convert IDNs to ASCII according to RFC 5280 section 7
- normalizedName, err := idna.ToASCII(name)
- if err != nil {
- return nil, fmt.Errorf("converting identifier '%s' to ASCII: %v", name, err)
- }
- csrTemplate.DNSNames = append(csrTemplate.DNSNames, normalizedName)
- }
- }
-
- if cfg.MustStaple {
- csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, mustStapleExtension)
- }
-
- csrDER, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, privateKey)
- if err != nil {
- return nil, err
- }
-
- return x509.ParseCertificateRequest(csrDER)
-}
-
-// RevokeCert revokes the certificate for domain via ACME protocol. It requires
-// that cfg.Issuers is properly configured with the same issuer that issued the
-// certificate being revoked. See RFC 5280 §5.3.1 for reason codes.
-//
-// The certificate assets are deleted from storage after successful revocation
-// to prevent reuse.
-func (cfg *Config) RevokeCert(ctx context.Context, domain string, reason int, interactive bool) error {
- for i, issuer := range cfg.Issuers {
- issuerKey := issuer.IssuerKey()
-
- rev, ok := issuer.(Revoker)
- if !ok {
- return fmt.Errorf("issuer %d (%s) is not a Revoker", i, issuerKey)
- }
-
- certRes, err := cfg.loadCertResource(issuer, domain)
- if err != nil {
- return err
- }
-
- if !cfg.Storage.Exists(StorageKeys.SitePrivateKey(issuerKey, domain)) {
- return fmt.Errorf("private key not found for %s", certRes.SANs)
- }
-
- err = rev.Revoke(ctx, certRes, reason)
- if err != nil {
- return fmt.Errorf("issuer %d (%s): %v", i, issuerKey, err)
- }
-
- cfg.emit("cert_revoked", domain)
-
- err = cfg.deleteSiteAssets(issuerKey, domain)
- if err != nil {
- return fmt.Errorf("certificate revoked, but unable to fully clean up assets from issuer %s: %v", issuerKey, err)
- }
- }
-
- return nil
-}
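
For illustration, a sketch of a revocation call using reason code 0 ("unspecified" per RFC 5280 §5.3.1); the domain is hypothetical:

package main

import (
	"context"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	cfg := certmagic.NewDefault()
	// Revokes via the configured issuer, then deletes the stored assets.
	if err := cfg.RevokeCert(context.Background(), "example.com", 0, true); err != nil {
		log.Fatal(err)
	}
}
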
-
-// TLSConfig is an opinionated method that returns a
-// recommended, modern TLS configuration that can be
-// used to configure TLS listeners, which also supports
-// the TLS-ALPN challenge and serves up certificates
-// managed by cfg.
-//
-// Unlike the package TLS() function, this method does
-// not, by itself, enable certificate management for
-// any domain names.
-//
-// Feel free to further customize the returned tls.Config,
-// but do not mess with the GetCertificate or NextProtos
-// fields unless you know what you're doing, as they're
-// necessary to solve the TLS-ALPN challenge.
-func (cfg *Config) TLSConfig() *tls.Config {
- return &tls.Config{
- // these two fields necessary for TLS-ALPN challenge
- GetCertificate: cfg.GetCertificate,
- NextProtos: []string{acmez.ACMETLS1Protocol},
-
- // the rest recommended for modern TLS servers
- MinVersion: tls.VersionTLS12,
- CurvePreferences: []tls.CurveID{
- tls.X25519,
- tls.CurveP256,
- },
- CipherSuites: preferredDefaultCipherSuites(),
- PreferServerCipherSuites: true,
- }
-}
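For illustration, a minimal standalone sketch (standard library only, not the vendored API) of wiring such an opinionated config into a TLS listener. The GetCertificate stub here is a placeholder assumption; the real method plugs in cfg.GetCertificate and the ACME TLS-ALPN protocol as shown above.

package main

import (
	"crypto/tls"
	"errors"
	"log"
)

func main() {
	cfg := &tls.Config{
		// Placeholder only: handshakes will fail until a real
		// certificate source is wired in here.
		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
			return nil, errors.New("no certificate loaded in this sketch")
		},
		MinVersion:       tls.VersionTLS12,
		CurvePreferences: []tls.CurveID{tls.X25519, tls.CurveP256},
	}
	ln, err := tls.Listen("tcp", "127.0.0.1:8443", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	log.Println("listening on", ln.Addr())
}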
-
-// getChallengeInfo loads the challenge info from either the internal challenge memory
-// or the external storage (implying distributed solving). The second return value
-// indicates whether challenge info was loaded from external storage. If true, the
-// challenge is being solved in a distributed fashion; if false, from internal memory.
-// If no matching challenge information can be found, an error is returned.
-func (cfg *Config) getChallengeInfo(identifier string) (Challenge, bool, error) {
- // first, check if our process initiated this challenge; if so, just return it
- chalData, ok := GetACMEChallenge(identifier)
- if ok {
- return chalData, false, nil
- }
-
- // otherwise, perhaps another instance in the cluster initiated it; check
- // the configured storage to retrieve challenge data
-
- var chalInfo acme.Challenge
- var chalInfoBytes []byte
- var tokenKey string
- for _, issuer := range cfg.Issuers {
- ds := distributedSolver{
- storage: cfg.Storage,
- storageKeyIssuerPrefix: storageKeyACMECAPrefix(issuer.IssuerKey()),
- }
- tokenKey = ds.challengeTokensKey(identifier)
- var err error
- chalInfoBytes, err = cfg.Storage.Load(tokenKey)
- if err == nil {
- break
- }
- if _, ok := err.(ErrNotExist); ok {
- continue
- }
- return Challenge{}, false, fmt.Errorf("opening distributed challenge token file %s: %v", tokenKey, err)
- }
- if len(chalInfoBytes) == 0 {
- return Challenge{}, false, fmt.Errorf("no information found to solve challenge for identifier: %s", identifier)
- }
-
- err := json.Unmarshal(chalInfoBytes, &chalInfo)
- if err != nil {
- return Challenge{}, false, fmt.Errorf("decoding challenge token file %s (corrupted?): %v", tokenKey, err)
- }
-
- return Challenge{Challenge: chalInfo}, true, nil
-}
-
-// checkStorage tests the storage by writing random bytes
-// to a random key, and then loading those bytes and
-// comparing the loaded value. If this fails, the provided
-// cfg.Storage mechanism should not be used.
-func (cfg *Config) checkStorage() error {
- key := fmt.Sprintf("rw_test_%d", weakrand.Int())
- contents := make([]byte, 1024*10) // size sufficient for one or two ACME resources
- _, err := weakrand.Read(contents)
- if err != nil {
- return err
- }
- err = cfg.Storage.Store(key, contents)
- if err != nil {
- return err
- }
- defer func() {
- deleteErr := cfg.Storage.Delete(key)
- if deleteErr != nil {
- if cfg.Logger != nil {
- cfg.Logger.Error("deleting test key from storage",
- zap.String("key", key), zap.Error(err))
- }
- }
- // if there was no other error, make sure
- // to return any error returned from Delete
- if err == nil {
- err = deleteErr
- }
- }()
- loaded, err := cfg.Storage.Load(key)
- if err != nil {
- return err
- }
- if !bytes.Equal(contents, loaded) {
- return fmt.Errorf("load yielded different value than was stored; expected %d bytes, got %d bytes of differing elements", len(contents), len(loaded))
- }
- return nil
-}
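The round-trip self-test above is easy to reproduce against a plain directory. A minimal sketch (standard library only; checkDir is a hypothetical helper, not part of the package) of the write-random-bytes, load, and compare pattern:

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"os"
	"path/filepath"
)

// checkDir writes random bytes under a throwaway key, loads them
// back, and compares them -- the same test checkStorage performs
// through the Storage interface.
func checkDir(dir string) error {
	key := filepath.Join(dir, "rw_test")
	contents := make([]byte, 10*1024)
	if _, err := rand.Read(contents); err != nil {
		return err
	}
	if err := os.WriteFile(key, contents, 0600); err != nil {
		return err
	}
	defer os.Remove(key)
	loaded, err := os.ReadFile(key)
	if err != nil {
		return err
	}
	if !bytes.Equal(contents, loaded) {
		return fmt.Errorf("expected %d bytes, got %d", len(contents), len(loaded))
	}
	return nil
}

func main() {
	if err := checkDir(os.TempDir()); err != nil {
		fmt.Println("storage check failed:", err)
		return
	}
	fmt.Println("storage check passed")
}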
-
-// storageHasCertResources returns true if the storage
-// associated with cfg's certificate cache has all the
-// resources related to the certificate for domain: the
-// certificate, the private key, and the metadata.
-func (cfg *Config) storageHasCertResources(issuer Issuer, domain string) bool {
- issuerKey := issuer.IssuerKey()
- certKey := StorageKeys.SiteCert(issuerKey, domain)
- keyKey := StorageKeys.SitePrivateKey(issuerKey, domain)
- metaKey := StorageKeys.SiteMeta(issuerKey, domain)
- return cfg.Storage.Exists(certKey) &&
- cfg.Storage.Exists(keyKey) &&
- cfg.Storage.Exists(metaKey)
-}
-
-// deleteSiteAssets deletes the folder in storage containing the
-// certificate, private key, and metadata file for domain from the
-// issuer with the given issuer key.
-func (cfg *Config) deleteSiteAssets(issuerKey, domain string) error {
- err := cfg.Storage.Delete(StorageKeys.SiteCert(issuerKey, domain))
- if err != nil {
- return fmt.Errorf("deleting certificate file: %v", err)
- }
- err = cfg.Storage.Delete(StorageKeys.SitePrivateKey(issuerKey, domain))
- if err != nil {
- return fmt.Errorf("deleting private key: %v", err)
- }
- err = cfg.Storage.Delete(StorageKeys.SiteMeta(issuerKey, domain))
- if err != nil {
- return fmt.Errorf("deleting metadata file: %v", err)
- }
- err = cfg.Storage.Delete(StorageKeys.CertsSitePrefix(issuerKey, domain))
- if err != nil {
- return fmt.Errorf("deleting site asset folder: %v", err)
- }
- return nil
-}
-
-// lockKey returns a key for a lock that is specific to the operation
-// named op being performed related to domainName and this config's CA.
-func (cfg *Config) lockKey(op, domainName string) string {
- return fmt.Sprintf("%s_%s", op, domainName)
-}
-
-// managedCertNeedsRenewal returns true if certRes is expiring soon or already expired,
-// or if the process of decoding the cert and checking its expiration returned an error.
-func (cfg *Config) managedCertNeedsRenewal(certRes CertificateResource) (time.Duration, bool) {
- certChain, err := parseCertsFromPEMBundle(certRes.CertificatePEM)
- if err != nil {
- return 0, true
- }
- remaining := time.Until(certChain[0].NotAfter)
- needsRenew := currentlyInRenewalWindow(certChain[0].NotBefore, certChain[0].NotAfter, cfg.RenewalWindowRatio)
- return remaining, needsRenew
-}
-
-func (cfg *Config) emit(eventName string, data interface{}) {
- if cfg.OnEvent == nil {
- return
- }
- cfg.OnEvent(eventName, data)
-}
-
-func loggerNamed(l *zap.Logger, name string) *zap.Logger {
- if l == nil {
- return nil
- }
- return l.Named(name)
-}
-
-// CertificateSelector is a type which can select a certificate to use given multiple choices.
-type CertificateSelector interface {
- SelectCertificate(*tls.ClientHelloInfo, []Certificate) (Certificate, error)
-}
-
-// OCSPConfig configures how OCSP is handled.
-type OCSPConfig struct {
- // Disable automatic OCSP stapling; strongly
- // discouraged unless you have a good reason.
- // Disabling this puts clients at greater risk
- // and reduces their privacy.
- DisableStapling bool
-
- // A map of OCSP responder domains to replacement
- // domains for querying OCSP servers. Used for
- // overriding the OCSP responder URL that is
- // embedded in certificates. Mapping to an empty
- // URL will disable OCSP from that responder.
- ResponderOverrides map[string]string
-}
-
-// certIssueLockOp is the name of the operation used
-// when naming a lock to make it mutually exclusive
-// with other certificate issuance operations for a
-// certain name.
-const certIssueLockOp = "issue_cert"
-
-// Constants for PKIX MustStaple extension.
-var (
- tlsFeatureExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24}
- ocspMustStapleFeature = []byte{0x30, 0x03, 0x02, 0x01, 0x05}
- mustStapleExtension = pkix.Extension{
- Id: tlsFeatureExtensionOID,
- Value: ocspMustStapleFeature,
- }
-)
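As a sketch of how these constants are consumed (cf. generateCSR above): the TLS Feature extension from RFC 7633 is attached to a CSR template via ExtraExtensions. A standalone example with a throwaway P-256 key:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
	"fmt"
)

func main() {
	// id-pe-tlsfeature with status_request (5), i.e. OCSP Must-Staple.
	mustStaple := pkix.Extension{
		Id:    asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24},
		Value: []byte{0x30, 0x03, 0x02, 0x01, 0x05},
	}
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tpl := &x509.CertificateRequest{
		DNSNames:        []string{"example.com"},
		ExtraExtensions: []pkix.Extension{mustStaple},
	}
	der, err := x509.CreateCertificateRequest(rand.Reader, tpl, key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("CSR with Must-Staple: %d bytes of DER\n", len(der))
}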
diff --git a/vendor/github.com/caddyserver/certmagic/crypto.go b/vendor/github.com/caddyserver/certmagic/crypto.go
deleted file mode 100644
index a705cdde..00000000
--- a/vendor/github.com/caddyserver/certmagic/crypto.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha256"
- "crypto/tls"
- "crypto/x509"
- "encoding/json"
- "encoding/pem"
- "fmt"
- "hash/fnv"
- "sort"
- "strings"
-
- "github.com/klauspost/cpuid/v2"
- "go.uber.org/zap"
- "golang.org/x/net/idna"
-)
-
-// encodePrivateKey marshals an EC, Ed25519, or RSA private key into a PEM-encoded array of bytes.
-func encodePrivateKey(key crypto.PrivateKey) ([]byte, error) {
- var pemType string
- var keyBytes []byte
- switch key := key.(type) {
- case *ecdsa.PrivateKey:
- var err error
- pemType = "EC"
- keyBytes, err = x509.MarshalECPrivateKey(key)
- if err != nil {
- return nil, err
- }
- case *rsa.PrivateKey:
- pemType = "RSA"
- keyBytes = x509.MarshalPKCS1PrivateKey(key)
- case ed25519.PrivateKey:
- var err error
- pemType = "ED25519"
- keyBytes, err = x509.MarshalPKCS8PrivateKey(key)
- if err != nil {
- return nil, err
- }
- default:
- return nil, fmt.Errorf("unsupported key type: %T", key)
- }
- pemKey := pem.Block{Type: pemType + " PRIVATE KEY", Bytes: keyBytes}
- return pem.EncodeToMemory(&pemKey), nil
-}
-
-// decodePrivateKey loads a PEM-encoded EC, Ed25519, or RSA private key from an array of bytes.
-// Borrowed from the Go standard library, to handle various private key and PEM block types.
-// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L291-L308
-// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L238
-func decodePrivateKey(keyPEMBytes []byte) (crypto.Signer, error) {
- keyBlockDER, _ := pem.Decode(keyPEMBytes)
-
- if keyBlockDER.Type != "PRIVATE KEY" && !strings.HasSuffix(keyBlockDER.Type, " PRIVATE KEY") {
- return nil, fmt.Errorf("unknown PEM header %q", keyBlockDER.Type)
- }
-
- if key, err := x509.ParsePKCS1PrivateKey(keyBlockDER.Bytes); err == nil {
- return key, nil
- }
-
- if key, err := x509.ParsePKCS8PrivateKey(keyBlockDER.Bytes); err == nil {
- switch key := key.(type) {
- case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey:
- return key.(crypto.Signer), nil
- default:
- return nil, fmt.Errorf("found unknown private key type in PKCS#8 wrapping: %T", key)
- }
- }
-
- if key, err := x509.ParseECPrivateKey(keyBlockDER.Bytes); err == nil {
- return key, nil
- }
-
- return nil, fmt.Errorf("unknown private key type")
-}
-
-// parseCertsFromPEMBundle parses a certificate bundle from top to bottom and returns
-// a slice of x509 certificates. This function will error if no certificates are found.
-func parseCertsFromPEMBundle(bundle []byte) ([]*x509.Certificate, error) {
- var certificates []*x509.Certificate
- var certDERBlock *pem.Block
- for {
- certDERBlock, bundle = pem.Decode(bundle)
- if certDERBlock == nil {
- break
- }
- if certDERBlock.Type == "CERTIFICATE" {
- cert, err := x509.ParseCertificate(certDERBlock.Bytes)
- if err != nil {
- return nil, err
- }
- certificates = append(certificates, cert)
- }
- }
- if len(certificates) == 0 {
- return nil, fmt.Errorf("no certificates found in bundle")
- }
- return certificates, nil
-}
-
-// fastHash hashes input using a hashing algorithm that
-// is fast, and returns the hash as a hex-encoded string.
-// Do not use this for cryptographic purposes.
-func fastHash(input []byte) string {
- h := fnv.New32a()
- h.Write(input)
- return fmt.Sprintf("%x", h.Sum32())
-}
-
-// saveCertResource saves the certificate resource to disk. This
-// includes the certificate file itself, the private key, and the
-// metadata file.
-func (cfg *Config) saveCertResource(issuer Issuer, cert CertificateResource) error {
- metaBytes, err := json.MarshalIndent(cert, "", "\t")
- if err != nil {
- return fmt.Errorf("encoding certificate metadata: %v", err)
- }
-
- issuerKey := issuer.IssuerKey()
- certKey := cert.NamesKey()
-
- all := []keyValue{
- {
- key: StorageKeys.SiteCert(issuerKey, certKey),
- value: cert.CertificatePEM,
- },
- {
- key: StorageKeys.SitePrivateKey(issuerKey, certKey),
- value: cert.PrivateKeyPEM,
- },
- {
- key: StorageKeys.SiteMeta(issuerKey, certKey),
- value: metaBytes,
- },
- }
-
- return storeTx(cfg.Storage, all)
-}
-
-// loadCertResourceAnyIssuer loads and returns the certificate resource from any
-// of the configured issuers. If multiple are found (e.g. if there are 3 issuers
-// configured, and all 3 have a resource matching certNamesKey), then the newest
-// (latest NotBefore date) resource will be chosen.
-func (cfg *Config) loadCertResourceAnyIssuer(certNamesKey string) (CertificateResource, error) {
- // we can save some extra decoding steps if there's only one issuer, since
- // we don't need to compare potentially multiple available resources to
- // select the best one, when there's only one choice anyway
- if len(cfg.Issuers) == 1 {
- return cfg.loadCertResource(cfg.Issuers[0], certNamesKey)
- }
-
- type decodedCertResource struct {
- CertificateResource
- issuer Issuer
- decoded *x509.Certificate
- }
- var certResources []decodedCertResource
- var lastErr error
-
- // load and decode all certificate resources found with the
- // configured issuers so we can sort by newest
- for _, issuer := range cfg.Issuers {
- certRes, err := cfg.loadCertResource(issuer, certNamesKey)
- if err != nil {
- if _, ok := err.(ErrNotExist); ok {
- // not a problem, but we need to remember the error
- // in case we end up not finding any cert resources
- // since we'll need an error to return in that case
- lastErr = err
- continue
- }
- return CertificateResource{}, err
- }
- certs, err := parseCertsFromPEMBundle(certRes.CertificatePEM)
- if err != nil {
- return CertificateResource{}, err
- }
- certResources = append(certResources, decodedCertResource{
- CertificateResource: certRes,
- issuer: issuer,
- decoded: certs[0],
- })
- }
- if len(certResources) == 0 {
- if lastErr == nil {
- lastErr = fmt.Errorf("no certificate resources found") // just in case; e.g. no Issuers configured
- }
- return CertificateResource{}, lastErr
- }
-
- // sort by date so the most recently issued comes first
- sort.Slice(certResources, func(i, j int) bool {
- return certResources[j].decoded.NotBefore.Before(certResources[i].decoded.NotBefore)
- })
-
- if cfg.Logger != nil {
- cfg.Logger.Debug("loading managed certificate",
- zap.String("domain", certNamesKey),
- zap.Time("expiration", certResources[0].decoded.NotAfter),
- zap.String("issuer_key", certResources[0].issuer.IssuerKey()),
- zap.Any("storage", cfg.Storage),
- )
- }
-
- return certResources[0].CertificateResource, nil
-}
-
-// loadCertResource loads a certificate resource from the given issuer's storage location.
-func (cfg *Config) loadCertResource(issuer Issuer, certNamesKey string) (CertificateResource, error) {
- certRes := CertificateResource{issuerKey: issuer.IssuerKey()}
-
- normalizedName, err := idna.ToASCII(certNamesKey)
- if err != nil {
- return CertificateResource{}, fmt.Errorf("converting '%s' to ASCII: %v", certNamesKey, err)
- }
-
- certBytes, err := cfg.Storage.Load(StorageKeys.SiteCert(certRes.issuerKey, normalizedName))
- if err != nil {
- return CertificateResource{}, err
- }
- certRes.CertificatePEM = certBytes
- keyBytes, err := cfg.Storage.Load(StorageKeys.SitePrivateKey(certRes.issuerKey, normalizedName))
- if err != nil {
- return CertificateResource{}, err
- }
- certRes.PrivateKeyPEM = keyBytes
- metaBytes, err := cfg.Storage.Load(StorageKeys.SiteMeta(certRes.issuerKey, normalizedName))
- if err != nil {
- return CertificateResource{}, err
- }
- err = json.Unmarshal(metaBytes, &certRes)
- if err != nil {
- return CertificateResource{}, fmt.Errorf("decoding certificate metadata: %v", err)
- }
-
- return certRes, nil
-}
-
-// hashCertificateChain computes the unique hash of certChain,
-// which is the chain of DER-encoded bytes. It returns the
-// hex encoding of the hash.
-func hashCertificateChain(certChain [][]byte) string {
- h := sha256.New()
- for _, certInChain := range certChain {
- h.Write(certInChain)
- }
- return fmt.Sprintf("%x", h.Sum(nil))
-}
-
-func namesFromCSR(csr *x509.CertificateRequest) []string {
- var nameSet []string
- nameSet = append(nameSet, csr.DNSNames...)
- nameSet = append(nameSet, csr.EmailAddresses...)
- for _, v := range csr.IPAddresses {
- nameSet = append(nameSet, v.String())
- }
- for _, v := range csr.URIs {
- nameSet = append(nameSet, v.String())
- }
- return nameSet
-}
-
-// preferredDefaultCipherSuites returns an appropriate
-// list of cipher suites to use depending on hardware
-// support for AES-NI.
-//
-// See https://github.com/mholt/caddy/issues/1674
-func preferredDefaultCipherSuites() []uint16 {
- if cpuid.CPU.Supports(cpuid.AESNI) {
- return defaultCiphersPreferAES
- }
- return defaultCiphersPreferChaCha
-}
-
-var (
- defaultCiphersPreferAES = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- }
- defaultCiphersPreferChaCha = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- }
-)
-
-// StandardKeyGenerator is the standard, in-memory key source
-// that uses crypto/rand.
-type StandardKeyGenerator struct {
- // The type of keys to generate.
- KeyType KeyType
-}
-
-// GenerateKey generates a new private key according to kg.KeyType.
-func (kg StandardKeyGenerator) GenerateKey() (crypto.PrivateKey, error) {
- switch kg.KeyType {
- case ED25519:
- _, priv, err := ed25519.GenerateKey(rand.Reader)
- return priv, err
- case "", P256:
- return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- case P384:
- return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
- case RSA2048:
- return rsa.GenerateKey(rand.Reader, 2048)
- case RSA4096:
- return rsa.GenerateKey(rand.Reader, 4096)
- case RSA8192:
- return rsa.GenerateKey(rand.Reader, 8192)
- }
- return nil, fmt.Errorf("unrecognized or unsupported key type: %s", kg.KeyType)
-}
-
-// DefaultKeyGenerator is the default key source.
-var DefaultKeyGenerator = StandardKeyGenerator{KeyType: P256}
-
-// KeyType enumerates the known/supported key types.
-type KeyType string
-
-// Constants for all key types we support.
-const (
- ED25519 = KeyType("ed25519")
- P256 = KeyType("p256")
- P384 = KeyType("p384")
- RSA2048 = KeyType("rsa2048")
- RSA4096 = KeyType("rsa4096")
- RSA8192 = KeyType("rsa8192")
-)
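A standalone sketch (standard library only) of the default path through StandardKeyGenerator and encodePrivateKey: generate a P-256 key and PEM-encode it as an "EC PRIVATE KEY" block:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	// P256 is the package default key type.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	der, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})))
}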
diff --git a/vendor/github.com/caddyserver/certmagic/dnsutil.go b/vendor/github.com/caddyserver/certmagic/dnsutil.go
deleted file mode 100644
index 2573cb96..00000000
--- a/vendor/github.com/caddyserver/certmagic/dnsutil.go
+++ /dev/null
@@ -1,345 +0,0 @@
-package certmagic
-
-import (
- "errors"
- "fmt"
- "net"
- "strings"
- "sync"
- "time"
-
- "github.com/miekg/dns"
-)
-
-// Code in this file adapted from go-acme/lego, July 2020:
-// https://github.com/go-acme/lego
-// by Ludovic Fernandez and Dominik Menke
-//
-// It has been modified.
-
-// findZoneByFQDN determines the zone apex for the given fqdn by recursing
-// up the domain labels until the nameserver returns a SOA record in the
-// answer section.
-func findZoneByFQDN(fqdn string, nameservers []string) (string, error) {
- if !strings.HasSuffix(fqdn, ".") {
- fqdn += "."
- }
- soa, err := lookupSoaByFqdn(fqdn, nameservers)
- if err != nil {
- return "", err
- }
- return soa.zone, nil
-}
-
-func lookupSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) {
- if !strings.HasSuffix(fqdn, ".") {
- fqdn += "."
- }
-
- fqdnSOACacheMu.Lock()
- defer fqdnSOACacheMu.Unlock()
-
- // prefer cached version if fresh
- if ent := fqdnSOACache[fqdn]; ent != nil && !ent.isExpired() {
- return ent, nil
- }
-
- ent, err := fetchSoaByFqdn(fqdn, nameservers)
- if err != nil {
- return nil, err
- }
-
- // save result to cache, but don't allow
- // the cache to grow out of control
- if len(fqdnSOACache) >= 1000 {
- for key := range fqdnSOACache {
- delete(fqdnSOACache, key)
- break
- }
- }
- fqdnSOACache[fqdn] = ent
-
- return ent, nil
-}
-
-func fetchSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) {
- var err error
- var in *dns.Msg
-
- labelIndexes := dns.Split(fqdn)
- for _, index := range labelIndexes {
- domain := fqdn[index:]
-
- in, err = dnsQuery(domain, dns.TypeSOA, nameservers, true)
- if err != nil {
- continue
- }
- if in == nil {
- continue
- }
-
- switch in.Rcode {
- case dns.RcodeSuccess:
- // Check if we got a SOA RR in the answer section
- if len(in.Answer) == 0 {
- continue
- }
-
- // CNAME records cannot/should not exist at the root of a zone.
- // So we skip a domain when a CNAME is found.
- if dnsMsgContainsCNAME(in) {
- continue
- }
-
- for _, ans := range in.Answer {
- if soa, ok := ans.(*dns.SOA); ok {
- return newSoaCacheEntry(soa), nil
- }
- }
- case dns.RcodeNameError:
- // NXDOMAIN
- default:
- // Any response code other than NOERROR and NXDOMAIN is treated as error
- return nil, fmt.Errorf("unexpected response code '%s' for %s", dns.RcodeToString[in.Rcode], domain)
- }
- }
-
- return nil, fmt.Errorf("could not find the start of authority for %s%s", fqdn, formatDNSError(in, err))
-}
-
-// dnsMsgContainsCNAME checks for a CNAME answer in msg
-func dnsMsgContainsCNAME(msg *dns.Msg) bool {
- for _, ans := range msg.Answer {
- if _, ok := ans.(*dns.CNAME); ok {
- return true
- }
- }
- return false
-}
-
-func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (*dns.Msg, error) {
- m := createDNSMsg(fqdn, rtype, recursive)
- var in *dns.Msg
- var err error
- for _, ns := range nameservers {
- in, err = sendDNSQuery(m, ns)
- if err == nil && len(in.Answer) > 0 {
- break
- }
- }
- return in, err
-}
-
-func createDNSMsg(fqdn string, rtype uint16, recursive bool) *dns.Msg {
- m := new(dns.Msg)
- m.SetQuestion(fqdn, rtype)
- m.SetEdns0(4096, false)
- if !recursive {
- m.RecursionDesired = false
- }
- return m
-}
-
-func sendDNSQuery(m *dns.Msg, ns string) (*dns.Msg, error) {
- udp := &dns.Client{Net: "udp", Timeout: dnsTimeout}
- in, _, err := udp.Exchange(m, ns)
- // two kinds of errors we can handle by retrying with TCP:
- // truncation and timeout; see https://github.com/caddyserver/caddy/issues/3639
- truncated := in != nil && in.Truncated
- timeoutErr := err != nil && strings.Contains(err.Error(), "timeout")
- if truncated || timeoutErr {
- tcp := &dns.Client{Net: "tcp", Timeout: dnsTimeout}
- in, _, err = tcp.Exchange(m, ns)
- }
- return in, err
-}
-
-func formatDNSError(msg *dns.Msg, err error) string {
- var parts []string
- if msg != nil {
- parts = append(parts, dns.RcodeToString[msg.Rcode])
- }
- if err != nil {
- parts = append(parts, err.Error())
- }
- if len(parts) > 0 {
- return ": " + strings.Join(parts, " ")
- }
- return ""
-}
-
-// soaCacheEntry holds a cached SOA record (only selected fields)
-type soaCacheEntry struct {
- zone string // zone apex (a domain name)
- primaryNs string // primary nameserver for the zone apex
- expires time.Time // time when this cache entry should be evicted
-}
-
-func newSoaCacheEntry(soa *dns.SOA) *soaCacheEntry {
- return &soaCacheEntry{
- zone: soa.Hdr.Name,
- primaryNs: soa.Ns,
- expires: time.Now().Add(time.Duration(soa.Refresh) * time.Second),
- }
-}
-
-// isExpired checks whether a cache entry should be considered expired.
-func (cache *soaCacheEntry) isExpired() bool {
- return time.Now().After(cache.expires)
-}
-
-// systemOrDefaultNameservers attempts to get system nameservers from the
-// resolv.conf file given by path before falling back to hard-coded defaults.
-func systemOrDefaultNameservers(path string, defaults []string) []string {
- config, err := dns.ClientConfigFromFile(path)
- if err != nil || len(config.Servers) == 0 {
- return defaults
- }
- return config.Servers
-}
-
-// populateNameserverPorts ensures that all nameservers have a port number.
-func populateNameserverPorts(servers []string) {
- for i := range servers {
- _, port, _ := net.SplitHostPort(servers[i])
- if port == "" {
- servers[i] = net.JoinHostPort(servers[i], "53")
- }
- }
-}
-
-// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers.
-func checkDNSPropagation(fqdn, value string, resolvers []string) (bool, error) {
- if !strings.HasSuffix(fqdn, ".") {
- fqdn += "."
- }
-
- // Initial attempt to resolve at the recursive NS
- r, err := dnsQuery(fqdn, dns.TypeTXT, resolvers, true)
- if err != nil {
- return false, err
- }
-
- // TODO: make this configurable, maybe
- // if !p.requireCompletePropagation {
- // return true, nil
- // }
-
- if r.Rcode == dns.RcodeSuccess {
- fqdn = updateDomainWithCName(r, fqdn)
- }
-
- authoritativeNss, err := lookupNameservers(fqdn, resolvers)
- if err != nil {
- return false, err
- }
-
- return checkAuthoritativeNss(fqdn, value, authoritativeNss)
-}
-
-// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record.
-func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) {
- for _, ns := range nameservers {
- r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false)
- if err != nil {
- return false, err
- }
-
- if r.Rcode != dns.RcodeSuccess {
- if r.Rcode == dns.RcodeNameError {
- // if Present() succeeded, then it must show up eventually, or else
- // something is really broken in the DNS provider or their API;
- // no need for error here, simply have the caller try again
- return false, nil
- }
- return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn)
- }
-
- var found bool
- for _, rr := range r.Answer {
- if txt, ok := rr.(*dns.TXT); ok {
- record := strings.Join(txt.Txt, "")
- if record == value {
- found = true
- break
- }
- }
- }
-
- if !found {
- return false, nil
- }
- }
-
- return true, nil
-}
-
-// lookupNameservers returns the authoritative nameservers for the given fqdn.
-func lookupNameservers(fqdn string, resolvers []string) ([]string, error) {
- var authoritativeNss []string
-
- zone, err := findZoneByFQDN(fqdn, resolvers)
- if err != nil {
- return nil, fmt.Errorf("could not determine the zone: %w", err)
- }
-
- r, err := dnsQuery(zone, dns.TypeNS, resolvers, true)
- if err != nil {
- return nil, err
- }
-
- for _, rr := range r.Answer {
- if ns, ok := rr.(*dns.NS); ok {
- authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns))
- }
- }
-
- if len(authoritativeNss) > 0 {
- return authoritativeNss, nil
- }
- return nil, errors.New("could not determine authoritative nameservers")
-}
-
-// updateDomainWithCName replaces fqdn with its CNAME target, if the answer contains one.
-func updateDomainWithCName(r *dns.Msg, fqdn string) string {
- for _, rr := range r.Answer {
- if cn, ok := rr.(*dns.CNAME); ok {
- if cn.Hdr.Name == fqdn {
- return cn.Target
- }
- }
- }
- return fqdn
-}
-
-// recursiveNameservers returns the resolvers used to pre-check DNS
-// propagation: the user-configured nameservers (custom) if any,
-// otherwise the defaults obtained from resolv.conf (falling back to
-// defaultNameservers), ensuring all server addresses have a port value.
-func recursiveNameservers(custom []string) []string {
- var servers []string
- if len(custom) == 0 {
- servers = systemOrDefaultNameservers(defaultResolvConf, defaultNameservers)
- } else {
- servers = make([]string, len(custom))
- copy(servers, custom)
- }
- populateNameserverPorts(servers)
- return servers
-}
-
-var defaultNameservers = []string{
- "8.8.8.8:53",
- "8.8.4.4:53",
- "1.1.1.1:53",
- "1.0.0.1:53",
-}
-
-var dnsTimeout = 10 * time.Second
-
-var (
- fqdnSOACache = map[string]*soaCacheEntry{}
- fqdnSOACacheMu sync.Mutex
-)
-
-const defaultResolvConf = "/etc/resolv.conf"
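The zone-apex search above is straightforward to reproduce. A minimal sketch using github.com/miekg/dns that walks the labels of an FQDN and returns the first SOA answer, as fetchSoaByFqdn does; the resolver address is an example, not something the package mandates:

package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	fqdn := dns.Fqdn("www.example.com")
	client := &dns.Client{Net: "udp", Timeout: 10 * time.Second}
	// dns.Split yields the offset of each label, so fqdn[idx:] walks
	// from the full name up toward the root.
	for _, idx := range dns.Split(fqdn) {
		m := new(dns.Msg)
		m.SetQuestion(fqdn[idx:], dns.TypeSOA)
		in, _, err := client.Exchange(m, "8.8.8.8:53")
		if err != nil || in == nil {
			continue
		}
		for _, ans := range in.Answer {
			if soa, ok := ans.(*dns.SOA); ok {
				fmt.Printf("zone apex: %s (primary NS %s)\n", soa.Hdr.Name, soa.Ns)
				return
			}
		}
	}
	fmt.Println("no SOA found")
}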
diff --git a/vendor/github.com/caddyserver/certmagic/filestorage.go b/vendor/github.com/caddyserver/certmagic/filestorage.go
deleted file mode 100644
index 8adc2ce7..00000000
--- a/vendor/github.com/caddyserver/certmagic/filestorage.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path"
- "path/filepath"
- "runtime"
- "time"
-)
-
-// FileStorage facilitates forming file paths derived from a root
-// directory. It is used to get file paths in a consistent,
-// cross-platform way and to persist ACME assets on the file system.
-type FileStorage struct {
- Path string
-}
-
-// Exists returns true if key exists in fs.
-func (fs *FileStorage) Exists(key string) bool {
- _, err := os.Stat(fs.Filename(key))
- return !os.IsNotExist(err)
-}
-
-// Store saves value at key.
-func (fs *FileStorage) Store(key string, value []byte) error {
- filename := fs.Filename(key)
- err := os.MkdirAll(filepath.Dir(filename), 0700)
- if err != nil {
- return err
- }
- return ioutil.WriteFile(filename, value, 0600)
-}
-
-// Load retrieves the value at key.
-func (fs *FileStorage) Load(key string) ([]byte, error) {
- contents, err := ioutil.ReadFile(fs.Filename(key))
- if os.IsNotExist(err) {
- return nil, ErrNotExist(err)
- }
- return contents, err
-}
-
-// Delete deletes the value at key.
-func (fs *FileStorage) Delete(key string) error {
- err := os.Remove(fs.Filename(key))
- if os.IsNotExist(err) {
- return ErrNotExist(err)
- }
- return err
-}
-
-// List returns all keys that match prefix.
-func (fs *FileStorage) List(prefix string, recursive bool) ([]string, error) {
- var keys []string
- walkPrefix := fs.Filename(prefix)
-
- err := filepath.Walk(walkPrefix, func(fpath string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if info == nil {
- return fmt.Errorf("%s: file info is nil", fpath)
- }
- if fpath == walkPrefix {
- return nil
- }
-
- suffix, err := filepath.Rel(walkPrefix, fpath)
- if err != nil {
- return fmt.Errorf("%s: could not make path relative: %v", fpath, err)
- }
- keys = append(keys, path.Join(prefix, suffix))
-
- if !recursive && info.IsDir() {
- return filepath.SkipDir
- }
- return nil
- })
-
- return keys, err
-}
-
-// Stat returns information about key.
-func (fs *FileStorage) Stat(key string) (KeyInfo, error) {
- fi, err := os.Stat(fs.Filename(key))
- if os.IsNotExist(err) {
- return KeyInfo{}, ErrNotExist(err)
- }
- if err != nil {
- return KeyInfo{}, err
- }
- return KeyInfo{
- Key: key,
- Modified: fi.ModTime(),
- Size: fi.Size(),
- IsTerminal: !fi.IsDir(),
- }, nil
-}
-
-// Filename returns the key as a path on the file
-// system prefixed by fs.Path.
-func (fs *FileStorage) Filename(key string) string {
- return filepath.Join(fs.Path, filepath.FromSlash(key))
-}
-
-// Lock obtains a lock named by the given key. It blocks
-// until the lock can be obtained or an error is returned.
-func (fs *FileStorage) Lock(ctx context.Context, key string) error {
- filename := fs.lockFilename(key)
-
- for {
- err := createLockfile(filename)
- if err == nil {
- // got the lock, yay
- return nil
- }
- if !os.IsExist(err) {
- // unexpected error
- return fmt.Errorf("creating lock file: %v", err)
- }
-
- // lock file already exists
-
- var meta lockMeta
- f, err := os.Open(filename)
- if err == nil {
- err2 := json.NewDecoder(f).Decode(&meta)
- f.Close()
- if err2 != nil {
- return fmt.Errorf("decoding lockfile contents: %w", err2)
- }
- }
-
- switch {
- case os.IsNotExist(err):
- // must have just been removed; try again to create it
- continue
-
- case err != nil:
- // unexpected error
- return fmt.Errorf("accessing lock file: %v", err)
-
- case fileLockIsStale(meta):
- // lock file is stale - delete it and try again to create one
- log.Printf("[INFO][%s] Lock for '%s' is stale (created: %s, last update: %s); removing then retrying: %s",
- fs, key, meta.Created, meta.Updated, filename)
- removeLockfile(filename)
- continue
-
- default:
- // lockfile exists and is not stale;
- // just wait a moment and try again,
- // or return if context cancelled
- select {
- case <-time.After(fileLockPollInterval):
- case <-ctx.Done():
- return ctx.Err()
- }
- }
- }
-}
-
-// Unlock releases the lock for name.
-func (fs *FileStorage) Unlock(key string) error {
- return removeLockfile(fs.lockFilename(key))
-}
-
-func (fs *FileStorage) String() string {
- return "FileStorage:" + fs.Path
-}
-
-func (fs *FileStorage) lockFilename(key string) string {
- return filepath.Join(fs.lockDir(), StorageKeys.Safe(key)+".lock")
-}
-
-func (fs *FileStorage) lockDir() string {
- return filepath.Join(fs.Path, "locks")
-}
-
-func fileLockIsStale(meta lockMeta) bool {
- ref := meta.Updated
- if ref.IsZero() {
- ref = meta.Created
- }
- // since updates are exactly every lockFreshnessInterval,
- // add a grace period for the actual file read+write to
- // take place
- return time.Since(ref) > lockFreshnessInterval*2
-}
-
-// createLockfile atomically creates the lockfile
-// identified by filename. A successfully created
-// lockfile should be removed with removeLockfile.
-func createLockfile(filename string) error {
- err := atomicallyCreateFile(filename, true)
- if err != nil {
- return err
- }
-
- go keepLockfileFresh(filename)
-
- // if the app crashes in removeLockfile(), there is a
- // small chance the .unlock file is left behind; it's
- // safe to simply remove it as it's a guard against
- // double removal of the .lock file.
- _ = os.Remove(filename + ".unlock")
- return nil
-}
-
-// removeLockfile atomically removes filename,
-// which must be a lockfile created by createLockfile.
-// See discussion in PR #7 for more background:
-// https://github.com/caddyserver/certmagic/pull/7
-func removeLockfile(filename string) error {
- unlockFilename := filename + ".unlock"
- if err := atomicallyCreateFile(unlockFilename, false); err != nil {
- if os.IsExist(err) {
- // another process is handling the unlocking
- return nil
- }
- return err
- }
- defer os.Remove(unlockFilename)
- return os.Remove(filename)
-}
-
-// keepLockfileFresh continuously updates the lock file
-// at filename with the current timestamp. It stops
-// when the file disappears (happy path = lock released),
-// or when there is an error at any point. Since it polls
-// every lockFreshnessInterval, this function might
-// not terminate until up to lockFreshnessInterval after
-// the lock is released.
-func keepLockfileFresh(filename string) {
- defer func() {
- if err := recover(); err != nil {
- buf := make([]byte, stackTraceBufferSize)
- buf = buf[:runtime.Stack(buf, false)]
- log.Printf("panic: active locking: %v\n%s", err, buf)
- }
- }()
-
- for {
- time.Sleep(lockFreshnessInterval)
- done, err := updateLockfileFreshness(filename)
- if err != nil {
- log.Printf("[ERROR] Keeping lock file fresh: %v - terminating lock maintenance (lockfile: %s)", err, filename)
- return
- }
- if done {
- return
- }
- }
-}
-
-// updateLockfileFreshness updates the lock file at filename
-// with the current timestamp. It returns true if the parent
-// loop can terminate (i.e. no more need to update the lock).
-func updateLockfileFreshness(filename string) (bool, error) {
- f, err := os.OpenFile(filename, os.O_RDWR, 0644)
- if os.IsNotExist(err) {
- return true, nil // lock released
- }
- if err != nil {
- return true, err
- }
- defer f.Close()
-
- // read contents
- metaBytes, err := ioutil.ReadAll(io.LimitReader(f, 2048))
- if err != nil {
- return true, err
- }
- var meta lockMeta
- if err := json.Unmarshal(metaBytes, &meta); err != nil {
- return true, err
- }
-
- // truncate file and reset I/O offset to beginning
- if err := f.Truncate(0); err != nil {
- return true, err
- }
- if _, err := f.Seek(0, 0); err != nil {
- return true, err
- }
-
- // write updated timestamp
- meta.Updated = time.Now()
- if err = json.NewEncoder(f).Encode(meta); err != nil {
- return false, err
- }
-
- // sync to device; we suspect that sometimes file systems
- // (particularly AWS EFS) don't do this on their own,
- // leaving the file empty when we close it; see
- // https://github.com/caddyserver/caddy/issues/3954
- return false, f.Sync()
-}
-
-// atomicallyCreateFile atomically creates the file
-// identified by filename if it doesn't already exist.
-func atomicallyCreateFile(filename string, writeLockInfo bool) error {
- // no need to check this error, we only really care about the file creation error
- _ = os.MkdirAll(filepath.Dir(filename), 0700)
- f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644)
- if err != nil {
- return err
- }
- defer f.Close()
- if writeLockInfo {
- now := time.Now()
- meta := lockMeta{
- Created: now,
- Updated: now,
- }
- if err := json.NewEncoder(f).Encode(meta); err != nil {
- return err
- }
- // see https://github.com/caddyserver/caddy/issues/3954
- if err := f.Sync(); err != nil {
- return err
- }
- }
- return nil
-}
-
-// homeDir returns the best guess of the current user's home
-// directory from environment variables. If unknown, "." (the
-// current directory) is returned instead.
-func homeDir() string {
- home := os.Getenv("HOME")
- if home == "" && runtime.GOOS == "windows" {
- drive := os.Getenv("HOMEDRIVE")
- path := os.Getenv("HOMEPATH")
- home = drive + path
- if drive == "" || path == "" {
- home = os.Getenv("USERPROFILE")
- }
- }
- if home == "" {
- home = "."
- }
- return home
-}
-
-func dataDir() string {
- baseDir := filepath.Join(homeDir(), ".local", "share")
- if xdgData := os.Getenv("XDG_DATA_HOME"); xdgData != "" {
- baseDir = xdgData
- }
- return filepath.Join(baseDir, "certmagic")
-}
-
-// lockMeta is written into a lock file.
-type lockMeta struct {
- Created time.Time `json:"created,omitempty"`
- Updated time.Time `json:"updated,omitempty"`
-}
-
-// lockFreshnessInterval is how often to update
-// a lock's timestamp. Locks with a timestamp
-// more than this duration in the past (plus a
-// grace period for latency) can be considered
-// stale.
-const lockFreshnessInterval = 5 * time.Second
-
-// fileLockPollInterval is how frequently
-// to check the existence of a lock file
-const fileLockPollInterval = 1 * time.Second
-
-// Interface guard
-var _ Storage = (*FileStorage)(nil)
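The mutual exclusion in this file ultimately rests on O_CREATE|O_EXCL, which makes file creation atomic so that exactly one process can win the lock. A standalone sketch (tryLock is a hypothetical helper) of the primitive behind createLockfile, without the metadata and freshness machinery:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// tryLock attempts to create the lock file exclusively; it reports
// false (without error) if another process already holds it.
func tryLock(filename string) (bool, error) {
	f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644)
	if err != nil {
		if os.IsExist(err) {
			return false, nil
		}
		return false, err
	}
	return true, f.Close()
}

func main() {
	lock := filepath.Join(os.TempDir(), "demo.lock")
	ok, err := tryLock(lock)
	if err != nil {
		panic(err)
	}
	if !ok {
		fmt.Println("lock is held by another process")
		return
	}
	defer os.Remove(lock) // release
	fmt.Println("acquired", lock)
}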
diff --git a/vendor/github.com/caddyserver/certmagic/handshake.go b/vendor/github.com/caddyserver/certmagic/handshake.go
deleted file mode 100644
index 5b749fdc..00000000
--- a/vendor/github.com/caddyserver/certmagic/handshake.go
+++ /dev/null
@@ -1,686 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "net"
- "strings"
- "sync"
- "time"
-
- "github.com/mholt/acmez"
- "go.uber.org/zap"
-)
-
-// GetCertificate gets a certificate to satisfy clientHello. In getting
-// the certificate, it abides the rules and settings defined in the
-// Config that matches clientHello.ServerName. It first checks the in-
-// memory cache, then, if the config enables "OnDemand", it accesses
-// disk, then accesses the network if it must obtain a new certificate
-// via ACME.
-//
-// This method is safe for use as a tls.Config.GetCertificate callback.
-func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
- cfg.emit("tls_handshake_started", clientHello)
-
- // special case: serve up the certificate for a TLS-ALPN ACME challenge
- // (https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05)
- for _, proto := range clientHello.SupportedProtos {
- if proto == acmez.ACMETLS1Protocol {
- challengeCert, distributed, err := cfg.getTLSALPNChallengeCert(clientHello)
- if err != nil {
- if cfg.Logger != nil {
- cfg.Logger.Error("tls-alpn challenge",
- zap.String("server_name", clientHello.ServerName),
- zap.Error(err))
- }
- return nil, err
- }
- if cfg.Logger != nil {
- cfg.Logger.Info("served key authentication certificate",
- zap.String("server_name", clientHello.ServerName),
- zap.String("challenge", "tls-alpn-01"),
- zap.String("remote", clientHello.Conn.RemoteAddr().String()),
- zap.Bool("distributed", distributed))
- }
- return challengeCert, nil
- }
- }
-
- // get the certificate and serve it up
- cert, err := cfg.getCertDuringHandshake(clientHello, true, true)
- if err == nil {
- cfg.emit("tls_handshake_completed", clientHello)
- }
- return &cert.Certificate, err
-}
-
-// getCertificate gets a certificate that matches name from the in-memory
-// cache, according to the lookup table associated with cfg. The lookup then
-// points to a certificate in the Instance certificate cache.
-//
-// The name is expected to already be normalized (e.g. lowercased).
-//
-// If there is no exact match for name, it will be checked against names of
-// the form '*.example.com' (wildcard certificates) according to RFC 6125.
-// If a match is found, matched will be true. If no matches are found, matched
-// will be false and a "default" certificate will be returned with defaulted
-// set to true. If defaulted is false, then no certificates were available.
-//
-// The logic in this function is adapted from the Go standard library,
-// which is by the Go Authors.
-//
-// This function is safe for concurrent use.
-func (cfg *Config) getCertificate(hello *tls.ClientHelloInfo) (cert Certificate, matched, defaulted bool) {
- name := normalizedName(hello.ServerName)
-
- if name == "" {
- // if SNI is empty, prefer matching IP address
- if hello.Conn != nil {
- addr := localIPFromConn(hello.Conn)
- cert, matched = cfg.selectCert(hello, addr)
- if matched {
- return
- }
- }
-
- // fall back to a "default" certificate, if specified
- if cfg.DefaultServerName != "" {
- normDefault := normalizedName(cfg.DefaultServerName)
- cert, defaulted = cfg.selectCert(hello, normDefault)
- if defaulted {
- return
- }
- }
- } else {
- // if SNI is specified, try an exact match first
- cert, matched = cfg.selectCert(hello, name)
- if matched {
- return
- }
-
- // try replacing labels in the name with
- // wildcards until we get a match
- labels := strings.Split(name, ".")
- for i := range labels {
- labels[i] = "*"
- candidate := strings.Join(labels, ".")
- cert, matched = cfg.selectCert(hello, candidate)
- if matched {
- return
- }
- }
- }
-
- // otherwise, we're bingo on ammo; see issues
- // caddyserver/caddy#2035 and caddyserver/caddy#1303 (any
- // change to certificate matching behavior must
- // account for hosts defined where the hostname
- // is empty or a catch-all, like ":443" or
- // "0.0.0.0:443")
-
- return
-}
-
-// selectCert uses hello to select a certificate from the
-// cache for name. If cfg.CertSelection is set, it will be
-// used to make the decision. Otherwise, the first matching
-// unexpired cert is returned. As a special case, if no
-// certificates match name and cfg.CertSelection is set,
-// then all certificates in the cache will be passed in
-// for the cfg.CertSelection to make the final decision.
-func (cfg *Config) selectCert(hello *tls.ClientHelloInfo, name string) (Certificate, bool) {
- logger := loggerNamed(cfg.Logger, "handshake")
- choices := cfg.certCache.getAllMatchingCerts(name)
- if len(choices) == 0 {
- if cfg.CertSelection == nil {
- if logger != nil {
- logger.Debug("no matching certificates and no custom selection logic", zap.String("identifier", name))
- }
- return Certificate{}, false
- }
- if logger != nil {
- logger.Debug("no matching certificate; will choose from all certificates", zap.String("identifier", name))
- }
- choices = cfg.certCache.getAllCerts()
- }
- if logger != nil {
- logger.Debug("choosing certificate",
- zap.String("identifier", name),
- zap.Int("num_choices", len(choices)))
- }
- if cfg.CertSelection == nil {
- cert, err := DefaultCertificateSelector(hello, choices)
- if logger != nil {
- logger.Debug("default certificate selection results",
- zap.Error(err),
- zap.String("identifier", name),
- zap.Strings("subjects", cert.Names),
- zap.Bool("managed", cert.managed),
- zap.String("issuer_key", cert.issuerKey),
- zap.String("hash", cert.hash))
- }
- return cert, err == nil
- }
- cert, err := cfg.CertSelection.SelectCertificate(hello, choices)
- if logger != nil {
- logger.Debug("custom certificate selection results",
- zap.Error(err),
- zap.String("identifier", name),
- zap.Strings("subjects", cert.Names),
- zap.Bool("managed", cert.managed),
- zap.String("issuer_key", cert.issuerKey),
- zap.String("hash", cert.hash))
- }
- return cert, err == nil
-}
-
-// DefaultCertificateSelector is the default certificate selection logic
-// given a choice of certificates. If there is at least one certificate in
-// choices, it always returns a certificate without error. It chooses the
-// first non-expired certificate that the client supports if possible,
-// otherwise it returns an expired certificate that the client supports,
-// otherwise it just returns the first certificate in the list of choices.
-func DefaultCertificateSelector(hello *tls.ClientHelloInfo, choices []Certificate) (Certificate, error) {
- if len(choices) == 0 {
- return Certificate{}, fmt.Errorf("no certificates available")
- }
- now := time.Now()
- best := choices[0]
- for _, choice := range choices {
- if err := hello.SupportsCertificate(&choice.Certificate); err != nil {
- continue
- }
- best = choice // at least the client supports it...
- if now.After(choice.Leaf.NotBefore) && now.Before(choice.Leaf.NotAfter) {
- return choice, nil // ...and unexpired, great! "Certificate, I choose you!"
- }
- }
- return best, nil // all matching certs are expired or incompatible, oh well
-}
-
-// getCertDuringHandshake will get a certificate for hello. It first tries
-// the in-memory cache. If no certificate for hello is in the cache, the
-// config most closely corresponding to hello will be loaded. If that config
-// allows it (OnDemand==true) and if loadIfNecessary == true, it goes to disk
-// to load it into the cache and serve it. If it's not on disk and if
-// obtainIfNecessary == true, the certificate will be obtained from the CA,
-// cached, and served. If obtainIfNecessary is true, then loadIfNecessary
-// must also be set to true. An error will be returned if and only if no
-// certificate is available.
-//
-// This function is safe for concurrent use.
-func (cfg *Config) getCertDuringHandshake(hello *tls.ClientHelloInfo, loadIfNecessary, obtainIfNecessary bool) (Certificate, error) {
- log := loggerNamed(cfg.Logger, "handshake")
-
- // First check our in-memory cache to see if we've already loaded it
- cert, matched, defaulted := cfg.getCertificate(hello)
- if matched {
- if log != nil {
- log.Debug("matched certificate in cache",
- zap.Strings("subjects", cert.Names),
- zap.Bool("managed", cert.managed),
- zap.Time("expiration", cert.Leaf.NotAfter),
- zap.String("hash", cert.hash))
- }
- if cert.managed && cfg.OnDemand != nil && obtainIfNecessary {
- // It's been reported before that if the machine goes to sleep (or
- // suspends the process) that certs which are already loaded into
- // memory won't get renewed in the background, so we need to check
- // expiry on each handshake too, sigh:
- // https://caddy.community/t/local-certificates-not-renewing-on-demand/9482
- return cfg.optionalMaintenance(loggerNamed(cfg.Logger, "on_demand"), cert, hello)
- }
- return cert, nil
- }
-
- name := cfg.getNameFromClientHello(hello)
-
- // We might be able to load or obtain a needed certificate. Load from
- // storage even if OnDemand isn't enabled in case a statically-managed
- // cert was evicted from a full cache.
- cfg.certCache.mu.RLock()
- cacheSize := len(cfg.certCache.cache)
- cfg.certCache.mu.RUnlock()
- loadDynamically := cfg.OnDemand != nil || cacheSize >= cfg.certCache.options.Capacity
-
- if loadDynamically && loadIfNecessary {
- // Then check to see if we have one on disk
- loadedCert, err := cfg.CacheManagedCertificate(name)
- if _, ok := err.(ErrNotExist); ok {
- // If no exact match, try a wildcard variant, which is something we can still use
- labels := strings.Split(name, ".")
- labels[0] = "*"
- loadedCert, err = cfg.CacheManagedCertificate(strings.Join(labels, "."))
- }
- if err == nil {
- if log != nil {
- log.Debug("loaded certificate from storage",
- zap.Strings("subjects", loadedCert.Names),
- zap.Bool("managed", loadedCert.managed),
- zap.Time("expiration", loadedCert.Leaf.NotAfter),
- zap.String("hash", loadedCert.hash))
- }
- loadedCert, err = cfg.handshakeMaintenance(hello, loadedCert)
- if err != nil {
- if log != nil {
- log.Error("maintining newly-loaded certificate",
- zap.String("server_name", name),
- zap.Error(err))
- }
- }
- return loadedCert, nil
- }
- if cfg.OnDemand != nil && obtainIfNecessary {
- // By this point, we need to ask the CA for a certificate
- return cfg.obtainOnDemandCertificate(hello)
- }
- }
-
- // Fall back to the default certificate if there is one
- if defaulted {
- if log != nil {
- log.Debug("fell back to default certificate",
- zap.Strings("subjects", cert.Names),
- zap.Bool("managed", cert.managed),
- zap.Time("expiration", cert.Leaf.NotAfter),
- zap.String("hash", cert.hash))
- }
- return cert, nil
- }
-
- if log != nil {
- log.Debug("no certificate matching TLS ClientHello",
- zap.String("server_name", hello.ServerName),
- zap.String("remote", hello.Conn.RemoteAddr().String()),
- zap.String("identifier", name),
- zap.Uint16s("cipher_suites", hello.CipherSuites),
- zap.Int("cache_size", cacheSize),
- zap.Int("cache_capacity", cfg.certCache.options.Capacity),
- zap.Bool("load_if_necessary", loadIfNecessary),
- zap.Bool("obtain_if_necessary", obtainIfNecessary),
- zap.Bool("on_demand", cfg.OnDemand != nil))
- }
-
- return Certificate{}, fmt.Errorf("no certificate available for '%s'", name)
-}
-
-// optionalMaintenance will perform maintenance on the certificate (if necessary) and
-// will return the resulting certificate. This should only be done if the certificate
-// is managed, OnDemand is enabled, and the scope is allowed to obtain certificates.
-func (cfg *Config) optionalMaintenance(log *zap.Logger, cert Certificate, hello *tls.ClientHelloInfo) (Certificate, error) {
- newCert, err := cfg.handshakeMaintenance(hello, cert)
- if err == nil {
- return newCert, nil
- }
-
- if log != nil {
- log.Error("renewing certificate on-demand failed",
- zap.Strings("subjects", cert.Names),
- zap.Time("not_after", cert.Leaf.NotAfter),
- zap.Error(err))
- }
-
- if cert.Expired() {
- return cert, err
- }
-
- // still has time remaining, so serve it anyway
- return cert, nil
-}
-
-// checkIfCertShouldBeObtained checks to see if an on-demand TLS certificate
-// should be obtained for a given domain based upon the config settings. If
-// a non-nil error is returned, do not issue a new certificate for name.
-func (cfg *Config) checkIfCertShouldBeObtained(name string) error {
- if cfg.OnDemand == nil {
- return fmt.Errorf("not configured for on-demand certificate issuance")
- }
- if !SubjectQualifiesForCert(name) {
- return fmt.Errorf("subject name does not qualify for certificate: %s", name)
- }
- if cfg.OnDemand.DecisionFunc != nil {
- return cfg.OnDemand.DecisionFunc(name)
- }
- if len(cfg.OnDemand.hostWhitelist) > 0 &&
- !cfg.OnDemand.whitelistContains(name) {
- return fmt.Errorf("certificate for '%s' is not managed", name)
- }
- return nil
-}
-
-// obtainOnDemandCertificate obtains a certificate for hello.
-// If another goroutine has already started obtaining a cert for
-// hello, it will wait and use what the other goroutine obtained.
-//
-// This function is safe for use by multiple concurrent goroutines.
-func (cfg *Config) obtainOnDemandCertificate(hello *tls.ClientHelloInfo) (Certificate, error) {
- log := loggerNamed(cfg.Logger, "on_demand")
-
- name := cfg.getNameFromClientHello(hello)
-
- getCertWithoutReobtaining := func() (Certificate, error) {
- // very important to set the obtainIfNecessary argument to false, so we don't repeat this infinitely
- return cfg.getCertDuringHandshake(hello, true, false)
- }
-
- // We must protect this process from happening concurrently, so synchronize.
- obtainCertWaitChansMu.Lock()
- wait, ok := obtainCertWaitChans[name]
- if ok {
- // lucky us -- another goroutine is already obtaining the certificate.
- // wait for it to finish obtaining the cert and then we'll use it.
- obtainCertWaitChansMu.Unlock()
-
- // TODO: see if we can get a proper context in here, for true cancellation
- timeout := time.NewTimer(2 * time.Minute)
- select {
- case <-timeout.C:
- return Certificate{}, fmt.Errorf("timed out waiting to obtain certificate for %s", name)
- case <-wait:
- timeout.Stop()
- }
-
- return getCertWithoutReobtaining()
- }
-
- // looks like it's up to us to do all the work and obtain the cert.
- // make a chan others can wait on if needed
- wait = make(chan struct{})
- obtainCertWaitChans[name] = wait
- obtainCertWaitChansMu.Unlock()
-
- unblockWaiters := func() {
- obtainCertWaitChansMu.Lock()
- close(wait)
- delete(obtainCertWaitChans, name)
- obtainCertWaitChansMu.Unlock()
- }
-
- // Make sure the certificate should be obtained based on config
- err := cfg.checkIfCertShouldBeObtained(name)
- if err != nil {
- unblockWaiters()
- return Certificate{}, err
- }
-
- if log != nil {
- log.Info("obtaining new certificate", zap.String("server_name", name))
- }
-
- // TODO: use a proper context; we use one with timeout because retries are enabled because interactive is false
- ctx, cancel := context.WithTimeout(context.TODO(), 90*time.Second)
- defer cancel()
-
- // Obtain the certificate
- err = cfg.ObtainCertAsync(ctx, name)
-
- // immediately unblock anyone waiting for it; doing this in
- // a defer would risk deadlock because of the recursive call
- // to getCertDuringHandshake below when we return!
- unblockWaiters()
-
- if err != nil {
- // shucks; failed to solve challenge on-demand
- return Certificate{}, err
- }
-
- // success; certificate was just placed on disk, so
- // we need only restart serving the certificate
- return getCertWithoutReobtaining()
-}
-
-// handshakeMaintenance performs a check on cert for expiration and OCSP validity.
-// If necessary, it will renew the certificate and/or refresh the OCSP staple.
-// OCSP stapling errors are not returned, only logged.
-//
-// This function is safe for use by multiple concurrent goroutines.
-func (cfg *Config) handshakeMaintenance(hello *tls.ClientHelloInfo, cert Certificate) (Certificate, error) {
- log := loggerNamed(cfg.Logger, "on_demand")
-
- // Check cert expiration
- if currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio) {
- return cfg.renewDynamicCertificate(hello, cert)
- }
-
- // Check OCSP staple validity
- if cert.ocsp != nil {
- refreshTime := cert.ocsp.ThisUpdate.Add(cert.ocsp.NextUpdate.Sub(cert.ocsp.ThisUpdate) / 2)
- if time.Now().After(refreshTime) {
- _, err := stapleOCSP(cfg.OCSP, cfg.Storage, &cert, nil)
- if err != nil {
- // An error with OCSP stapling is not the end of the world, and in fact, is
- // quite common considering not all certs have issuer URLs that support it.
- if log != nil {
- log.Warn("stapling OCSP",
- zap.String("server_name", hello.ServerName),
- zap.Error(err))
- }
- }
- cfg.certCache.mu.Lock()
- cfg.certCache.cache[cert.hash] = cert
- cfg.certCache.mu.Unlock()
- }
- }
-
- return cert, nil
-}
-
-// renewDynamicCertificate renews the certificate for name using cfg. It returns the
-// certificate to use and an error, if any. name should already be lower-cased before
-// calling this function. name is the name obtained directly from the handshake's
-// ClientHello. If the certificate hasn't yet expired, currentCert will be returned
-// and the renewal will happen in the background; otherwise this blocks until the
-// certificate has been renewed, and returns the renewed certificate.
-//
-// This function is safe for use by multiple concurrent goroutines.
-func (cfg *Config) renewDynamicCertificate(hello *tls.ClientHelloInfo, currentCert Certificate) (Certificate, error) {
- log := loggerNamed(cfg.Logger, "on_demand")
-
- name := cfg.getNameFromClientHello(hello)
- timeLeft := time.Until(currentCert.Leaf.NotAfter)
-
- getCertWithoutReobtaining := func() (Certificate, error) {
- // very important to set the obtainIfNecessary argument to false, so we don't repeat this infinitely
- return cfg.getCertDuringHandshake(hello, true, false)
- }
-
- // see if another goroutine is already working on this certificate
- obtainCertWaitChansMu.Lock()
- wait, ok := obtainCertWaitChans[name]
- if ok {
- // lucky us -- another goroutine is already renewing the certificate
- obtainCertWaitChansMu.Unlock()
-
- if timeLeft > 0 {
- // the current certificate hasn't expired, and another goroutine is already
- // renewing it, so we might as well serve what we have without blocking
- if log != nil {
- log.Debug("certificate expires soon but is already being renewed; serving current certificate",
- zap.Strings("subjects", currentCert.Names),
- zap.Duration("remaining", timeLeft))
- }
- return currentCert, nil
- }
-
- // otherwise, we'll have to wait for the renewal to finish so we don't serve
- // an expired certificate
-
- if log != nil {
- log.Debug("certificate has expired, but is already being renewed; waiting for renewal to complete",
- zap.Strings("subjects", currentCert.Names),
- zap.Time("expired", currentCert.Leaf.NotAfter))
- }
-
- // TODO: see if we can get a proper context in here, for true cancellation
- timeout := time.NewTimer(2 * time.Minute)
- select {
- case <-timeout.C:
- return Certificate{}, fmt.Errorf("timed out waiting for certificate renewal of %s", name)
- case <-wait:
- timeout.Stop()
- }
-
- return getCertWithoutReobtaining()
- }
-
- // looks like it's up to us to do all the work and renew the cert
- wait = make(chan struct{})
- obtainCertWaitChans[name] = wait
- obtainCertWaitChansMu.Unlock()
-
- unblockWaiters := func() {
- obtainCertWaitChansMu.Lock()
- close(wait)
- delete(obtainCertWaitChans, name)
- obtainCertWaitChansMu.Unlock()
- }
-
- if log != nil {
- log.Info("attempting certificate renewal",
- zap.String("server_name", name),
- zap.Strings("subjects", currentCert.Names),
- zap.Time("expiration", currentCert.Leaf.NotAfter),
- zap.Duration("remaining", timeLeft))
- }
-
- // Make sure a certificate for this name should be obtained on-demand
- err := cfg.checkIfCertShouldBeObtained(name)
- if err != nil {
- // if not, remove from cache (it will be deleted from storage later)
- cfg.certCache.mu.Lock()
- cfg.certCache.removeCertificate(currentCert)
- cfg.certCache.mu.Unlock()
- unblockWaiters()
- return Certificate{}, err
- }
-
- // Renew and reload the certificate
- renewAndReload := func(ctx context.Context, cancel context.CancelFunc) (Certificate, error) {
- defer cancel()
- err = cfg.RenewCertAsync(ctx, name, false)
- if err == nil {
- // even though the recursive nature of the dynamic cert loading
- // would just call this function anyway, we do it here to
- // make the replacement as atomic as possible.
- newCert, err := cfg.CacheManagedCertificate(name)
- if err != nil {
- if log != nil {
- log.Error("loading renewed certificate", zap.String("server_name", name), zap.Error(err))
- }
- } else {
- // replace the old certificate with the new one
- cfg.certCache.replaceCertificate(currentCert, newCert)
- }
- }
-
- // immediately unblock anyone waiting for it; doing this in
- // a defer would risk deadlock because of the recursive call
- // to getCertDuringHandshake below when we return!
- unblockWaiters()
-
- if err != nil {
- return Certificate{}, err
- }
-
- return getCertWithoutReobtaining()
- }
-
- // if the certificate hasn't expired, we can serve what we have and renew in the background
- if timeLeft > 0 {
- // TODO: get a proper context; we use one with timeout because retries are enabled because interactive is false
- ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute)
- go renewAndReload(ctx, cancel)
- return currentCert, nil
- }
-
- // otherwise, we have to block while we renew an expired certificate
- ctx, cancel := context.WithTimeout(context.TODO(), 90*time.Second)
- return renewAndReload(ctx, cancel)
-}
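
renewDynamicCertificate applies a stale-while-revalidate policy: if the certificate is still valid, serve it and renew in the background; only block the handshake once it has actually expired. A compact standard-library sketch of that decision (illustrative, not certmagic API):

package sketch

import (
	"crypto/x509"
	"time"
)

// staleWhileRenew returns leaf immediately if it has not expired, starting
// renewal in the background; otherwise it blocks until renew completes.
func staleWhileRenew(leaf *x509.Certificate, renew func() (*x509.Certificate, error)) (*x509.Certificate, error) {
	if time.Until(leaf.NotAfter) > 0 {
		go renew() // still valid; refresh in the background
		return leaf, nil
	}
	return renew() // expired; the caller must wait for a fresh one
}
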
-
-// getTLSALPNChallengeCert is to be called when the clientHello pertains to
-// a TLS-ALPN challenge and a certificate is required to solve it. This method gets
-// the relevant challenge info and then returns the associated certificate (if any)
-// or generates it anew if it's not available (as is the case when distributed
-// solving). True is returned if the challenge is being solved in a distributed
-// fashion (this has no semantic effect on the solving; it is mainly for logging).
-func (cfg *Config) getTLSALPNChallengeCert(clientHello *tls.ClientHelloInfo) (*tls.Certificate, bool, error) {
- chalData, distributed, err := cfg.getChallengeInfo(clientHello.ServerName)
- if err != nil {
- return nil, distributed, err
- }
-
- // fast path: we already created the certificate (this avoids having to re-create
- // it at every handshake that tries to verify, e.g. multi-perspective validation)
- if chalData.data != nil {
- return chalData.data.(*tls.Certificate), distributed, nil
- }
-
- // otherwise, we can re-create the solution certificate, but it takes a few cycles
- cert, err := acmez.TLSALPN01ChallengeCert(chalData.Challenge)
- if err != nil {
- return nil, distributed, fmt.Errorf("making TLS-ALPN challenge certificate: %v", err)
- }
- if cert == nil {
- return nil, distributed, fmt.Errorf("got nil TLS-ALPN challenge certificate but no error")
- }
-
- return cert, distributed, nil
-}
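
The fast path above memoizes the generated challenge certificate so repeated validation handshakes (as with multi-perspective validation) reuse one result instead of rebuilding it. The memoization in isolation, as a generic sketch with illustrative names:

package sketch

import (
	"crypto/tls"
	"sync"
)

// certMemo caches an expensively-built certificate.
type certMemo struct {
	mu   sync.Mutex
	cert *tls.Certificate
}

// get returns the cached certificate, building it on first use.
func (m *certMemo) get(build func() (*tls.Certificate, error)) (*tls.Certificate, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.cert != nil {
		return m.cert, nil // fast path
	}
	cert, err := build()
	if err != nil {
		return nil, err
	}
	m.cert = cert
	return cert, nil
}
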
-
-// getNameFromClientHello returns a normalized form of hello.ServerName.
-// If hello.ServerName is empty (i.e. client did not use SNI), then the
-// associated connection's local address is used to extract an IP address.
-func (*Config) getNameFromClientHello(hello *tls.ClientHelloInfo) string {
- if name := normalizedName(hello.ServerName); name != "" {
- return name
- }
- return localIPFromConn(hello.Conn)
-}
-
-// localIPFromConn returns the host portion of c's local address
-// and strips the scope ID if one exists (see RFC 4007).
-func localIPFromConn(c net.Conn) string {
- if c == nil {
- return ""
- }
- localAddr := c.LocalAddr().String()
- ip, _, err := net.SplitHostPort(localAddr)
- if err != nil {
- // OK; assume there was no port
- ip = localAddr
- }
- // IPv6 addresses can have scope IDs, e.g. "fe80::4c3:3cff:fe4f:7e0b%eth0",
- // but for our purposes, these are useless (unless a valid use case proves
- // otherwise; see issue #3911)
- if scopeIDStart := strings.Index(ip, "%"); scopeIDStart > -1 {
- ip = ip[:scopeIDStart]
- }
- return ip
-}
-
-// normalizedName returns a cleaned form of serverName that is
-// used for consistency when referring to a SNI value.
-func normalizedName(serverName string) string {
- return strings.ToLower(strings.TrimSpace(serverName))
-}
-
-// obtainCertWaitChans is used to coordinate obtaining certs for each hostname.
-var obtainCertWaitChans = make(map[string]chan struct{})
-var obtainCertWaitChansMu sync.Mutex
diff --git a/vendor/github.com/caddyserver/certmagic/httphandler.go b/vendor/github.com/caddyserver/certmagic/httphandler.go
deleted file mode 100644
index d17cfaab..00000000
--- a/vendor/github.com/caddyserver/certmagic/httphandler.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "net/http"
- "strings"
-
- "github.com/mholt/acmez/acme"
- "go.uber.org/zap"
-)
-
-// HTTPChallengeHandler wraps h in a handler that can solve the ACME
-// HTTP challenge. cfg is required, and it must have a certificate
-// cache backed by a functional storage facility, since that is where
-// the challenge state is stored between initiation and solution.
-//
-// If a request is not an ACME HTTP challenge, h will be invoked.
-func (am *ACMEManager) HTTPChallengeHandler(h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if am.HandleHTTPChallenge(w, r) {
- return
- }
- h.ServeHTTP(w, r)
- })
-}
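
Typical use of this wrapper, sketched under the assumption that am is an *ACMEManager whose certificate cache is backed by working storage:

package sketch

import (
	"fmt"
	"log"
	"net/http"

	"github.com/caddyserver/certmagic"
)

// serveWithHTTP01 answers ACME HTTP-01 challenge requests on port 80
// and passes all other traffic to an ordinary mux.
func serveWithHTTP01(am *certmagic.ACMEManager) {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	log.Fatal(http.ListenAndServe(":80", am.HTTPChallengeHandler(mux)))
}
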
-
-// HandleHTTPChallenge uses am to solve challenge requests from an ACME
-// server that were initiated by this instance or any other instance in
-// this cluster (being, any instances using the same storage am does).
-//
-// If the HTTP challenge is disabled, this function is a no-op.
-//
-// If am is nil or if am does not have a certificate cache backed by
-// usable storage, solving the HTTP challenge will fail.
-//
-// It returns true if it handled the request; if so, the response has
-// already been written. If false is returned, this call was a no-op and
-// the request has not been handled.
-func (am *ACMEManager) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool {
- if am == nil {
- return false
- }
- if am.DisableHTTPChallenge {
- return false
- }
- if !LooksLikeHTTPChallenge(r) {
- return false
- }
- return am.distributedHTTPChallengeSolver(w, r)
-}
-
-// distributedHTTPChallengeSolver checks to see if this challenge
-// request was initiated by this or another instance which uses the
-// same storage as am does, and attempts to complete the challenge for
-// it. It returns true if the request was handled; false otherwise.
-func (am *ACMEManager) distributedHTTPChallengeSolver(w http.ResponseWriter, r *http.Request) bool {
- if am == nil {
- return false
- }
- host := hostOnly(r.Host)
- chalInfo, distributed, err := am.config.getChallengeInfo(host)
- if err != nil {
- if am.Logger != nil {
- am.Logger.Error("looking up info for HTTP challenge",
- zap.String("host", host),
- zap.Error(err))
- }
- return false
- }
- return solveHTTPChallenge(am.Logger, w, r, chalInfo.Challenge, distributed)
-}
-
-// solveHTTPChallenge solves the HTTP challenge using the given challenge information.
-// If the challenge is being solved in a distributed fashion, set distributed to true for logging purposes.
-// It returns true if the properties of the request check out in relation to the HTTP challenge.
-// Most of this code borrowed from xenolf's built-in HTTP-01 challenge solver in March 2018.
-func solveHTTPChallenge(logger *zap.Logger, w http.ResponseWriter, r *http.Request, challenge acme.Challenge, distributed bool) bool {
- challengeReqPath := challenge.HTTP01ResourcePath()
- if r.URL.Path == challengeReqPath &&
- strings.EqualFold(hostOnly(r.Host), challenge.Identifier.Value) && // mitigate DNS rebinding attacks
- r.Method == "GET" {
- w.Header().Add("Content-Type", "text/plain")
- w.Write([]byte(challenge.KeyAuthorization))
- r.Close = true
- if logger != nil {
- logger.Info("served key authentication",
- zap.String("identifier", challenge.Identifier.Value),
- zap.String("challenge", "http-01"),
- zap.String("remote", r.RemoteAddr),
- zap.Bool("distributed", distributed))
- }
- return true
- }
- return false
-}
-
-// SolveHTTPChallenge solves the HTTP challenge. It should be used only on HTTP requests that are
-// from ACME servers trying to validate an identifier (i.e. LooksLikeHTTPChallenge() == true). It
-// returns true if the request criteria check out and it answered with key authentication, in which
-// case no further handling of the request is necessary.
-func SolveHTTPChallenge(logger *zap.Logger, w http.ResponseWriter, r *http.Request, challenge acme.Challenge) bool {
- return solveHTTPChallenge(logger, w, r, challenge, false)
-}
-
-// LooksLikeHTTPChallenge returns true if r looks like an ACME
-// HTTP challenge request from an ACME server.
-func LooksLikeHTTPChallenge(r *http.Request) bool {
- return r.Method == "GET" && strings.HasPrefix(r.URL.Path, challengeBasePath)
-}
-
-const challengeBasePath = "/.well-known/acme-challenge"
diff --git a/vendor/github.com/caddyserver/certmagic/maintain.go b/vendor/github.com/caddyserver/certmagic/maintain.go
deleted file mode 100644
index 63d475ce..00000000
--- a/vendor/github.com/caddyserver/certmagic/maintain.go
+++ /dev/null
@@ -1,648 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "context"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "log"
- "path"
- "runtime"
- "strings"
- "time"
-
- "github.com/mholt/acmez/acme"
- "go.uber.org/zap"
- "golang.org/x/crypto/ocsp"
-)
-
-// maintainAssets is a permanently-blocking function
-// that loops indefinitely and, on a regular schedule, checks
-// certificates for expiration and initiates a renewal of certs
-// that are expiring soon. It also updates OCSP stapling. It
-// should only be called once per cache. Panics are recovered,
-// and if panicCount < 10, the function is called recursively,
-// incrementing panicCount each time. Initial invocation should
-// start panicCount at 0.
-func (certCache *Cache) maintainAssets(panicCount int) {
- log := loggerNamed(certCache.logger, "maintenance")
- if log != nil {
- log = log.With(zap.String("cache", fmt.Sprintf("%p", certCache)))
- }
-
- defer func() {
- if err := recover(); err != nil {
- buf := make([]byte, stackTraceBufferSize)
- buf = buf[:runtime.Stack(buf, false)]
- if log != nil {
- log.Error("panic", zap.Any("error", err), zap.ByteString("stack", buf))
- }
- if panicCount < 10 {
- certCache.maintainAssets(panicCount + 1)
- }
- }
- }()
-
- renewalTicker := time.NewTicker(certCache.options.RenewCheckInterval)
- ocspTicker := time.NewTicker(certCache.options.OCSPCheckInterval)
-
- if log != nil {
- log.Info("started background certificate maintenance")
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- for {
- select {
- case <-renewalTicker.C:
- err := certCache.RenewManagedCertificates(ctx)
- if err != nil && log != nil {
- log.Error("renewing managed certificates", zap.Error(err))
- }
- case <-ocspTicker.C:
- certCache.updateOCSPStaples(ctx)
- case <-certCache.stopChan:
- renewalTicker.Stop()
- ocspTicker.Stop()
- // TODO: stop any in-progress maintenance operations and clear locks we made (this might be done now with our use of context)
- if log != nil {
- log.Info("stopped background certificate maintenance")
- }
- close(certCache.doneChan)
- return
- }
- }
-}
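
maintainAssets uses a bounded recover-and-restart idiom: a panic is logged together with its stack, and the loop relaunches itself at most ten times rather than looping forever. The idiom in isolation (the buffer size is an assumed stand-in for stackTraceBufferSize):

package sketch

import (
	"log"
	"runtime"
)

const maxRestarts = 10

// maintain restarts itself after a panic, up to maxRestarts times.
func maintain(panicCount int) {
	defer func() {
		if err := recover(); err != nil {
			buf := make([]byte, 64<<10)
			buf = buf[:runtime.Stack(buf, false)]
			log.Printf("panic: %v\n%s", err, buf)
			if panicCount < maxRestarts {
				maintain(panicCount + 1)
			}
		}
	}()
	// ... ticker-driven maintenance loop ...
}
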
-
-// RenewManagedCertificates renews managed certificates,
-// including ones loaded on-demand. Note that this is done
-// automatically on a regular basis; normally you will not
-// need to call this. This method assumes non-interactive
-// mode (i.e. operating in the background).
-func (certCache *Cache) RenewManagedCertificates(ctx context.Context) error {
- log := loggerNamed(certCache.logger, "maintenance")
-
- // configs will hold a map of certificate name to the config
- // to use when managing that certificate
- configs := make(map[string]*Config)
-
- // we use the queues for a very important reason: to do any and all
- // operations that could require an exclusive write lock outside
- // of the read lock! otherwise we get a deadlock, yikes. in other
- // words, our first iteration through the certificate cache does NOT
- // perform any operations--only queues them--so that more fine-grained
- // write locks may be obtained during the actual operations.
- var renewQueue, reloadQueue, deleteQueue []Certificate
-
- certCache.mu.RLock()
- for certKey, cert := range certCache.cache {
- if !cert.managed {
- continue
- }
-
- // the list of names on this cert should never be empty... programmer error?
-		if len(cert.Names) == 0 {
- if log != nil {
- log.Warn("certificate has no names; removing from cache", zap.String("cert_key", certKey))
- }
- deleteQueue = append(deleteQueue, cert)
- continue
- }
-
- // get the config associated with this certificate
- cfg, err := certCache.getConfig(cert)
- if err != nil {
- if log != nil {
- log.Error("unable to get configuration to manage certificate; unable to renew",
- zap.Strings("identifiers", cert.Names),
- zap.Error(err))
- }
- continue
- }
- if cfg == nil {
- // this is bad if this happens, probably a programmer error (oops)
- if log != nil {
- log.Error("no configuration associated with certificate; unable to manage",
- zap.Strings("identifiers", cert.Names))
- }
- continue
- }
- if cfg.OnDemand != nil {
- continue
- }
-
- // if time is up or expires soon, we need to try to renew it
- if cert.NeedsRenewal(cfg) {
- configs[cert.Names[0]] = cfg
-
-			// see if the certificate in storage has already been renewed, possibly by
-			// another instance that didn't coordinate with this one; if so, just load
-			// it (checking disk first is a simple way to drastically reduce rate limit problems)
- storedCertExpiring, err := cfg.managedCertInStorageExpiresSoon(cert)
- if err != nil {
- // hmm, weird, but not a big deal, maybe it was deleted or something
- if log != nil {
- log.Warn("error while checking if stored certificate is also expiring soon",
- zap.Strings("identifiers", cert.Names),
- zap.Error(err))
- }
- } else if !storedCertExpiring {
- // if the certificate is NOT expiring soon and there was no error, then we
- // are good to just reload the certificate from storage instead of repeating
- // a likely-unnecessary renewal procedure
- reloadQueue = append(reloadQueue, cert)
- continue
- }
-
- // the certificate in storage has not been renewed yet, so we will do it
- // NOTE: It is super-important to note that the TLS-ALPN challenge requires
- // a write lock on the cache in order to complete its challenge, so it is extra
- // vital that this renew operation does not happen inside our read lock!
- renewQueue = append(renewQueue, cert)
- }
- }
- certCache.mu.RUnlock()
-
- // Reload certificates that merely need to be updated in memory
- for _, oldCert := range reloadQueue {
- timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
- if log != nil {
- log.Info("certificate expires soon, but is already renewed in storage; reloading stored certificate",
- zap.Strings("identifiers", oldCert.Names),
- zap.Duration("remaining", timeLeft))
- }
-
- cfg := configs[oldCert.Names[0]]
-
- // crucially, this happens OUTSIDE a lock on the certCache
- err := cfg.reloadManagedCertificate(oldCert)
- if err != nil {
- if log != nil {
- log.Error("loading renewed certificate",
- zap.Strings("identifiers", oldCert.Names),
- zap.Error(err))
-		return nil // already being served by us
- continue
- }
- }
-
- // Renewal queue
- for _, oldCert := range renewQueue {
- cfg := configs[oldCert.Names[0]]
- err := certCache.queueRenewalTask(ctx, oldCert, cfg)
- if err != nil {
- if log != nil {
- log.Error("queueing renewal task",
- zap.Strings("identifiers", oldCert.Names),
- zap.Error(err))
- }
- continue
- }
- }
-
- // Deletion queue
- certCache.mu.Lock()
- for _, cert := range deleteQueue {
- certCache.removeCertificate(cert)
- }
- certCache.mu.Unlock()
-
- return nil
-}
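
The queueing discipline above (scan under a short read lock, act outside it) generalizes to any cache whose work items may themselves need the write lock. A minimal sketch of the pattern with illustrative names:

package sketch

import "sync"

// renewAll queues keys under a read lock, then does the work unlocked,
// so that renew may take the write lock internally without deadlocking.
func renewAll(mu *sync.RWMutex, cache map[string]bool, renew func(string)) {
	var queue []string
	mu.RLock()
	for key, needsRenewal := range cache {
		if needsRenewal {
			queue = append(queue, key) // queue only; never mutate here
		}
	}
	mu.RUnlock()
	for _, key := range queue {
		renew(key)
	}
}
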
-
-func (certCache *Cache) queueRenewalTask(ctx context.Context, oldCert Certificate, cfg *Config) error {
- log := loggerNamed(certCache.logger, "maintenance")
-
- timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
- if log != nil {
- log.Info("certificate expires soon; queuing for renewal",
- zap.Strings("identifiers", oldCert.Names),
- zap.Duration("remaining", timeLeft))
- }
-
- // Get the name which we should use to renew this certificate;
- // we only support managing certificates with one name per cert,
- // so this should be easy.
- renewName := oldCert.Names[0]
-
- // queue up this renewal job (is a no-op if already active or queued)
- jm.Submit(cfg.Logger, "renew_"+renewName, func() error {
- timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
- if log != nil {
- log.Info("attempting certificate renewal",
- zap.Strings("identifiers", oldCert.Names),
- zap.Duration("remaining", timeLeft))
- }
-
- // perform renewal - crucially, this happens OUTSIDE a lock on certCache
- err := cfg.RenewCertAsync(ctx, renewName, false)
- if err != nil {
- if cfg.OnDemand != nil {
- // loaded dynamically, remove dynamically
- certCache.mu.Lock()
- certCache.removeCertificate(oldCert)
- certCache.mu.Unlock()
- }
- return fmt.Errorf("%v %v", oldCert.Names, err)
- }
-
- // successful renewal, so update in-memory cache by loading
- // renewed certificate so it will be used with handshakes
- err = cfg.reloadManagedCertificate(oldCert)
- if err != nil {
- return ErrNoRetry{fmt.Errorf("%v %v", oldCert.Names, err)}
- }
- return nil
- })
-
- return nil
-}
-
-// updateOCSPStaples updates the OCSP stapling in all
-// eligible, cached certificates.
-//
-// OCSP maintenance strives to abide the relevant points on
-// Ryan Sleevi's recommendations for good OCSP support:
-// https://gist.github.com/sleevi/5efe9ef98961ecfb4da8
-func (certCache *Cache) updateOCSPStaples(ctx context.Context) {
- logger := loggerNamed(certCache.logger, "maintenance")
-
- // temporary structures to store updates or tasks
- // so that we can keep our locks short-lived
- type ocspUpdate struct {
- rawBytes []byte
- parsed *ocsp.Response
- }
- type updateQueueEntry struct {
- cert Certificate
- certHash string
- lastNextUpdate time.Time
- }
- type renewQueueEntry struct {
- oldCert Certificate
- ocspResp *ocsp.Response
- }
- updated := make(map[string]ocspUpdate)
- var updateQueue []updateQueueEntry // certs that need a refreshed staple
- var renewQueue []renewQueueEntry // certs that need to be renewed (due to revocation)
- configs := make(map[string]*Config)
-
- // obtain brief read lock during our scan to see which staples need updating
- certCache.mu.RLock()
- for certHash, cert := range certCache.cache {
- // no point in updating OCSP for expired or "synthetic" certificates
- if cert.Leaf == nil || cert.Expired() {
- continue
- }
- var lastNextUpdate time.Time
- if cert.ocsp != nil {
- lastNextUpdate = cert.ocsp.NextUpdate
- if freshOCSP(cert.ocsp) {
- continue // no need to update staple if ours is still fresh
- }
- }
- updateQueue = append(updateQueue, updateQueueEntry{cert, certHash, lastNextUpdate})
- }
- certCache.mu.RUnlock()
-
- // perform updates outside of any lock on certCache
- for _, qe := range updateQueue {
- cert := qe.cert
- certHash := qe.certHash
- lastNextUpdate := qe.lastNextUpdate
-
- cfg, err := certCache.getConfig(cert)
- if err != nil {
- if logger != nil {
- logger.Error("unable to refresh OCSP staple because getting automation config for certificate failed",
- zap.Strings("identifiers", cert.Names),
- zap.Error(err))
- }
- continue
- }
- if cfg == nil {
- // this is bad if this happens, probably a programmer error (oops)
- if logger != nil {
- logger.Error("no configuration associated with certificate; unable to manage OCSP staples",
- zap.Strings("identifiers", cert.Names))
- }
- continue
- }
-
- ocspResp, err := stapleOCSP(cfg.OCSP, cfg.Storage, &cert, nil)
- if err != nil || ocspResp == nil {
- if cert.ocsp != nil {
- // if there was no staple before, that's fine; otherwise we should log the error
- if logger != nil {
- logger.Error("stapling OCSP",
- zap.Strings("identifiers", cert.Names),
- zap.Error(err))
- }
- }
- continue
- }
-
- // By this point, we've obtained the latest OCSP response.
- // If there was no staple before, or if the response is updated, make
- // sure we apply the update to all names on the certificate.
- if cert.ocsp != nil && (lastNextUpdate.IsZero() || lastNextUpdate != cert.ocsp.NextUpdate) {
- if logger != nil {
- logger.Info("advancing OCSP staple",
- zap.Strings("identifiers", cert.Names),
- zap.Time("from", lastNextUpdate),
- zap.Time("to", cert.ocsp.NextUpdate))
- }
- updated[certHash] = ocspUpdate{rawBytes: cert.Certificate.OCSPStaple, parsed: cert.ocsp}
- }
-
- // If a managed certificate was revoked, we should attempt to replace it with a new one.
- if cert.managed && ocspResp.Status == ocsp.Revoked && len(cert.Names) > 0 {
- renewQueue = append(renewQueue, renewQueueEntry{
- oldCert: cert,
- ocspResp: ocspResp,
- })
- configs[cert.Names[0]] = cfg
- }
- }
-
- // These write locks should be brief since we have all the info we need now.
- for certKey, update := range updated {
- certCache.mu.Lock()
- cert := certCache.cache[certKey]
- cert.ocsp = update.parsed
- cert.Certificate.OCSPStaple = update.rawBytes
- certCache.cache[certKey] = cert
- certCache.mu.Unlock()
- }
-
- // We attempt to replace any certificates that were revoked.
- // Crucially, this happens OUTSIDE a lock on the certCache.
- for _, renew := range renewQueue {
- if logger != nil {
- logger.Warn("OCSP status for managed certificate is REVOKED; attempting to replace with new certificate",
- zap.Strings("identifiers", renew.oldCert.Names),
- zap.Time("expiration", renew.oldCert.Leaf.NotAfter))
- }
-
- renewName := renew.oldCert.Names[0]
- cfg := configs[renewName]
-
- // if revoked for key compromise, we can't be sure whether the storage of
- // the key is still safe; however, we KNOW the old key is not safe, and we
- // can only hope by the time of revocation that storage has been secured;
- // key management is not something we want to get into, but in this case
- // it seems prudent to replace the key - and since renewal requires reuse
- // of a prior key, we can't do a "renew" to replace the cert if we need a
- // new key, so we'll have to do an obtain instead
- var obtainInsteadOfRenew bool
- if renew.ocspResp.RevocationReason == acme.ReasonKeyCompromise {
- err := cfg.moveCompromisedPrivateKey(renew.oldCert, logger)
- if err != nil && logger != nil {
- logger.Error("could not remove compromised private key from use",
- zap.Strings("identifiers", renew.oldCert.Names),
- zap.String("issuer", renew.oldCert.issuerKey),
- zap.Error(err))
- }
- obtainInsteadOfRenew = true
- }
-
- var err error
- if obtainInsteadOfRenew {
- err = cfg.ObtainCertAsync(ctx, renewName)
- } else {
- // notice that we force renewal; otherwise, it might see that the
- // certificate isn't close to expiring and return, but we really
- // need a replacement certificate! see issue #4191
- err = cfg.RenewCertAsync(ctx, renewName, true)
- }
- if err != nil {
- // probably better to not serve a revoked certificate at all
- if logger != nil {
-				logger.Error("unable to obtain new certificate after OCSP status of REVOKED; removing from cache",
- zap.Strings("identifiers", renew.oldCert.Names),
- zap.Error(err))
- }
- certCache.mu.Lock()
- certCache.removeCertificate(renew.oldCert)
- certCache.mu.Unlock()
- continue
- }
- err = cfg.reloadManagedCertificate(renew.oldCert)
- if err != nil {
- if logger != nil {
- logger.Error("after obtaining new certificate due to OCSP status of REVOKED",
- zap.Strings("identifiers", renew.oldCert.Names),
- zap.Error(err))
- }
- continue
- }
- }
-}
-
-// CleanStorageOptions specifies how to clean up a storage unit.
-type CleanStorageOptions struct {
- OCSPStaples bool
- ExpiredCerts bool
- ExpiredCertGracePeriod time.Duration
-}
-
-// CleanStorage removes assets which are no longer useful,
-// according to opts.
-func CleanStorage(ctx context.Context, storage Storage, opts CleanStorageOptions) {
- if opts.OCSPStaples {
- err := deleteOldOCSPStaples(ctx, storage)
- if err != nil {
- log.Printf("[ERROR] Deleting old OCSP staples: %v", err)
- }
- }
- if opts.ExpiredCerts {
- err := deleteExpiredCerts(ctx, storage, opts.ExpiredCertGracePeriod)
- if err != nil {
- log.Printf("[ERROR] Deleting expired certificates: %v", err)
- }
- }
- // TODO: delete stale locks?
-}
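
A typical invocation, written as if inside this package; the 24-hour grace period is an illustrative choice, and storage is any Storage implementation:

// cleanup deletes stale OCSP staples and the assets of certificates
// that expired more than 24 hours ago.
func cleanup(ctx context.Context, storage Storage) {
	CleanStorage(ctx, storage, CleanStorageOptions{
		OCSPStaples:            true,
		ExpiredCerts:           true,
		ExpiredCertGracePeriod: 24 * time.Hour,
	})
}
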
-
-func deleteOldOCSPStaples(ctx context.Context, storage Storage) error {
- ocspKeys, err := storage.List(prefixOCSP, false)
- if err != nil {
- // maybe just hasn't been created yet; no big deal
- return nil
- }
- for _, key := range ocspKeys {
- // if context was cancelled, quit early; otherwise proceed
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- ocspBytes, err := storage.Load(key)
- if err != nil {
- log.Printf("[ERROR] While deleting old OCSP staples, unable to load staple file: %v", err)
- continue
- }
- resp, err := ocsp.ParseResponse(ocspBytes, nil)
- if err != nil {
- // contents are invalid; delete it
- err = storage.Delete(key)
- if err != nil {
- log.Printf("[ERROR] Purging corrupt staple file %s: %v", key, err)
- }
- continue
- }
- if time.Now().After(resp.NextUpdate) {
- // response has expired; delete it
- err = storage.Delete(key)
- if err != nil {
- log.Printf("[ERROR] Purging expired staple file %s: %v", key, err)
- }
- }
- }
- return nil
-}
-
-func deleteExpiredCerts(ctx context.Context, storage Storage, gracePeriod time.Duration) error {
- issuerKeys, err := storage.List(prefixCerts, false)
- if err != nil {
- // maybe just hasn't been created yet; no big deal
- return nil
- }
-
- for _, issuerKey := range issuerKeys {
- siteKeys, err := storage.List(issuerKey, false)
- if err != nil {
- log.Printf("[ERROR] Listing contents of %s: %v", issuerKey, err)
- continue
- }
-
- for _, siteKey := range siteKeys {
- // if context was cancelled, quit early; otherwise proceed
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- siteAssets, err := storage.List(siteKey, false)
- if err != nil {
- log.Printf("[ERROR] Listing contents of %s: %v", siteKey, err)
- continue
- }
-
- for _, assetKey := range siteAssets {
- if path.Ext(assetKey) != ".crt" {
- continue
- }
-
- certFile, err := storage.Load(assetKey)
- if err != nil {
- return fmt.Errorf("loading certificate file %s: %v", assetKey, err)
- }
- block, _ := pem.Decode(certFile)
- if block == nil || block.Type != "CERTIFICATE" {
- return fmt.Errorf("certificate file %s does not contain PEM-encoded certificate", assetKey)
- }
- cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return fmt.Errorf("certificate file %s is malformed; error parsing PEM: %v", assetKey, err)
- }
-
- if expiredTime := time.Since(cert.NotAfter); expiredTime >= gracePeriod {
- log.Printf("[INFO] Certificate %s expired %s ago; cleaning up", assetKey, expiredTime)
- baseName := strings.TrimSuffix(assetKey, ".crt")
- for _, relatedAsset := range []string{
- assetKey,
- baseName + ".key",
- baseName + ".json",
- } {
- log.Printf("[INFO] Deleting %s because resource expired", relatedAsset)
- err := storage.Delete(relatedAsset)
- if err != nil {
- log.Printf("[ERROR] Cleaning up asset related to expired certificate for %s: %s: %v",
- baseName, relatedAsset, err)
- }
- }
- }
- }
-
- // update listing; if folder is empty, delete it
- siteAssets, err = storage.List(siteKey, false)
- if err != nil {
- continue
- }
- if len(siteAssets) == 0 {
- log.Printf("[INFO] Deleting %s because key is empty", siteKey)
- err := storage.Delete(siteKey)
- if err != nil {
- return fmt.Errorf("deleting empty site folder %s: %v", siteKey, err)
- }
- }
- }
- }
- return nil
-}
-
-// moveCompromisedPrivateKey moves the private key for cert to a ".compromised" file
-// by copying the data to the new file, then deleting the old one.
-func (cfg *Config) moveCompromisedPrivateKey(cert Certificate, logger *zap.Logger) error {
- privKeyStorageKey := StorageKeys.SitePrivateKey(cert.issuerKey, cert.Names[0])
-
- privKeyPEM, err := cfg.Storage.Load(privKeyStorageKey)
- if err != nil {
- return err
- }
-
- compromisedPrivKeyStorageKey := privKeyStorageKey + ".compromised"
- err = cfg.Storage.Store(compromisedPrivKeyStorageKey, privKeyPEM)
- if err != nil {
- // better safe than sorry: as a last resort, try deleting the key so it won't be reused
- cfg.Storage.Delete(privKeyStorageKey)
- return err
- }
-
- err = cfg.Storage.Delete(privKeyStorageKey)
- if err != nil {
- return err
- }
-
-	if logger != nil {
-		logger.Info("removed certificate's compromised private key from use",
-			zap.String("storage_path", compromisedPrivKeyStorageKey),
-			zap.Strings("identifiers", cert.Names),
-			zap.String("issuer", cert.issuerKey))
-	}
-
- return nil
-}
-
-const (
- // DefaultRenewCheckInterval is how often to check certificates for expiration.
- // Scans are very lightweight, so this can be semi-frequent. This default should
-	// be smaller than <Minimum Cert Lifetime>*DefaultRenewalWindowRatio/3, which
- // gives certificates plenty of chance to be renewed on time.
- DefaultRenewCheckInterval = 10 * time.Minute
-
- // DefaultRenewalWindowRatio is how much of a certificate's lifetime becomes the
- // renewal window. The renewal window is the span of time at the end of the
- // certificate's validity period in which it should be renewed. A default value
- // of ~1/3 is pretty safe and recommended for most certificates.
- DefaultRenewalWindowRatio = 1.0 / 3.0
-
- // DefaultOCSPCheckInterval is how often to check if OCSP stapling needs updating.
- DefaultOCSPCheckInterval = 1 * time.Hour
-)
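
Concretely, with the default 1/3 ratio a 90-day certificate enters its renewal window 30 days before expiry, and the 10-minute check interval gives it thousands of opportunities to renew in time. A helper expressing that arithmetic, written as if inside this package (renewalStart itself is not part of certmagic):

// renewalStart returns when renewal attempts begin for a certificate
// valid over [notBefore, notAfter]; for a 90-day certificate this is
// 30 days before expiry.
func renewalStart(notBefore, notAfter time.Time) time.Time {
	lifetime := notAfter.Sub(notBefore)
	window := time.Duration(float64(lifetime) * DefaultRenewalWindowRatio)
	return notAfter.Add(-window)
}
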
diff --git a/vendor/github.com/caddyserver/certmagic/ocsp.go b/vendor/github.com/caddyserver/certmagic/ocsp.go
deleted file mode 100644
index 4a21546d..00000000
--- a/vendor/github.com/caddyserver/certmagic/ocsp.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "bytes"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net/http"
- "time"
-
- "golang.org/x/crypto/ocsp"
-)
-
-// stapleOCSP staples OCSP information to cert for hostname name.
-// If you have it handy, you should pass in the PEM-encoded certificate
-// bundle; otherwise the DER-encoded cert will have to be PEM-encoded.
-// If you don't have the PEM blocks already, just pass in nil.
-//
-// Errors here are not necessarily fatal; it could just be that the
-// certificate doesn't have an issuer URL. This function may return
-// both nil values if OCSP stapling is disabled according to ocspConfig.
-//
-// If a status was received, it returns that status. Note that the
-// returned status is not always stapled to the certificate.
-func stapleOCSP(ocspConfig OCSPConfig, storage Storage, cert *Certificate, pemBundle []byte) (*ocsp.Response, error) {
- if ocspConfig.DisableStapling {
- return nil, nil
- }
-
- if pemBundle == nil {
- // we need a PEM encoding only for some function calls below
- bundle := new(bytes.Buffer)
- for _, derBytes := range cert.Certificate.Certificate {
- pem.Encode(bundle, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
- }
- pemBundle = bundle.Bytes()
- }
-
- var ocspBytes []byte
- var ocspResp *ocsp.Response
- var ocspErr error
- var gotNewOCSP bool
-
- // First try to load OCSP staple from storage and see if
- // we can still use it.
- ocspStapleKey := StorageKeys.OCSPStaple(cert, pemBundle)
- cachedOCSP, err := storage.Load(ocspStapleKey)
- if err == nil {
- resp, err := ocsp.ParseResponse(cachedOCSP, nil)
- if err == nil {
- if freshOCSP(resp) {
- // staple is still fresh; use it
- ocspBytes = cachedOCSP
- ocspResp = resp
- }
- } else {
- // invalid contents; delete the file
- // (we do this independently of the maintenance routine because
- // in this case we know for sure this should be a staple file
- // because we loaded it by name, whereas the maintenance routine
- // just iterates the list of files, even if somehow a non-staple
- // file gets in the folder. in this case we are sure it is corrupt.)
- err := storage.Delete(ocspStapleKey)
- if err != nil {
- log.Printf("[WARNING] Unable to delete invalid OCSP staple file: %v", err)
- }
- }
- }
-
- // If we couldn't get a fresh staple by reading the cache,
- // then we need to request it from the OCSP responder
- if ocspResp == nil || len(ocspBytes) == 0 {
- ocspBytes, ocspResp, ocspErr = getOCSPForCert(ocspConfig, pemBundle)
- if ocspErr != nil {
- // An error here is not a problem because a certificate may simply
- // not contain a link to an OCSP server. But we should log it anyway.
- // There's nothing else we can do to get OCSP for this certificate,
- // so we can return here with the error.
- return nil, fmt.Errorf("no OCSP stapling for %v: %v", cert.Names, ocspErr)
- }
- gotNewOCSP = true
- }
-
- // By now, we should have a response. If good, staple it to
- // the certificate. If the OCSP response was not loaded from
- // storage, we persist it for next time.
- if ocspResp.Status == ocsp.Good {
- if ocspResp.NextUpdate.After(cert.Leaf.NotAfter) {
- // uh oh, this OCSP response expires AFTER the certificate does, that's kinda bogus.
- // it was the reason a lot of Symantec-validated sites (not Caddy) went down
- // in October 2017. https://twitter.com/mattiasgeniar/status/919432824708648961
- return ocspResp, fmt.Errorf("invalid: OCSP response for %v valid after certificate expiration (%s)",
- cert.Names, cert.Leaf.NotAfter.Sub(ocspResp.NextUpdate))
- }
- cert.Certificate.OCSPStaple = ocspBytes
- cert.ocsp = ocspResp
- if gotNewOCSP {
- err := storage.Store(ocspStapleKey, ocspBytes)
- if err != nil {
- return ocspResp, fmt.Errorf("unable to write OCSP staple file for %v: %v", cert.Names, err)
- }
- }
- }
-
- return ocspResp, nil
-}
-
-// getOCSPForCert takes a PEM-encoded cert or cert bundle and returns the raw OCSP response,
-// the parsed response, and an error, if any. The returned []byte can be passed directly
-// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the
-// issued certificate, this function will try to get the issuer certificate from the
-// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return
-// values are nil, the OCSP status may be assumed OCSPUnknown.
-//
-// Borrowed from xenolf.
-func getOCSPForCert(ocspConfig OCSPConfig, bundle []byte) ([]byte, *ocsp.Response, error) {
- // TODO: Perhaps this should be synchronized too, with a Locker?
-
- certificates, err := parseCertsFromPEMBundle(bundle)
- if err != nil {
- return nil, nil, err
- }
-
- // We expect the certificate slice to be ordered downwards the chain.
- // SRV CRT -> CA. We need to pull the leaf and issuer certs out of it,
- // which should always be the first two certificates. If there's no
- // OCSP server listed in the leaf cert, there's nothing to do. And if
- // we have only one certificate so far, we need to get the issuer cert.
- issuedCert := certificates[0]
- if len(issuedCert.OCSPServer) == 0 {
- return nil, nil, fmt.Errorf("no OCSP server specified in certificate")
- }
-
- // apply override for responder URL
- respURL := issuedCert.OCSPServer[0]
- if len(ocspConfig.ResponderOverrides) > 0 {
- if override, ok := ocspConfig.ResponderOverrides[respURL]; ok {
- respURL = override
- }
- }
- if respURL == "" {
- return nil, nil, fmt.Errorf("override disables querying OCSP responder: %v", issuedCert.OCSPServer[0])
- }
-
- if len(certificates) == 1 {
- if len(issuedCert.IssuingCertificateURL) == 0 {
- return nil, nil, fmt.Errorf("no URL to issuing certificate")
- }
-
- resp, err := http.Get(issuedCert.IssuingCertificateURL[0])
- if err != nil {
- return nil, nil, fmt.Errorf("getting issuer certificate: %v", err)
- }
- defer resp.Body.Close()
-
- issuerBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024*1024))
- if err != nil {
- return nil, nil, fmt.Errorf("reading issuer certificate: %v", err)
- }
-
- issuerCert, err := x509.ParseCertificate(issuerBytes)
- if err != nil {
- return nil, nil, fmt.Errorf("parsing issuer certificate: %v", err)
- }
-
-		// append the issuer cert to the end of the slice;
-		// we want it ordered SRV CRT -> CA
- certificates = append(certificates, issuerCert)
- }
-
- issuerCert := certificates[1]
-
- ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil)
- if err != nil {
- return nil, nil, fmt.Errorf("creating OCSP request: %v", err)
- }
-
- reader := bytes.NewReader(ocspReq)
-	resp, err := http.Post(respURL, "application/ocsp-request", reader)
-	if err != nil {
-		return nil, nil, fmt.Errorf("making OCSP request: %v", err)
-	}
-	defer resp.Body.Close()
-
-	ocspResBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024*1024))
- if err != nil {
- return nil, nil, fmt.Errorf("reading OCSP response: %v", err)
- }
-
- ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert)
- if err != nil {
- return nil, nil, fmt.Errorf("parsing OCSP response: %v", err)
- }
-
- return ocspResBytes, ocspRes, nil
-}
-
-// freshOCSP returns true if resp is still fresh,
-// meaning that it is not expedient to get an
-// updated response from the OCSP server.
-func freshOCSP(resp *ocsp.Response) bool {
- nextUpdate := resp.NextUpdate
- // If there is an OCSP responder certificate, and it expires before the
- // OCSP response, use its expiration date as the end of the OCSP
- // response's validity period.
- if resp.Certificate != nil && resp.Certificate.NotAfter.Before(nextUpdate) {
- nextUpdate = resp.Certificate.NotAfter
- }
- // start checking OCSP staple about halfway through validity period for good measure
- refreshTime := resp.ThisUpdate.Add(nextUpdate.Sub(resp.ThisUpdate) / 2)
- return time.Now().Before(refreshTime)
-}
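
The halfway rule with concrete numbers: a staple whose ThisUpdate is 00:00 and whose NextUpdate is 12:00 is treated as fresh until 06:00, after which maintenance will try to refresh it. Checking the arithmetic, written as if inside this file:

func exampleRefreshTime() {
	thisUpdate := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)
	nextUpdate := time.Date(2021, 1, 1, 12, 0, 0, 0, time.UTC)
	fmt.Println(thisUpdate.Add(nextUpdate.Sub(thisUpdate) / 2))
	// prints 2021-01-01 06:00:00 +0000 UTC
}
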
diff --git a/vendor/github.com/caddyserver/certmagic/ratelimiter.go b/vendor/github.com/caddyserver/certmagic/ratelimiter.go
deleted file mode 100644
index 6a3b7b18..00000000
--- a/vendor/github.com/caddyserver/certmagic/ratelimiter.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "context"
- "log"
- "runtime"
- "sync"
- "time"
-)
-
-// NewRateLimiter returns a rate limiter that allows up to maxEvents
-// in a sliding window of size window. If maxEvents and window are
-// both 0, or if maxEvents is non-zero and window is 0, rate limiting
-// is disabled. This function panics if maxEvents is less than 0 or
-// if maxEvents is 0 and window is non-zero, which is considered to be
-// an invalid configuration, as it would never allow events.
-func NewRateLimiter(maxEvents int, window time.Duration) *RingBufferRateLimiter {
- if maxEvents < 0 {
- panic("maxEvents cannot be less than zero")
- }
- if maxEvents == 0 && window != 0 {
- panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
- }
- rbrl := &RingBufferRateLimiter{
- window: window,
- ring: make([]time.Time, maxEvents),
- started: make(chan struct{}),
- stopped: make(chan struct{}),
- ticket: make(chan struct{}),
- }
- go rbrl.loop()
- <-rbrl.started // make sure loop is ready to receive before we return
- return rbrl
-}
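
Typical use of the limiter, written as if inside this package; jobs and process are illustrative placeholders:

// throttle processes jobs at no more than ten per minute, returning
// early if ctx is cancelled while waiting for a slot.
func throttle(ctx context.Context, jobs []string, process func(string)) error {
	rl := NewRateLimiter(10, time.Minute)
	defer rl.Stop()
	for _, job := range jobs {
		if err := rl.Wait(ctx); err != nil {
			return err
		}
		process(job)
	}
	return nil
}
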
-
-// RingBufferRateLimiter uses a ring to enforce rate limits
-// consisting of a maximum number of events within a single
-// sliding window of a given duration. An empty value is
-// not valid; use NewRateLimiter to get one.
-type RingBufferRateLimiter struct {
- window time.Duration
- ring []time.Time // maxEvents == len(ring)
- cursor int // always points to the oldest timestamp
- mu sync.Mutex // protects ring, cursor, and window
- started chan struct{}
- stopped chan struct{}
- ticket chan struct{}
-}
-
-// Stop cleans up r's scheduling goroutine.
-func (r *RingBufferRateLimiter) Stop() {
- close(r.stopped)
-}
-
-func (r *RingBufferRateLimiter) loop() {
- defer func() {
- if err := recover(); err != nil {
- buf := make([]byte, stackTraceBufferSize)
- buf = buf[:runtime.Stack(buf, false)]
- log.Printf("panic: ring buffer rate limiter: %v\n%s", err, buf)
- }
- }()
-
- for {
- // if we've been stopped, return
- select {
- case <-r.stopped:
- return
- default:
- }
-
- if len(r.ring) == 0 {
- if r.window == 0 {
- // rate limiting is disabled; always allow immediately
- r.permit()
- continue
- }
- panic("invalid configuration: maxEvents = 0 and window != 0 does not allow any events")
- }
-
- // wait until next slot is available or until we've been stopped
- r.mu.Lock()
- then := r.ring[r.cursor].Add(r.window)
- r.mu.Unlock()
- waitDuration := time.Until(then)
- waitTimer := time.NewTimer(waitDuration)
- select {
- case <-waitTimer.C:
- r.permit()
- case <-r.stopped:
- waitTimer.Stop()
- return
- }
- }
-}
-
-// Allow returns true if the event is allowed to
-// happen right now. It does not wait. If the event
-// is allowed, a ticket is claimed.
-func (r *RingBufferRateLimiter) Allow() bool {
- select {
- case <-r.ticket:
- return true
- default:
- return false
- }
-}
-
-// Wait blocks until the event is allowed to occur. It returns an
-// error if the context is cancelled.
-func (r *RingBufferRateLimiter) Wait(ctx context.Context) error {
- select {
- case <-ctx.Done():
-		return ctx.Err()
- case <-r.ticket:
- return nil
- }
-}
-
-// MaxEvents returns the maximum number of events that
-// are allowed within the sliding window.
-func (r *RingBufferRateLimiter) MaxEvents() int {
- r.mu.Lock()
- defer r.mu.Unlock()
- return len(r.ring)
-}
-
-// SetMaxEvents changes the maximum number of events that are
-// allowed in the sliding window. If the new limit is lower,
-// the oldest events will be forgotten. If the new limit is
-// higher, the window will suddenly have capacity for new
-// reservations. It panics if maxEvents is 0 and window size
-// is not zero.
-func (r *RingBufferRateLimiter) SetMaxEvents(maxEvents int) {
- newRing := make([]time.Time, maxEvents)
- r.mu.Lock()
- defer r.mu.Unlock()
-
- if r.window != 0 && maxEvents == 0 {
- panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
- }
-
- // only make the change if the new limit is different
- if maxEvents == len(r.ring) {
- return
- }
-
- // the new ring may be smaller; fast-forward to the
- // oldest timestamp that will be kept in the new
- // ring so the oldest ones are forgotten and the
- // newest ones will be remembered
- sizeDiff := len(r.ring) - maxEvents
- for i := 0; i < sizeDiff; i++ {
- r.advance()
- }
-
- if len(r.ring) > 0 {
- // copy timestamps into the new ring until we
- // have either copied all of them or have reached
- // the capacity of the new ring
- startCursor := r.cursor
- for i := 0; i < len(newRing); i++ {
- newRing[i] = r.ring[r.cursor]
- r.advance()
- if r.cursor == startCursor {
- // new ring is larger than old one;
- // "we've come full circle"
- break
- }
- }
- }
-
- r.ring = newRing
- r.cursor = 0
-}
-
-// Window returns the size of the sliding window.
-func (r *RingBufferRateLimiter) Window() time.Duration {
- r.mu.Lock()
- defer r.mu.Unlock()
- return r.window
-}
-
-// SetWindow changes r's sliding window duration to window.
-// Goroutines that are already blocked on a call to Wait()
-// will not be affected. It panics if window is non-zero
-// but the max event limit is 0.
-func (r *RingBufferRateLimiter) SetWindow(window time.Duration) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if window != 0 && len(r.ring) == 0 {
- panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
- }
- r.window = window
-}
-
-// permit allows one event through the throttle. This method
-// blocks until a goroutine is waiting for a ticket or until
-// the rate limiter is stopped.
-func (r *RingBufferRateLimiter) permit() {
- for {
- select {
- case r.started <- struct{}{}:
- // notify parent goroutine that we've started; should
- // only happen once, before constructor returns
- continue
- case <-r.stopped:
- return
- case r.ticket <- struct{}{}:
- r.mu.Lock()
- defer r.mu.Unlock()
- if len(r.ring) > 0 {
- r.ring[r.cursor] = time.Now()
- r.advance()
- }
- return
- }
- }
-}
-
-// advance moves the cursor to the next position.
-// It is NOT safe for concurrent use, so it must
-// be called inside a lock on r.mu.
-func (r *RingBufferRateLimiter) advance() {
- r.cursor++
- if r.cursor >= len(r.ring) {
- r.cursor = 0
- }
-}
diff --git a/vendor/github.com/caddyserver/certmagic/solvers.go b/vendor/github.com/caddyserver/certmagic/solvers.go
deleted file mode 100644
index 8cdaeaf8..00000000
--- a/vendor/github.com/caddyserver/certmagic/solvers.go
+++ /dev/null
@@ -1,686 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "context"
- "crypto/tls"
- "encoding/json"
- "fmt"
- "log"
- "net"
- "net/http"
- "path"
- "runtime"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/libdns/libdns"
- "github.com/mholt/acmez"
- "github.com/mholt/acmez/acme"
- "github.com/miekg/dns"
-)
-
-// httpSolver solves the HTTP challenge. It must be
-// associated with a config and an address to use
-// for solving the challenge. If multiple httpSolvers
-// are initialized concurrently, the first one to
-// begin will start the server, and the last one to
-// finish will stop the server. This solver must be
-// wrapped by a distributedSolver to work properly,
-// because the only way the HTTP challenge handler
-// can access the keyAuth material is by loading it
-// from storage, which is done by distributedSolver.
-type httpSolver struct {
- closed int32 // accessed atomically
- acmeManager *ACMEManager
- address string
-}
-
-// Present starts an HTTP server if none is already listening on s.address.
-func (s *httpSolver) Present(ctx context.Context, _ acme.Challenge) error {
- solversMu.Lock()
- defer solversMu.Unlock()
-
- si := getSolverInfo(s.address)
- si.count++
- if si.listener != nil {
-		return nil // already being served by us
- }
-
- // notice the unusual error handling here; we
- // only continue to start a challenge server if
- // we got a listener; in all other cases return
- ln, err := robustTryListen(s.address)
- if ln == nil {
- return err
- }
-
- // successfully bound socket, so save listener and start key auth HTTP server
- si.listener = ln
- go s.serve(si)
-
- return nil
-}
-
-// serve is an HTTP server that serves only HTTP challenge responses.
-func (s *httpSolver) serve(si *solverInfo) {
- defer func() {
- if err := recover(); err != nil {
- buf := make([]byte, stackTraceBufferSize)
- buf = buf[:runtime.Stack(buf, false)]
- log.Printf("panic: http solver server: %v\n%s", err, buf)
- }
- }()
- defer close(si.done)
- httpServer := &http.Server{Handler: s.acmeManager.HTTPChallengeHandler(http.NewServeMux())}
- httpServer.SetKeepAlivesEnabled(false)
- err := httpServer.Serve(si.listener)
- if err != nil && atomic.LoadInt32(&s.closed) != 1 {
- log.Printf("[ERROR] key auth HTTP server: %v", err)
- }
-}
-
-// CleanUp cleans up the HTTP server if it is the last one to finish.
-func (s *httpSolver) CleanUp(ctx context.Context, _ acme.Challenge) error {
- solversMu.Lock()
- defer solversMu.Unlock()
- si := getSolverInfo(s.address)
- si.count--
- if si.count == 0 {
- // last one out turns off the lights
- atomic.StoreInt32(&s.closed, 1)
- if si.listener != nil {
- si.listener.Close()
- <-si.done
- }
- delete(solvers, s.address)
- }
- return nil
-}
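
Present and CleanUp reference-count a shared listener per address: the first solver to arrive starts the challenge server, and the last one to leave stops it. The pattern in isolation, with illustrative names:

package sketch

import (
	"net"
	"sync"
)

// shared reference-counts a listener so concurrent users share one server.
type shared struct {
	mu    sync.Mutex
	count int
	ln    net.Listener
}

// acquire starts the listener on first use and bumps the refcount.
func (s *shared) acquire(start func() (net.Listener, error)) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.count++
	if s.ln != nil {
		return nil // already being served
	}
	ln, err := start()
	if err != nil {
		s.count--
		return err
	}
	s.ln = ln
	return nil
}

// release drops the refcount; the last one out closes the listener.
func (s *shared) release() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.count--; s.count == 0 && s.ln != nil {
		s.ln.Close()
		s.ln = nil
	}
}
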
-
-// tlsALPNSolver is a type that can solve TLS-ALPN challenges.
-// It must have an associated config and address on which to
-// serve the challenge.
-type tlsALPNSolver struct {
- config *Config
- address string
-}
-
-// Present adds the certificate to the certificate cache and, if
-// needed, starts a TLS server for answering TLS-ALPN challenges.
-func (s *tlsALPNSolver) Present(ctx context.Context, chal acme.Challenge) error {
- // we pre-generate the certificate for efficiency with multi-perspective
- // validation, so it only has to be done once (at least, by this instance;
- // distributed solving does not have that luxury, oh well) - update the
- // challenge data in memory to be the generated certificate
- cert, err := acmez.TLSALPN01ChallengeCert(chal)
- if err != nil {
- return err
- }
-
- key := challengeKey(chal)
- activeChallengesMu.Lock()
- chalData := activeChallenges[key]
- chalData.data = cert
- activeChallenges[key] = chalData
- activeChallengesMu.Unlock()
-
- // the rest of this function increments the
- // challenge count for the solver at this
- // listener address, and if necessary, starts
- // a simple TLS server
-
- solversMu.Lock()
- defer solversMu.Unlock()
-
- si := getSolverInfo(s.address)
- si.count++
- if si.listener != nil {
-		return nil // already being served by us
- }
-
- // notice the unusual error handling here; we
- // only continue to start a challenge server if
- // we got a listener; in all other cases return
- ln, err := robustTryListen(s.address)
- if ln == nil {
- return err
- }
-
- // we were able to bind the socket, so make it into a TLS
- // listener, store it with the solverInfo, and start the
- // challenge server
-
- si.listener = tls.NewListener(ln, s.config.TLSConfig())
-
- go func() {
- defer func() {
- if err := recover(); err != nil {
- buf := make([]byte, stackTraceBufferSize)
- buf = buf[:runtime.Stack(buf, false)]
- log.Printf("panic: tls-alpn solver server: %v\n%s", err, buf)
- }
- }()
- defer close(si.done)
- for {
- conn, err := si.listener.Accept()
- if err != nil {
- if atomic.LoadInt32(&si.closed) == 1 {
- return
- }
- log.Printf("[ERROR] TLS-ALPN challenge server: accept: %v", err)
- continue
- }
- go s.handleConn(conn)
- }
- }()
-
- return nil
-}
-
-// handleConn completes the TLS handshake and then closes conn.
-func (*tlsALPNSolver) handleConn(conn net.Conn) {
- defer func() {
- if err := recover(); err != nil {
- buf := make([]byte, stackTraceBufferSize)
- buf = buf[:runtime.Stack(buf, false)]
- log.Printf("panic: tls-alpn solver handler: %v\n%s", err, buf)
- }
- }()
- defer conn.Close()
- tlsConn, ok := conn.(*tls.Conn)
- if !ok {
- log.Printf("[ERROR] TLS-ALPN challenge server: expected tls.Conn but got %T: %#v", conn, conn)
- return
- }
- err := tlsConn.Handshake()
- if err != nil {
- log.Printf("[ERROR] TLS-ALPN challenge server: handshake: %v", err)
- return
- }
-}
-
-// CleanUp removes the challenge certificate from the cache, and if
-// it is the last one to finish, stops the TLS server.
-func (s *tlsALPNSolver) CleanUp(ctx context.Context, chal acme.Challenge) error {
- solversMu.Lock()
- defer solversMu.Unlock()
- si := getSolverInfo(s.address)
- si.count--
- if si.count == 0 {
- // last one out turns off the lights
- atomic.StoreInt32(&si.closed, 1)
- if si.listener != nil {
- si.listener.Close()
- <-si.done
- }
- delete(solvers, s.address)
- }
-
- return nil
-}
-
-// DNS01Solver is a type that makes libdns providers usable
-// as ACME dns-01 challenge solvers.
-// See https://github.com/libdns/libdns
-type DNS01Solver struct {
- // The implementation that interacts with the DNS
- // provider to set or delete records. (REQUIRED)
- DNSProvider ACMEDNSProvider
-
- // The TTL for the temporary challenge records.
- TTL time.Duration
-
- // Maximum time to wait for temporary record to appear.
- PropagationTimeout time.Duration
-
- // Preferred DNS resolver(s) to use when doing DNS lookups.
- Resolvers []string
-
- txtRecords map[string]dnsPresentMemory // keyed by domain name
- txtRecordsMu sync.Mutex
-}
-
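-// An illustrative configuration sketch (not part of the original
-// source; myProvider stands in for any hypothetical libdns
-// implementation that can append and delete records):
-//
-//	solver := &DNS01Solver{
-//		DNSProvider:        myProvider, // satisfies ACMEDNSProvider
-//		TTL:                time.Minute,
-//		PropagationTimeout: 2 * time.Minute,
-//	}
-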
-// Present creates the DNS TXT record for the given ACME challenge.
-func (s *DNS01Solver) Present(ctx context.Context, challenge acme.Challenge) error {
- dnsName := challenge.DNS01TXTRecordName()
- keyAuth := challenge.DNS01KeyAuthorization()
-
- // multiple identifiers can have the same ACME challenge
- // domain (e.g. example.com and *.example.com) so we need
- // to ensure that we don't solve those concurrently and
- // step on each challenge's metaphorical toes; see
- // https://github.com/caddyserver/caddy/issues/3474
- activeDNSChallenges.Lock(dnsName)
-
- zone, err := findZoneByFQDN(dnsName, recursiveNameservers(s.Resolvers))
- if err != nil {
- return fmt.Errorf("could not determine zone for domain %q: %v", dnsName, err)
- }
-
- rec := libdns.Record{
- Type: "TXT",
- Name: libdns.RelativeName(dnsName+".", zone),
- Value: keyAuth,
- TTL: s.TTL,
- }
-
- results, err := s.DNSProvider.AppendRecords(ctx, zone, []libdns.Record{rec})
- if err != nil {
- return fmt.Errorf("adding temporary record for zone %s: %w", zone, err)
- }
- if len(results) != 1 {
- return fmt.Errorf("expected one record, got %d: %v", len(results), results)
- }
-
- // remember the record and zone we got so we can clean up more efficiently
- s.txtRecordsMu.Lock()
- if s.txtRecords == nil {
- s.txtRecords = make(map[string]dnsPresentMemory)
- }
- s.txtRecords[dnsName] = dnsPresentMemory{dnsZone: zone, rec: results[0]}
- s.txtRecordsMu.Unlock()
-
- return nil
-}
-
-// Wait blocks until the TXT record created in Present() appears in
-// authoritative lookups, i.e. until it has propagated, or until
-// timeout, whichever is first.
-func (s *DNS01Solver) Wait(ctx context.Context, challenge acme.Challenge) error {
- dnsName := challenge.DNS01TXTRecordName()
- keyAuth := challenge.DNS01KeyAuthorization()
-
- timeout := s.PropagationTimeout
- if timeout == 0 {
- timeout = 2 * time.Minute
- }
- const interval = 2 * time.Second
-
- resolvers := recursiveNameservers(s.Resolvers)
-
- var err error
- start := time.Now()
- for time.Since(start) < timeout {
- select {
- case <-time.After(interval):
- case <-ctx.Done():
- return ctx.Err()
- }
- var ready bool
- ready, err = checkDNSPropagation(dnsName, keyAuth, resolvers)
- if err != nil {
- return fmt.Errorf("checking DNS propagation of %s: %w", dnsName, err)
- }
- if ready {
- return nil
- }
- }
-
- return fmt.Errorf("timed out waiting for record to fully propagate; verify DNS provider configuration is correct - last error: %v", err)
-}
-
-// CleanUp deletes the DNS TXT record created in Present().
-func (s *DNS01Solver) CleanUp(ctx context.Context, challenge acme.Challenge) error {
- dnsName := challenge.DNS01TXTRecordName()
-
- defer func() {
- // always forget about it so we don't leak memory
- s.txtRecordsMu.Lock()
- delete(s.txtRecords, dnsName)
- s.txtRecordsMu.Unlock()
-
- // always do this last - but always do it!
- activeDNSChallenges.Unlock(dnsName)
- }()
-
- // recall the record we created and zone we looked up
- s.txtRecordsMu.Lock()
- memory, ok := s.txtRecords[dnsName]
- if !ok {
- s.txtRecordsMu.Unlock()
- return fmt.Errorf("no memory of presenting a DNS record for %s (probably OK if presenting failed)", challenge.Identifier.Value)
- }
- s.txtRecordsMu.Unlock()
-
- // clean up the record
- _, err := s.DNSProvider.DeleteRecords(ctx, memory.dnsZone, []libdns.Record{memory.rec})
- if err != nil {
- return fmt.Errorf("deleting temporary record for zone %s: %w", memory.dnsZone, err)
- }
-
- return nil
-}
-
-type dnsPresentMemory struct {
- dnsZone string
- rec libdns.Record
-}
-
-// ACMEDNSProvider defines the set of operations required for
-// ACME challenges. A DNS provider must be able to append and
-// delete records in order to solve ACME challenges. Find one
-// you can use at https://github.com/libdns. If your provider
-// isn't implemented yet, feel free to contribute!
-type ACMEDNSProvider interface {
- libdns.RecordAppender
- libdns.RecordDeleter
-}
-
-// activeDNSChallenges synchronizes DNS challenges for
-// names to ensure that challenges for the same ACME
-// DNS name do not overlap; for example, the TXT record
-// to make for both example.com and *.example.com are
-// the same; thus we cannot solve them concurrently.
-var activeDNSChallenges = newMapMutex()
-
-// mapMutex implements named mutexes.
-type mapMutex struct {
- cond *sync.Cond
- set map[interface{}]struct{}
-}
-
-func newMapMutex() *mapMutex {
- return &mapMutex{
- cond: sync.NewCond(new(sync.Mutex)),
- set: make(map[interface{}]struct{}),
- }
-}
-
-func (mmu *mapMutex) Lock(key interface{}) {
- mmu.cond.L.Lock()
- defer mmu.cond.L.Unlock()
- for mmu.locked(key) {
- mmu.cond.Wait()
- }
- mmu.set[key] = struct{}{}
-}
-
-func (mmu *mapMutex) Unlock(key interface{}) {
- mmu.cond.L.Lock()
- defer mmu.cond.L.Unlock()
- delete(mmu.set, key)
- mmu.cond.Broadcast()
-}
-
-func (mmu *mapMutex) locked(key interface{}) (ok bool) {
- _, ok = mmu.set[key]
- return
-}
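-
-// Illustrative usage sketch (not in the original source): a mapMutex
-// serializes work per key, e.g. per DNS name, as in Present/CleanUp:
-//
-//	activeDNSChallenges.Lock("example.com")
-//	defer activeDNSChallenges.Unlock("example.com")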
-
-// distributedSolver allows the ACME HTTP-01 and TLS-ALPN challenges
-// to be solved by an instance other than the one which initiated it.
-// This is useful behind load balancers or in other cluster/fleet
-// configurations. The only requirement is that the instance which
-// initiates the challenge shares the same storage and locker with
-// the others in the cluster. The storage backing the certificate
-// cache in distributedSolver.config is crucial.
-//
-// Obviously, the instance which completes the challenge must be
-// serving on the HTTPChallengePort for the HTTP-01 challenge or the
-// TLSALPNChallengePort for the TLS-ALPN-01 challenge (or have all
-// the packets port-forwarded) to receive and handle the request. The
-// server which receives the challenge must handle it by checking to
-// see if the challenge token exists in storage, and if so, decode it
-// and use it to serve up the correct response. HTTPChallengeHandler
-// in this package as well as the GetCertificate method implemented
-// by a Config support and even require this behavior.
-//
-// In short: the only two requirements for cluster operation are
-// sharing sync and storage, and using the facilities provided by
-// this package for solving the challenges.
-type distributedSolver struct {
- // The storage backing the distributed solver. It must be
- // the same storage configuration as what is solving the
- // challenge in order to be effective.
- storage Storage
-
- // The storage key prefix, associated with the issuer
- // that is solving the challenge.
- storageKeyIssuerPrefix string
-
- // Since the distributedSolver is only a
- // wrapper over an actual solver, place
- // the actual solver here.
- solver acmez.Solver
-}
-
-// Present invokes the underlying solver's Present method
-// and also stores domain, token, and keyAuth to the storage
-// backing the certificate cache of dhs.acmeManager.
-func (dhs distributedSolver) Present(ctx context.Context, chal acme.Challenge) error {
- infoBytes, err := json.Marshal(chal)
- if err != nil {
- return err
- }
-
- err = dhs.storage.Store(dhs.challengeTokensKey(challengeKey(chal)), infoBytes)
- if err != nil {
- return err
- }
-
- err = dhs.solver.Present(ctx, chal)
- if err != nil {
- return fmt.Errorf("presenting with embedded solver: %v", err)
- }
- return nil
-}
-
-// Wait wraps the underlying solver's Wait() method, if any. Implements acmez.Waiter.
-func (dhs distributedSolver) Wait(ctx context.Context, challenge acme.Challenge) error {
- if waiter, ok := dhs.solver.(acmez.Waiter); ok {
- return waiter.Wait(ctx, challenge)
- }
- return nil
-}
-
-// CleanUp invokes the underlying solver's CleanUp method
-// and also cleans up any assets saved to storage.
-func (dhs distributedSolver) CleanUp(ctx context.Context, chal acme.Challenge) error {
- err := dhs.storage.Delete(dhs.challengeTokensKey(challengeKey(chal)))
- if err != nil {
- return err
- }
- err = dhs.solver.CleanUp(ctx, chal)
- if err != nil {
- return fmt.Errorf("cleaning up embedded provider: %v", err)
- }
- return nil
-}
-
-// challengeTokensPrefix returns the key prefix for challenge info.
-func (dhs distributedSolver) challengeTokensPrefix() string {
- return path.Join(dhs.storageKeyIssuerPrefix, "challenge_tokens")
-}
-
-// challengeTokensKey returns the key to use to store and access
-// challenge info for domain.
-func (dhs distributedSolver) challengeTokensKey(domain string) string {
- return path.Join(dhs.challengeTokensPrefix(), StorageKeys.Safe(domain)+".json")
-}
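-
-// For illustration (assuming a hypothetical issuer prefix
-// "acme/example-ca"): challengeTokensKey("example.com") yields
-// "acme/example-ca/challenge_tokens/example.com.json".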
-
-// solverInfo associates a listener with the
-// number of challenges currently using it.
-type solverInfo struct {
- closed int32 // accessed atomically
- count int
- listener net.Listener
- done chan struct{} // used to signal when our own solver server is done
-}
-
-// getSolverInfo gets a valid solverInfo struct for address.
-func getSolverInfo(address string) *solverInfo {
- si, ok := solvers[address]
- if !ok {
- si = &solverInfo{done: make(chan struct{})}
- solvers[address] = si
- }
- return si
-}
-
-// robustTryListen calls net.Listen for a TCP socket at addr.
-// This function may return both a nil listener and a nil error!
-// If it was able to bind the socket, it returns the listener
-// and no error. If it wasn't able to bind the socket because
-// the socket is already in use, then it returns a nil listener
-// and nil error. If it had any other error, it returns the
-// error. The intended error handling logic for this function
-// is to proceed if the returned listener is not nil; otherwise
-// return err (which may also be nil). In other words, this
-// function ignores errors if the socket is already in use,
-// which is useful for our challenge servers, where we assume
-// that whatever is already listening can solve the challenges.
-func robustTryListen(addr string) (net.Listener, error) {
- var listenErr error
- for i := 0; i < 2; i++ {
- // doesn't hurt to sleep briefly before the second
- // attempt in case the OS has timing issues
- if i > 0 {
- time.Sleep(100 * time.Millisecond)
- }
-
- // if we can bind the socket right away, great!
- var ln net.Listener
- ln, listenErr = net.Listen("tcp", addr)
- if listenErr == nil {
- return ln, nil
- }
-
- // if it failed just because the socket is already in use, we
- // have no choice but to assume that whatever is using the socket
- // can answer the challenge already, so we ignore the error
- connectErr := dialTCPSocket(addr)
- if connectErr == nil {
- return nil, nil
- }
-
- // hmm, we couldn't connect to the socket, so something else must
- // be wrong, right? wrong!! we've had reports across multiple OSes
- // now that sometimes connections fail even though the OS told us
- // that the address was already in use; either the listener is
- // fluctuating between open and closed very, very quickly, or the
- // OS is inconsistent and contradicting itself; I have been unable
- // to reproduce this, so I'm now resorting to hard-coding substring
- // matching in error messages as a really hacky and unreliable
- // safeguard against this, until we can identify exactly what was
- // happening; see the following threads for more info:
- // https://caddy.community/t/caddy-retry-error/7317
- // https://caddy.community/t/v2-upgrade-to-caddy2-failing-with-errors/7423
- if strings.Contains(listenErr.Error(), "address already in use") ||
- strings.Contains(listenErr.Error(), "one usage of each socket address") {
- log.Printf("[WARNING] OS reports a contradiction: %v - but we cannot connect to it, with this error: %v; continuing anyway 🤞 (I don't know what causes this... if you do, please help?)", listenErr, connectErr)
- return nil, nil
- }
- }
- return nil, fmt.Errorf("could not start listener for challenge server at %s: %v", addr, listenErr)
-}
-
-// dialTCPSocket connects to a TCP address just for the sake of
-// seeing if it is open. It returns a nil error if a TCP connection
-// can successfully be made to addr within a short timeout.
-func dialTCPSocket(addr string) error {
- conn, err := net.DialTimeout("tcp", addr, 250*time.Millisecond)
- if err == nil {
- conn.Close()
- }
- return err
-}
-
-// GetACMEChallenge returns an active ACME challenge for the given identifier,
-// or false if no active challenge for that identifier is known.
-func GetACMEChallenge(identifier string) (Challenge, bool) {
- activeChallengesMu.Lock()
- chalData, ok := activeChallenges[identifier]
- activeChallengesMu.Unlock()
- return chalData, ok
-}
-
-// The active challenge solvers, keyed by listener address,
-// and protected by a mutex. Note that the creation of
-// solver listeners and the incrementing of their counts
-// are atomic operations guarded by this mutex.
-var (
- solvers = make(map[string]*solverInfo)
- solversMu sync.Mutex
-)
-
-// activeChallenges holds information about all known, currently-active
-// ACME challenges, keyed by identifier. CertMagic guarantees that
-// challenges for the same identifier do not overlap, by its locking
-// mechanisms; thus if a challenge comes in for a certain identifier,
-// we can be confident that if this process initiated the challenge,
-// the correct information to solve it is in this map. (It may have
-// alternatively been initiated by another instance in a cluster, in
-// which case the distributed solver will take care of that.)
-var (
- activeChallenges = make(map[string]Challenge)
- activeChallengesMu sync.Mutex
-)
-
-// Challenge is an ACME challenge, but optionally paired with
-// data that can make it easier or more efficient to solve.
-type Challenge struct {
- acme.Challenge
- data interface{}
-}
-
-// challengeKey returns the map key for a given challenge; it is the identifier
-// unless it is an IP address using the TLS-ALPN challenge.
-func challengeKey(chal acme.Challenge) string {
- if chal.Type == acme.ChallengeTypeTLSALPN01 && chal.Identifier.Type == "ip" {
- reversed, err := dns.ReverseAddr(chal.Identifier.Value)
- if err == nil {
- return reversed[:len(reversed)-1] // strip off '.'
- }
- }
- return chal.Identifier.Value
-}
-
-// solverWrapper should be used to wrap all challenge solvers so that
-// we can add the challenge info to memory; this makes challenges globally
-// solvable by a single HTTP or TLS server even if multiple servers with
-// different configurations/scopes need to get certificates.
-type solverWrapper struct{ acmez.Solver }
-
-func (sw solverWrapper) Present(ctx context.Context, chal acme.Challenge) error {
- activeChallengesMu.Lock()
- activeChallenges[challengeKey(chal)] = Challenge{Challenge: chal}
- activeChallengesMu.Unlock()
- return sw.Solver.Present(ctx, chal)
-}
-
-func (sw solverWrapper) Wait(ctx context.Context, chal acme.Challenge) error {
- if waiter, ok := sw.Solver.(acmez.Waiter); ok {
- return waiter.Wait(ctx, chal)
- }
- return nil
-}
-
-func (sw solverWrapper) CleanUp(ctx context.Context, chal acme.Challenge) error {
- activeChallengesMu.Lock()
- delete(activeChallenges, challengeKey(chal))
- activeChallengesMu.Unlock()
- return sw.Solver.CleanUp(ctx, chal)
-}
-
-// Interface guards
-var (
- _ acmez.Solver = (*solverWrapper)(nil)
- _ acmez.Waiter = (*solverWrapper)(nil)
- _ acmez.Waiter = (*distributedSolver)(nil)
-)
diff --git a/vendor/github.com/caddyserver/certmagic/storage.go b/vendor/github.com/caddyserver/certmagic/storage.go
deleted file mode 100644
index 804a4740..00000000
--- a/vendor/github.com/caddyserver/certmagic/storage.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2015 Matthew Holt
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package certmagic
-
-import (
- "context"
- "path"
- "regexp"
- "strings"
- "sync"
- "time"
-
- "go.uber.org/zap"
-)
-
-// Storage is a type that implements a key-value store.
-// Keys are prefix-based, with forward slash '/' as separators
-// and without a leading slash.
-//
-// Processes running in a cluster will wish to use the
-// same Storage value (its implementation and configuration)
-// in order to share certificates and other TLS resources
-// with the cluster.
-//
-// The Load, Delete, List, and Stat methods should return
-// ErrNotExist if the key does not exist.
-//
-// Implementations of Storage must be safe for concurrent use.
-type Storage interface {
- // Locker provides atomic synchronization
- // operations, making Storage safe to share.
- Locker
-
- // Store puts value at key.
- Store(key string, value []byte) error
-
- // Load retrieves the value at key.
- Load(key string) ([]byte, error)
-
- // Delete deletes key. An error should be
- // returned only if the key still exists
- // when the method returns.
- Delete(key string) error
-
- // Exists returns true if the key exists
- // and there was no error checking.
- Exists(key string) bool
-
- // List returns all keys that match prefix.
- // If recursive is true, non-terminal keys
- // will be enumerated (i.e. "directories"
- // should be walked); otherwise, only keys
- // prefixed exactly by prefix will be listed.
- List(prefix string, recursive bool) ([]string, error)
-
- // Stat returns information about key.
- Stat(key string) (KeyInfo, error)
-}
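-
-// For illustration: this package's stock implementation is
-// FileStorage (see defaultFileStorage near the end of this file),
-// which persists items as files on the local file system.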
-
-// Locker facilitates synchronization of certificate tasks across
-// machines and networks.
-type Locker interface {
- // Lock acquires the lock for key, blocking until the lock
- // can be obtained or an error is returned. Note that, even
- // after acquiring a lock, an idempotent operation may have
- // already been performed by another process that acquired
- // the lock before - so always check to make sure idempotent
- // operations still need to be performed after acquiring the
- // lock.
- //
- // The actual implementation of obtaining a lock must be an
- // atomic operation so that multiple Lock calls at the same
- // time always result in only one caller receiving the lock
- // at any given time.
- //
- // To prevent deadlocks, all implementations (where this concern
- // is relevant) should put a reasonable expiration on the lock in
- // case Unlock is unable to be called due to some sort of network
- // failure or system crash. Additionally, implementations should
- // honor context cancellation as much as possible (in case the
- // caller wishes to give up and free resources before the lock
- // can be obtained).
- Lock(ctx context.Context, key string) error
-
- // Unlock releases the lock for key. This method must ONLY be
- // called after a successful call to Lock, and only after the
- // critical section is finished, even if it errored or timed
- // out. Unlock cleans up any resources allocated during Lock.
- Unlock(key string) error
-}
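-
-// A typical usage pattern, sketched for illustration (the key name
-// and variables are hypothetical):
-//
-//	if err := storage.Lock(ctx, "issue_cert_example.com"); err != nil {
-//		return err
-//	}
-//	defer storage.Unlock("issue_cert_example.com")
-//	// re-check whether the work still needs doing; another
-//	// process may have completed it before we got the lock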
-
-// KeyInfo holds information about a key in storage.
-// Key and IsTerminal are required; Modified and Size
-// are optional if the storage implementation is not
-// able to get that information. Setting them will
-// make certain operations more consistent or
-// predictable, but it is not crucial to basic
-// functionality.
-type KeyInfo struct {
- Key string
- Modified time.Time
- Size int64
- IsTerminal bool // false for keys that only contain other keys (like directories)
-}
-
-// storeTx stores all the values or none at all.
-func storeTx(s Storage, all []keyValue) error {
- for i, kv := range all {
- err := s.Store(kv.key, kv.value)
- if err != nil {
- for j := i - 1; j >= 0; j-- {
- s.Delete(all[j].key)
- }
- return err
- }
- }
- return nil
-}
-
-// keyValue pairs a key and a value.
-type keyValue struct {
- key string
- value []byte
-}
-
-// KeyBuilder provides a namespace for methods that
-// build keys and key prefixes, for addressing items
-// in a Storage implementation.
-type KeyBuilder struct{}
-
-// CertsPrefix returns the storage key prefix for
-// the given certificate issuer.
-func (keys KeyBuilder) CertsPrefix(issuerKey string) string {
- return path.Join(prefixCerts, keys.Safe(issuerKey))
-}
-
-// CertsSitePrefix returns a key prefix for items associated with
-// the site given by domain using the given issuer key.
-func (keys KeyBuilder) CertsSitePrefix(issuerKey, domain string) string {
- return path.Join(keys.CertsPrefix(issuerKey), keys.Safe(domain))
-}
-
-// SiteCert returns the path to the certificate file for domain
-// that is associated with the issuer with the given issuerKey.
-func (keys KeyBuilder) SiteCert(issuerKey, domain string) string {
- safeDomain := keys.Safe(domain)
- return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".crt")
-}
-
-// SitePrivateKey returns the path to the private key file for domain
-// that is associated with the certificate from the given issuer with
-// the given issuerKey.
-func (keys KeyBuilder) SitePrivateKey(issuerKey, domain string) string {
- safeDomain := keys.Safe(domain)
- return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".key")
-}
-
-// SiteMeta returns the path to the metadata file for domain that
-// is associated with the certificate from the given issuer with
-// the given issuerKey.
-func (keys KeyBuilder) SiteMeta(issuerKey, domain string) string {
- safeDomain := keys.Safe(domain)
- return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".json")
-}
-
-// OCSPStaple returns a key for the OCSP staple associated
-// with the given certificate. If you have the PEM bundle
-// handy, pass that in to save an extra encoding step.
-func (keys KeyBuilder) OCSPStaple(cert *Certificate, pemBundle []byte) string {
- var ocspFileName string
- if len(cert.Names) > 0 {
- firstName := keys.Safe(cert.Names[0])
- ocspFileName = firstName + "-"
- }
- ocspFileName += fastHash(pemBundle)
- return path.Join(prefixOCSP, ocspFileName)
-}
-
-// Safe standardizes and sanitizes str for use as
-// a single component of a storage key. This method
-// is idempotent.
-func (keys KeyBuilder) Safe(str string) string {
- str = strings.ToLower(str)
- str = strings.TrimSpace(str)
-
- // replace a few specific characters
- repl := strings.NewReplacer(
- " ", "_",
- "+", "_plus_",
- "*", "wildcard_",
- ":", "-",
- "..", "", // prevent directory traversal (regex allows single dots)
- )
- str = repl.Replace(str)
-
- // finally remove all non-word characters
- return safeKeyRE.ReplaceAllLiteralString(str, "")
-}
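-
-// For example (illustrative): Safe("*.Example.com:443") returns
-// "wildcard_.example.com-443".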
-
-// CleanUpOwnLocks immediately cleans up all
-// current locks obtained by this process. Since
-// this does not cancel the operations that
-// the locks are synchronizing, this should be
-// called only immediately before process exit.
-// Errors are only reported if a logger is given.
-func CleanUpOwnLocks(logger *zap.Logger) {
- locksMu.Lock()
- defer locksMu.Unlock()
- for lockKey, storage := range locks {
- err := storage.Unlock(lockKey)
- if err == nil {
- delete(locks, lockKey)
- } else if logger != nil {
- logger.Error("unable to clean up lock in storage backend",
- zap.Any("storage", storage),
- zap.String("lock_key", lockKey),
- zap.Error(err),
- )
- }
- }
-}
-
-func acquireLock(ctx context.Context, storage Storage, lockKey string) error {
- err := storage.Lock(ctx, lockKey)
- if err == nil {
- locksMu.Lock()
- locks[lockKey] = storage
- locksMu.Unlock()
- }
- return err
-}
-
-func releaseLock(storage Storage, lockKey string) error {
- err := storage.Unlock(lockKey)
- if err == nil {
- locksMu.Lock()
- delete(locks, lockKey)
- locksMu.Unlock()
- }
- return err
-}
-
-// locks stores a reference to all the current
-// locks obtained by this process.
-var locks = make(map[string]Storage)
-var locksMu sync.Mutex
-
-// StorageKeys provides methods for accessing
-// keys and key prefixes for items in a Storage.
-// Typically, you will not need to use this
-// because accessing storage is abstracted away
-// for most cases. Only use this if you need to
-// directly access TLS assets in your application.
-var StorageKeys KeyBuilder
-
-const (
- prefixCerts = "certificates"
- prefixOCSP = "ocsp"
-)
-
-// safeKeyRE matches any undesirable characters in storage keys.
-// Note that this allows dots, so you'll have to strip ".." manually.
-var safeKeyRE = regexp.MustCompile(`[^\w@.-]`)
-
-// ErrNotExist is returned by Storage implementations when
-// a resource is not found. It is similar to os.ErrNotExist,
-// except this is a type rather than a sentinel value.
-// TODO: use new Go error wrapping conventions
-type ErrNotExist interface {
- error
-}
-
-// defaultFileStorage is a convenient, default storage
-// implementation using the local file system.
-var defaultFileStorage = &FileStorage{Path: dataDir()}
diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt
deleted file mode 100644
index 24b53065..00000000
--- a/vendor/github.com/cespare/xxhash/LICENSE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2016 Caleb Spare
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md
deleted file mode 100644
index 0982fd25..00000000
--- a/vendor/github.com/cespare/xxhash/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# xxhash
-
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-The API is very small, taking its cue from the other hashing packages in the
-standard library:
-
- $ go doc github.com/cespare/xxhash !
- package xxhash // import "github.com/cespare/xxhash"
-
- Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
- at http://cyan4973.github.io/xxHash/.
-
- func New() hash.Hash64
- func Sum64(b []byte) uint64
- func Sum64String(s string) uint64
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
-
-## Benchmarks
-
-Here are some quick benchmarks comparing the pure-Go and assembly
-implementations of Sum64 against another popular Go XXH64 implementation,
-[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash):
-
-| input size | OneOfOne | cespare (purego) | cespare |
-| --- | --- | --- | --- |
-| 5 B | 416 MB/s | 720 MB/s | 872 MB/s |
-| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s |
-| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s |
-| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s |
-
-These numbers were generated with:
-
-```
-$ go test -benchtime 10s -bench '/OneOfOne,'
-$ go test -tags purego -benchtime 10s -bench '/xxhash,'
-$ go test -benchtime 10s -bench '/xxhash,'
-```
-
-## Projects using this package
-
-- [InfluxDB](https://github.com/influxdata/influxdb)
-- [Prometheus](https://github.com/prometheus/prometheus)
diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go
deleted file mode 100644
index f3eac5eb..00000000
--- a/vendor/github.com/cespare/xxhash/rotate.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build !go1.9
-
-package xxhash
-
-// TODO(caleb): After Go 1.10 comes out, remove this fallback code.
-
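-// Each rolN returns x rotated left by N bits, i.e.
-// (x << N) | (x >> (64 - N)), equivalent to bits.RotateLeft64(x, N)
-// on Go 1.9 and newer.
-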
-func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) }
-func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) }
-func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
-func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
-func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
-func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
-func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
-func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go
deleted file mode 100644
index b99612ba..00000000
--- a/vendor/github.com/cespare/xxhash/rotate19.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build go1.9
-
-package xxhash
-
-import "math/bits"
-
-func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
-func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
-func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
-func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
-func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
-func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
-func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
-func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml
deleted file mode 100644
index c516ea88..00000000
--- a/vendor/github.com/cespare/xxhash/v2/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-go:
- - "1.x"
- - master
-env:
- - TAGS=""
- - TAGS="-tags purego"
-script: go test $TAGS -v ./...
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
deleted file mode 100644
index 24b53065..00000000
--- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2016 Caleb Spare
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
deleted file mode 100644
index 2fd8693c..00000000
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# xxhash
-
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-This package provides a straightforward API:
-
-```
-func Sum64(b []byte) uint64
-func Sum64String(s string) uint64
-type Digest struct{ ... }
- func New() *Digest
-```
-
-The `Digest` type implements hash.Hash64. Its key methods are:
-
-```
-func (*Digest) Write([]byte) (int, error)
-func (*Digest) WriteString(string) (int, error)
-func (*Digest) Sum64() uint64
-```
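-
-For example (an illustrative sketch):
-
-```
-d := xxhash.New()
-d.WriteString("hello, world")
-sum := d.Sum64()
-```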
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
-
-## Compatibility
-
-This package is in a module and the latest code is in version 2 of the module.
-You need a version of Go with at least "minimal module compatibility" to use
-github.com/cespare/xxhash/v2:
-
-* 1.9.7+ for Go 1.9
-* 1.10.3+ for Go 1.10
-* Go 1.11 or later
-
-I recommend using the latest release of Go.
-
-## Benchmarks
-
-Here are some quick benchmarks comparing the pure-Go and assembly
-implementations of Sum64.
-
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
-
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
-
-```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
-```
-
-## Projects using this package
-
-- [InfluxDB](https://github.com/influxdata/influxdb)
-- [Prometheus](https://github.com/prometheus/prometheus)
-- [FreeCache](https://github.com/coocood/freecache)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
deleted file mode 100644
index db0b35fb..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-// at http://cyan4973.github.io/xxHash/.
-package xxhash
-
-import (
- "encoding/binary"
- "errors"
- "math/bits"
-)
-
-const (
- prime1 uint64 = 11400714785074694791
- prime2 uint64 = 14029467366897019727
- prime3 uint64 = 1609587929392839161
- prime4 uint64 = 9650029242287828579
- prime5 uint64 = 2870177450012600261
-)
-
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
-
-// Digest implements hash.Hash64.
-type Digest struct {
- v1 uint64
- v2 uint64
- v3 uint64
- v4 uint64
- total uint64
- mem [32]byte
- n int // how much of mem is used
-}
-
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
-func New() *Digest {
- var d Digest
- d.Reset()
- return &d
-}
-
-// Reset clears the Digest's state so that it can be reused.
-func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
- d.v2 = prime2
- d.v3 = 0
- d.v4 = -prime1v
- d.total = 0
- d.n = 0
-}
-
-// Size always returns 8 bytes.
-func (d *Digest) Size() int { return 8 }
-
-// BlockSize always returns 32 bytes.
-func (d *Digest) BlockSize() int { return 32 }
-
-// Write adds more data to d. It always returns len(b), nil.
-func (d *Digest) Write(b []byte) (n int, err error) {
- n = len(b)
- d.total += uint64(n)
-
- if d.n+n < 32 {
- // This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
- d.n += n
- return
- }
-
- if d.n > 0 {
- // Finish off the partial block.
- copy(d.mem[d.n:], b)
- d.v1 = round(d.v1, u64(d.mem[0:8]))
- d.v2 = round(d.v2, u64(d.mem[8:16]))
- d.v3 = round(d.v3, u64(d.mem[16:24]))
- d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
- d.n = 0
- }
-
- if len(b) >= 32 {
- // One or more full blocks left.
- nw := writeBlocks(d, b)
- b = b[nw:]
- }
-
- // Store any remaining partial block.
- copy(d.mem[:], b)
- d.n = len(b)
-
- return
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-func (d *Digest) Sum(b []byte) []byte {
- s := d.Sum64()
- return append(
- b,
- byte(s>>56),
- byte(s>>48),
- byte(s>>40),
- byte(s>>32),
- byte(s>>24),
- byte(s>>16),
- byte(s>>8),
- byte(s),
- )
-}
-
-// Sum64 returns the current hash.
-func (d *Digest) Sum64() uint64 {
- var h uint64
-
- if d.total >= 32 {
- v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = d.v3 + prime5
- }
-
- h += d.total
-
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
- h = rol11(h) * prime1
- i++
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-const (
- magic = "xxh\x06"
- marshaledSize = len(magic) + 8*5 + 32
-)
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (d *Digest) MarshalBinary() ([]byte, error) {
- b := make([]byte, 0, marshaledSize)
- b = append(b, magic...)
- b = appendUint64(b, d.v1)
- b = appendUint64(b, d.v2)
- b = appendUint64(b, d.v3)
- b = appendUint64(b, d.v4)
- b = appendUint64(b, d.total)
- b = append(b, d.mem[:d.n]...)
- b = b[:len(b)+len(d.mem)-d.n]
- return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-func (d *Digest) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic) || string(b[:len(magic)]) != magic {
- return errors.New("xxhash: invalid hash state identifier")
- }
- if len(b) != marshaledSize {
- return errors.New("xxhash: invalid hash state size")
- }
- b = b[len(magic):]
- b, d.v1 = consumeUint64(b)
- b, d.v2 = consumeUint64(b)
- b, d.v3 = consumeUint64(b)
- b, d.v4 = consumeUint64(b)
- b, d.total = consumeUint64(b)
- copy(d.mem[:], b)
- b = b[len(d.mem):]
- d.n = int(d.total % uint64(len(d.mem)))
- return nil
-}
-
-func appendUint64(b []byte, x uint64) []byte {
- var a [8]byte
- binary.LittleEndian.PutUint64(a[:], x)
- return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
- x := u64(b)
- return b[8:], x
-}
-
-func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
-func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
-
-func round(acc, input uint64) uint64 {
- acc += input * prime2
- acc = rol31(acc)
- acc *= prime1
- return acc
-}
-
-func mergeRound(acc, val uint64) uint64 {
- val = round(0, val)
- acc ^= val
- acc = acc*prime1 + prime4
- return acc
-}
-
-func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
-func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
-func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
-func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
-func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
-func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
-func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
-func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
deleted file mode 100644
index ad14b807..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-//
-//go:noescape
-func Sum64(b []byte) uint64
-
-//go:noescape
-func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
deleted file mode 100644
index d580e32a..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
+++ /dev/null
@@ -1,215 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-#include "textflag.h"
-
-// Register allocation:
-// AX h
-// CX pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// R15 prime4v
-
-// round reads from and advances the buffer pointer in CX.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (CX), R12 \
- ADDQ $8, CX \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ R15, acc
-
-// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
- // Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), R15
-
- // Load slice.
- MOVQ b_base+0(FP), CX
- MOVQ b_len+8(FP), DX
- LEAQ (CX)(DX*1), BX
-
- // The first loop limit will be len(b)-32.
- SUBQ $32, BX
-
- // Check whether we have at least one block.
- CMPQ DX, $32
- JLT noBlocks
-
- // Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until CX > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ CX, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
-
- JMP afterBlocks
-
-noBlocks:
- MOVQ ·prime5v(SB), AX
-
-afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
- ADDQ $24, BX
-
- CMPQ CX, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (CX), R8
- ADDQ $8, CX
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ R15, AX
-
- CMPQ CX, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ CX, BX
- JG singles
-
- MOVL (CX), R8
- ADDQ $4, CX
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ CX, BX
- JGE finalize
-
-singlesLoop:
- MOVBQZX (CX), R12
- ADDQ $1, CX
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
-
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ CX, BX
- JL singlesLoop
-
-finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
- RET
-
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
-// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
- // Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
-
- // Load slice.
- MOVQ b_base+8(FP), CX
- MOVQ b_len+16(FP), DX
- LEAQ (CX)(DX*1), BX
- SUBQ $32, BX
-
- // Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
-
- // We don't need to check the loop condition here; this function is
- // always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ CX, BX
- JLE blockLoop
-
- // Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // The number of bytes written is CX minus the old base pointer.
- SUBQ b_base+8(FP), CX
- MOVQ CX, ret+32(FP)
-
- RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
deleted file mode 100644
index 4a5a8216..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// +build !amd64 appengine !gc purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-func Sum64(b []byte) uint64 {
- // A simpler version would be
- // d := New()
- // d.Write(b)
- // return d.Sum64()
- // but this is faster, particularly for small inputs.
-
- n := len(b)
- var h uint64
-
- if n >= 32 {
- v1 := prime1v + prime2
- v2 := prime2
- v3 := uint64(0)
- v4 := -prime1v
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = prime5
- }
-
- h += uint64(n)
-
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
- h = rol11(h) * prime1
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-func writeBlocks(d *Digest, b []byte) int {
- v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
- n := len(b)
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
- return n - len(b)
-}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
deleted file mode 100644
index fc9bea7a..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build appengine
-
-// This file contains the safe implementations of otherwise unsafe-using code.
-
-package xxhash
-
-// Sum64String computes the 64-bit xxHash digest of s.
-func Sum64String(s string) uint64 {
- return Sum64([]byte(s))
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-func (d *Digest) WriteString(s string) (n int, err error) {
- return d.Write([]byte(s))
-}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
deleted file mode 100644
index 53bf76ef..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build !appengine
-
-// This file encapsulates usage of unsafe.
-// xxhash_safe.go contains the safe implementations.
-
-package xxhash
-
-import (
- "reflect"
- "unsafe"
-)
-
-// Notes:
-//
-// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-// for some discussion about these unsafe conversions.
-//
-// In the future it's possible that compiler optimizations will make these
-// unsafe operations unnecessary: https://golang.org/issue/2205.
-//
-// Both of these wrapper functions still incur function call overhead since they
-// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
-// for strings to squeeze out a bit more speed. Mid-stack inlining should
-// eventually fix this.
-
-// Sum64String computes the 64-bit xxHash digest of s.
-// It may be faster than Sum64([]byte(s)) by avoiding a copy.
-func Sum64String(s string) uint64 {
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
- return Sum64(b)
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-// It may be faster than Write([]byte(s)) by avoiding a copy.
-func (d *Digest) WriteString(s string) (n int, err error) {
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
- return d.Write(b)
-}
diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go
deleted file mode 100644
index f896bd28..00000000
--- a/vendor/github.com/cespare/xxhash/xxhash.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-// at http://cyan4973.github.io/xxHash/.
-package xxhash
-
-import (
- "encoding/binary"
- "hash"
-)
-
-const (
- prime1 uint64 = 11400714785074694791
- prime2 uint64 = 14029467366897019727
- prime3 uint64 = 1609587929392839161
- prime4 uint64 = 9650029242287828579
- prime5 uint64 = 2870177450012600261
-)
-
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
-
-type xxh struct {
- v1 uint64
- v2 uint64
- v3 uint64
- v4 uint64
- total int
- mem [32]byte
- n int // how much of mem is used
-}
-
-// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm.
-func New() hash.Hash64 {
- var x xxh
- x.Reset()
- return &x
-}
-
-func (x *xxh) Reset() {
- x.n = 0
- x.total = 0
- x.v1 = prime1v + prime2
- x.v2 = prime2
- x.v3 = 0
- x.v4 = -prime1v
-}
-
-func (x *xxh) Size() int { return 8 }
-func (x *xxh) BlockSize() int { return 32 }
-
-// Write adds more data to x. It always returns len(b), nil.
-func (x *xxh) Write(b []byte) (n int, err error) {
- n = len(b)
- x.total += len(b)
-
- if x.n+len(b) < 32 {
- // This new data doesn't even fill the current block.
- copy(x.mem[x.n:], b)
- x.n += len(b)
- return
- }
-
- if x.n > 0 {
- // Finish off the partial block.
- copy(x.mem[x.n:], b)
- x.v1 = round(x.v1, u64(x.mem[0:8]))
- x.v2 = round(x.v2, u64(x.mem[8:16]))
- x.v3 = round(x.v3, u64(x.mem[16:24]))
- x.v4 = round(x.v4, u64(x.mem[24:32]))
- b = b[32-x.n:]
- x.n = 0
- }
-
- if len(b) >= 32 {
- // One or more full blocks left.
- b = writeBlocks(x, b)
- }
-
- // Store any remaining partial block.
- copy(x.mem[:], b)
- x.n = len(b)
-
- return
-}
-
-func (x *xxh) Sum(b []byte) []byte {
- s := x.Sum64()
- return append(
- b,
- byte(s>>56),
- byte(s>>48),
- byte(s>>40),
- byte(s>>32),
- byte(s>>24),
- byte(s>>16),
- byte(s>>8),
- byte(s),
- )
-}
-
-func (x *xxh) Sum64() uint64 {
- var h uint64
-
- if x.total >= 32 {
- v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = x.v3 + prime5
- }
-
- h += uint64(x.total)
-
- i, end := 0, x.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(x.mem[i:i+8]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(x.mem[i:i+4])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for i < end {
- h ^= uint64(x.mem[i]) * prime5
- h = rol11(h) * prime1
- i++
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
-func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
-
-func round(acc, input uint64) uint64 {
- acc += input * prime2
- acc = rol31(acc)
- acc *= prime1
- return acc
-}
-
-func mergeRound(acc, val uint64) uint64 {
- val = round(0, val)
- acc ^= val
- acc = acc*prime1 + prime4
- return acc
-}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go
deleted file mode 100644
index d6176526..00000000
--- a/vendor/github.com/cespare/xxhash/xxhash_amd64.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-//
-//go:noescape
-func Sum64(b []byte) uint64
-
-func writeBlocks(x *xxh, b []byte) []byte
diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s
deleted file mode 100644
index 757f2011..00000000
--- a/vendor/github.com/cespare/xxhash/xxhash_amd64.s
+++ /dev/null
@@ -1,233 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-#include "textflag.h"
-
-// Register allocation:
-// AX h
-// CX pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// R15 prime4v
-
-// round reads from and advances the buffer pointer in CX.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (CX), R12 \
- ADDQ $8, CX \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ R15, acc
-
-// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
- // Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), R15
-
- // Load slice.
- MOVQ b_base+0(FP), CX
- MOVQ b_len+8(FP), DX
- LEAQ (CX)(DX*1), BX
-
- // The first loop limit will be len(b)-32.
- SUBQ $32, BX
-
- // Check whether we have at least one block.
- CMPQ DX, $32
- JLT noBlocks
-
- // Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until CX > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ CX, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
-
- JMP afterBlocks
-
-noBlocks:
- MOVQ ·prime5v(SB), AX
-
-afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
- ADDQ $24, BX
-
- CMPQ CX, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (CX), R8
- ADDQ $8, CX
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ R15, AX
-
- CMPQ CX, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ CX, BX
- JG singles
-
- MOVL (CX), R8
- ADDQ $4, CX
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ CX, BX
- JGE finalize
-
-singlesLoop:
- MOVBQZX (CX), R12
- ADDQ $1, CX
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
-
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ CX, BX
- JL singlesLoop
-
-finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
- RET
-
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the x pointer.
-
-// func writeBlocks(x *xxh, b []byte) []byte
-TEXT ·writeBlocks(SB), NOSPLIT, $0-56
- // Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
-
- // Load slice.
- MOVQ b_base+8(FP), CX
- MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below
- MOVQ b_len+16(FP), DX
- LEAQ (CX)(DX*1), BX
- SUBQ $32, BX
-
- // Load vN from x.
- MOVQ x+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
-
- // We don't need to check the loop condition here; this function is
- // always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ CX, BX
- JLE blockLoop
-
- // Copy vN back to x.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // Construct return slice.
- // NOTE: It's important that we don't construct a slice that has a base
- // pointer off the end of the original slice, as in Go 1.7+ this will
- // cause runtime crashes. (See discussion in, for example,
- // https://github.com/golang/go/issues/16772.)
- // Therefore, we calculate the length/cap first, and if they're zero, we
- // keep the old base. This is what the compiler does as well if you
- // write code like
- // b = b[len(b):]
-
- // New length is 32 - (CX - BX) -> BX+32 - CX.
- ADDQ $32, BX
- SUBQ CX, BX
- JZ afterSetBase
-
- MOVQ CX, ret_base+32(FP)
-
-afterSetBase:
- MOVQ BX, ret_len+40(FP)
- MOVQ BX, ret_cap+48(FP) // set cap == len
-
- RET
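The two macros above are the scalar xxHash64 round functions, hand-scheduled for amd64. As a cross-check, here is a minimal Go sketch of what `round` and `mergeRound` compute; the prime constants come from the public xxHash specification rather than from this file:

```go
package main

import (
	"fmt"
	"math/bits"
)

// xxHash64 prime constants, per the public xxHash specification.
const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime4 uint64 = 9650029242287828579
)

// round mirrors the round(r) macro: mix one 8-byte lane into an accumulator.
func round(acc, lane uint64) uint64 {
	acc += lane * prime2             // IMULQ R14, R12; ADDQ R12, r
	acc = bits.RotateLeft64(acc, 31) // ROLQ $31, r
	return acc * prime1              // IMULQ R13, r
}

// mergeRound mirrors the mergeRound(acc, val) macro used to fold the four
// lane accumulators into the final hash.
func mergeRound(acc, val uint64) uint64 {
	val = round(0, val)
	acc ^= val
	return acc*prime1 + prime4
}

func main() {
	v1 := prime1
	v1 += prime2 // initial v1; wraps modulo 2^64 like the ADDQ above
	v1 = round(v1, 0x0123456789abcdef)
	fmt.Printf("%#016x\n", mergeRound(bits.RotateLeft64(v1, 1), v1))
}
```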
diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go
deleted file mode 100644
index c68d13f8..00000000
--- a/vendor/github.com/cespare/xxhash/xxhash_other.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// +build !amd64 appengine !gc purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-func Sum64(b []byte) uint64 {
- // A simpler version would be
- // x := New()
- // x.Write(b)
- // return x.Sum64()
- // but this is faster, particularly for small inputs.
-
- n := len(b)
- var h uint64
-
- if n >= 32 {
- v1 := prime1v + prime2
- v2 := prime2
- v3 := uint64(0)
- v4 := -prime1v
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = prime5
- }
-
- h += uint64(n)
-
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
- h = rol11(h) * prime1
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-func writeBlocks(x *xxh, b []byte) []byte {
- v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4
- return b
-}
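The portable fallback above must agree bit-for-bit with the assembly version. A small usage sketch, assuming the package's exported `New` constructor (which upstream `cespare/xxhash` provides), comparing the fast one-shot path against the streaming path mentioned in the `Sum64` comment:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash"
)

func main() {
	data := []byte("The quick brown fox jumps over the lazy dog")

	// One-shot: the fast path implemented in this file.
	fmt.Println(xxhash.Sum64(data))

	// Streaming: the "simpler version" mentioned in the Sum64 comment.
	// Both paths must produce the same digest.
	h := xxhash.New()
	h.Write(data)
	fmt.Println(h.Sum64())
}
```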
diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go
deleted file mode 100644
index dfa15ab7..00000000
--- a/vendor/github.com/cespare/xxhash/xxhash_safe.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build appengine
-
-// This file contains the safe implementations of otherwise unsafe-using code.
-
-package xxhash
-
-// Sum64String computes the 64-bit xxHash digest of s.
-func Sum64String(s string) uint64 {
- return Sum64([]byte(s))
-}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go
deleted file mode 100644
index d2b64e8b..00000000
--- a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build !appengine
-
-// This file encapsulates usage of unsafe.
-// xxhash_safe.go contains the safe implementations.
-
-package xxhash
-
-import (
- "reflect"
- "unsafe"
-)
-
-// Sum64String computes the 64-bit xxHash digest of s.
-// It may be faster than Sum64([]byte(s)) by avoiding a copy.
-//
-// TODO(caleb): Consider removing this if an optimization is ever added to make
-// it unnecessary: https://golang.org/issue/2205.
-//
-// TODO(caleb): We still have a function call; we could instead write Go/asm
-// copies of Sum64 for strings to squeeze out a bit more speed.
-func Sum64String(s string) uint64 {
- // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
- // for some discussion about this unsafe conversion.
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
- return Sum64(b)
-}
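The `reflect.SliceHeader` trick above predates today's safer helpers. A hedged sketch of the equivalent zero-copy conversion on Go 1.20+ using `unsafe.Slice` and `unsafe.StringData`; `sum64String` is a hypothetical name, not part of the vendored API:

```go
package main

import (
	"fmt"
	"unsafe"

	"github.com/cespare/xxhash"
)

// sum64String is a modern zero-copy equivalent of the vendored
// Sum64String (requires Go 1.20+ for unsafe.StringData).
func sum64String(s string) uint64 {
	if len(s) == 0 {
		return xxhash.Sum64(nil)
	}
	b := unsafe.Slice(unsafe.StringData(s), len(s))
	return xxhash.Sum64(b)
}

func main() {
	fmt.Println(sum64String("hello") == xxhash.Sum64([]byte("hello"))) // true
}
```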
diff --git a/vendor/github.com/cheekybits/genny/.gitignore b/vendor/github.com/cheekybits/genny/.gitignore
deleted file mode 100644
index c62d148c..00000000
--- a/vendor/github.com/cheekybits/genny/.gitignore
+++ /dev/null
@@ -1,26 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-genny
diff --git a/vendor/github.com/cheekybits/genny/.travis.yml b/vendor/github.com/cheekybits/genny/.travis.yml
deleted file mode 100644
index 78ba5f2d..00000000
--- a/vendor/github.com/cheekybits/genny/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-
-go:
- - 1.7
- - 1.8
- - 1.9
diff --git a/vendor/github.com/cheekybits/genny/LICENSE b/vendor/github.com/cheekybits/genny/LICENSE
deleted file mode 100644
index 519d7f22..00000000
--- a/vendor/github.com/cheekybits/genny/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 cheekybits
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/cheekybits/genny/README.md b/vendor/github.com/cheekybits/genny/README.md
deleted file mode 100644
index 64a28ac7..00000000
--- a/vendor/github.com/cheekybits/genny/README.md
+++ /dev/null
@@ -1,245 +0,0 @@
-# genny - Generics for Go
-
-[![Build Status](https://travis-ci.org/cheekybits/genny.svg?branch=master)](https://travis-ci.org/cheekybits/genny) [![GoDoc](https://godoc.org/github.com/cheekybits/genny/parse?status.png)](http://godoc.org/github.com/cheekybits/genny/parse)
-
-Install:
-
-```
-go get github.com/cheekybits/genny
-```
-
-=====
-
-(pron. Jenny) by Mat Ryer ([@matryer](https://twitter.com/matryer)) and Tyler Bunnell ([@TylerJBunnell](https://twitter.com/TylerJBunnell)).
-
-Until the Go core team includes support for [generics in Go](http://golang.org/doc/faq#generics), `genny` is a code-generation generics solution. It allows you to write normal, buildable and testable Go code which, when processed by the `genny gen` tool, will replace the generics with specific types.
-
- * Generic code is valid Go code
- * Generic code compiles and can be tested
- * Use `stdin` and `stdout` or specify in and out files
- * Supports Go 1.4's [go generate](http://tip.golang.org/doc/go1.4#gogenerate)
- * Multiple specific types will generate every permutation
- * Use the `BUILTINS` and `NUMBERS` wildcard types to generate specific code for all built-in (and number) Go types
- * Function names and comments also get updated
-
-## Library
-
-We have started building a [library of common things](https://github.com/cheekybits/gennylib), and you can use `genny get` to generate the specific versions you need.
-
-For example: `genny get maps/concurrentmap.go "KeyType=BUILTINS ValueType=BUILTINS"` will print out generated code for all types for a concurrent map. Any file in the library may be generated locally in this way using all the same options given to `genny gen`.
-
-## Usage
-
-```
-genny [{flags}] gen "{types}"
-
-gen - generates type specific code from generic code.
-get - fetch a generic template from the online library and gen it.
-
-{flags} - (optional) Command line flags (see below)
-{types} - (required) Specific types for each generic type in the source
-{types} format: {generic}={specific}[,another][ {generic2}={specific2}]
-
-Examples:
- Generic=Specific
- Generic1=Specific1 Generic2=Specific2
- Generic1=Specific1,Specific2 Generic2=Specific3,Specific4
-
-Flags:
- -in="": file to parse instead of stdin
- -out="": file to save output to instead of stdout
- -pkg="": package name for generated files
-```
-
- * Comma separated type lists will generate code for each type
-
-### Flags
-
- * `-in` - specify the input file (rather than using stdin)
- * `-out` - specify the output file (rather than using stdout)
-
-### go generate
-
-To use Go 1.4's `go generate` capability, insert the following comment in your source code file:
-
-```
-//go:generate genny -in=$GOFILE -out=gen-$GOFILE gen "KeyType=string,int ValueType=string,int"
-```
-
- * Start the line with `//go:generate `
- * Use the `-in` and `-out` flags to specify the files to work on
- * Use the `genny` command as usual after the flags
-
-Now, running `go generate` (in a shell) for the package will cause the generic versions of the files to be generated.
-
- * The output file will be overwritten, so it's safe to call `go generate` many times
- * Use `$GOFILE` to refer to the current file
- * The `//go:generate` line will be removed from the output
-
-To see a real example of how to use `genny` with `go generate`, look in the [example/go-generate directory](https://github.com/cheekybits/genny/tree/master/examples/go-generate).
-
-## How it works
-
-Define your generic types using the special `generic.Type` placeholder type:
-
-```go
-type KeyType generic.Type
-type ValueType generic.Type
-```
-
- * You can use as many as you like
- * Give them meaningful names
-
-Then write the generic code referencing the types as you normally would:
-
-```go
-func SetValueTypeForKeyType(key KeyType, value ValueType) { /* ... */ }
-```
-
- * Generic type names will also be replaced in comments and function names (see Real example below)
-
-Since `generic.Type` is a real Go type, your code will compile, and you can even write unit tests against your generic code.
-
-#### Generating specific versions
-
-Pass the file through the `genny gen` tool with the specific types as the argument:
-
-```
-cat generic.go | genny gen "KeyType=string ValueType=interface{}"
-```
-
-The output will be the complete Go source file with the generic types replaced with the types specified in the arguments.
-
-## Real example
-
-Given [this generic Go code](https://github.com/cheekybits/genny/tree/master/examples/queue) which compiles and is tested:
-
-```go
-package queue
-
-import "github.com/cheekybits/genny/generic"
-
-// NOTE: this is how easy it is to define a generic type
-type Something generic.Type
-
-// SomethingQueue is a queue of Somethings.
-type SomethingQueue struct {
- items []Something
-}
-
-func NewSomethingQueue() *SomethingQueue {
- return &SomethingQueue{items: make([]Something, 0)}
-}
-func (q *SomethingQueue) Push(item Something) {
- q.items = append(q.items, item)
-}
-func (q *SomethingQueue) Pop() Something {
- item := q.items[0]
- q.items = q.items[1:]
- return item
-}
-```
-
-When `genny gen` is invoked like this:
-
-```
-cat source.go | genny gen "Something=string"
-```
-
-It outputs:
-
-```go
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
-package queue
-
-// StringQueue is a queue of Strings.
-type StringQueue struct {
- items []string
-}
-
-func NewStringQueue() *StringQueue {
- return &StringQueue{items: make([]string, 0)}
-}
-func (q *StringQueue) Push(item string) {
- q.items = append(q.items, item)
-}
-func (q *StringQueue) Pop() string {
- item := q.items[0]
- q.items = q.items[1:]
- return item
-}
-```
-
-To get a _something_ for every built-in Go type plus one of your own types, you could run:
-
-```
-cat source.go | genny gen "Something=BUILTINS,*MyType"
-```
-
-#### More examples
-
-Check out the [test code files](https://github.com/cheekybits/genny/tree/master/parse/test) for more real examples.
-
-## Writing test code
-
-Once you have defined a generic type with some code worth testing:
-
-```go
-package slice
-
-import (
- "log"
- "reflect"
-
- "github.com/stretchr/gogen/generic"
-)
-
-type MyType generic.Type
-
-func EnsureMyTypeSlice(objectOrSlice interface{}) []MyType {
- log.Printf("%v", reflect.TypeOf(objectOrSlice))
- switch obj := objectOrSlice.(type) {
- case []MyType:
- log.Println(" returning it untouched")
- return obj
- case MyType:
- log.Println(" wrapping in slice")
- return []MyType{obj}
- default:
- panic("ensure slice needs MyType or []MyType")
- }
-}
-```
-
-You can treat it like any normal Go type in your test code:
-
-```go
-func TestEnsureMyTypeSlice(t *testing.T) {
-
- myType := new(MyType)
- slice := EnsureMyTypeSlice(myType)
- if assert.NotNil(t, slice) {
- assert.Equal(t, slice[0], myType)
- }
-
- slice = EnsureMyTypeSlice(slice)
- log.Printf("%#v", slice[0])
- if assert.NotNil(t, slice) {
- assert.Equal(t, slice[0], myType)
- }
-
-}
-```
-
-### Understanding what `generic.Type` is
-
-Because `generic.Type` is an empty interface type (literally `interface{}`), every other type will be considered a `generic.Type` if you switch on the type of an object; the sketch after this file illustrates the pitfall. Of course, once the specific versions are generated this issue goes away, but it's worth knowing about when you write tests against generic code.
-
-### Contributions
-
- * See the [API documentation for the parse package](http://godoc.org/github.com/cheekybits/genny/parse)
- * Please do TDD
- * All input welcome
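The closing note in the README warns that, before generation runs, `generic.Type` is just `interface{}`, so a type switch in test code matches it for any value. A minimal, self-contained sketch of that pitfall (`MyType` and `describe` are illustrative names):

```go
package main

import "fmt"

// Type mirrors generic.Type: literally an empty interface.
type Type interface{}

// MyType is a generic placeholder, as in the README's test example.
type MyType Type

func describe(v interface{}) string {
	switch v.(type) {
	case MyType:
		return "MyType"
	default:
		return "something else"
	}
}

func main() {
	// Before genny generates specific versions, MyType is an empty
	// interface, so the MyType case matches every value.
	fmt.Println(describe(42))      // MyType
	fmt.Println(describe("hello")) // MyType
}
```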
diff --git a/vendor/github.com/cheekybits/genny/doc.go b/vendor/github.com/cheekybits/genny/doc.go
deleted file mode 100644
index 4c31e22b..00000000
--- a/vendor/github.com/cheekybits/genny/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package main is the command line tool for Genny.
-package main
diff --git a/vendor/github.com/cheekybits/genny/generic/doc.go b/vendor/github.com/cheekybits/genny/generic/doc.go
deleted file mode 100644
index 3bd6c869..00000000
--- a/vendor/github.com/cheekybits/genny/generic/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package generic contains the generic marker types.
-package generic
diff --git a/vendor/github.com/cheekybits/genny/generic/generic.go b/vendor/github.com/cheekybits/genny/generic/generic.go
deleted file mode 100644
index 04a2306c..00000000
--- a/vendor/github.com/cheekybits/genny/generic/generic.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package generic
-
-// Type is the placeholder type that indicates a generic value.
-// When genny is executed, variables of this type will be replaced with
-// references to the specific types.
-// var GenericType generic.Type
-type Type interface{}
-
-// Number is the placeholder type that indicates a generic numerical value.
-// When genny is executed, variables of this type will be replaced with
-// references to the specific types.
-// var GenericType generic.Number
-type Number float64
diff --git a/vendor/github.com/cheekybits/genny/main.go b/vendor/github.com/cheekybits/genny/main.go
deleted file mode 100644
index fe06a6c0..00000000
--- a/vendor/github.com/cheekybits/genny/main.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "strings"
-
- "github.com/cheekybits/genny/out"
- "github.com/cheekybits/genny/parse"
-)
-
-/*
-
- source | genny gen [-in=""] [-out=""] [-pkg=""] "KeyType=string,int ValueType=string,int"
-
-*/
-
-const (
- _ = iota
- exitcodeInvalidArgs
- exitcodeInvalidTypeSet
- exitcodeStdinFailed
- exitcodeGenFailed
- exitcodeGetFailed
- exitcodeSourceFileInvalid
- exitcodeDestFileFailed
-)
-
-func main() {
- var (
- in = flag.String("in", "", "file to parse instead of stdin")
- out = flag.String("out", "", "file to save output to instead of stdout")
- pkgName = flag.String("pkg", "", "package name for generated files")
- prefix = "https://github.com/metabition/gennylib/raw/master/"
- )
- flag.Parse()
- args := flag.Args()
-
- if len(args) < 2 {
- usage()
- os.Exit(exitcodeInvalidArgs)
- }
-
- if strings.ToLower(args[0]) != "gen" && strings.ToLower(args[0]) != "get" {
- usage()
- os.Exit(exitcodeInvalidArgs)
- }
-
- // parse the typesets
- var setsArg = args[1]
- if strings.ToLower(args[0]) == "get" {
- setsArg = args[2]
- }
- typeSets, err := parse.TypeSet(setsArg)
- if err != nil {
- fatal(exitcodeInvalidTypeSet, err)
- }
-
- outWriter := newWriter(*out)
-
- if strings.ToLower(args[0]) == "get" {
- if len(args) != 3 {
- fmt.Println("not enough arguments to get")
- usage()
- os.Exit(exitcodeInvalidArgs)
- }
- r, err := http.Get(prefix + args[1])
- if err != nil {
- fatal(exitcodeGetFailed, err)
- }
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- fatal(exitcodeGetFailed, err)
- }
- r.Body.Close()
- br := bytes.NewReader(b)
- err = gen(*in, *pkgName, br, typeSets, outWriter)
- } else if len(*in) > 0 {
- var file *os.File
- file, err = os.Open(*in)
- if err != nil {
- fatal(exitcodeSourceFileInvalid, err)
- }
- defer file.Close()
- err = gen(*in, *pkgName, file, typeSets, outWriter)
- } else {
- var source []byte
- source, err = ioutil.ReadAll(os.Stdin)
- if err != nil {
- fatal(exitcodeStdinFailed, err)
- }
- reader := bytes.NewReader(source)
- err = gen("stdin", *pkgName, reader, typeSets, outWriter)
- }
-
- // do the work
- if err != nil {
- fatal(exitcodeGenFailed, err)
- }
-
-}
-
-func usage() {
- fmt.Fprintln(os.Stderr, `usage: genny [{flags}] gen "{types}"
-
-gen - generates type specific code from generic code.
-get - fetch a generic template from the online library and gen it.
-
-{flags} - (optional) Command line flags (see below)
-{types} - (required) Specific types for each generic type in the source
-{types} format: {generic}={specific}[,another][ {generic2}={specific2}]
-
-Examples:
- Generic=Specific
- Generic1=Specific1 Generic2=Specific2
- Generic1=Specific1,Specific2 Generic2=Specific3,Specific4
-
-Flags:`)
- flag.PrintDefaults()
-}
-
-func newWriter(fileName string) io.Writer {
- if fileName == "" {
- return os.Stdout
- }
- lf := &out.LazyFile{FileName: fileName}
- defer lf.Close()
- return lf
-}
-
-func fatal(code int, a ...interface{}) {
- fmt.Println(a...)
- os.Exit(code)
-}
-
-// gen performs the generic generation.
-func gen(filename, pkgName string, in io.ReadSeeker, typesets []map[string]string, out io.Writer) error {
-
- var output []byte
- var err error
-
- output, err = parse.Generics(filename, pkgName, in, typesets)
- if err != nil {
- return err
- }
-
- out.Write(output)
- return nil
-}
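The CLI above is a thin wrapper around `parse.Generics`. A sketch of driving the parser programmatically, using the exported signature visible in `gen`; the source snippet mirrors the README's queue example:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/cheekybits/genny/parse"
)

func main() {
	src := `package queue

import "github.com/cheekybits/genny/generic"

type Something generic.Type

// SomethingQueue is a queue of Somethings.
type SomethingQueue struct {
	items []Something
}
`
	// One type set, equivalent to the CLI argument "Something=string".
	typeSets := []map[string]string{{"Something": "string"}}

	out, err := parse.Generics("queue.go", "", strings.NewReader(src), typeSets)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // the generated StringQueue code
}
```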
diff --git a/vendor/github.com/cheekybits/genny/out/lazy_file.go b/vendor/github.com/cheekybits/genny/out/lazy_file.go
deleted file mode 100644
index 7c8815f5..00000000
--- a/vendor/github.com/cheekybits/genny/out/lazy_file.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package out
-
-import (
- "os"
- "path"
-)
-
-// LazyFile is an io.WriteCloser which defers creating the file it is supposed
-// to write to until the first call to Write, so that no file is created if no
-// write ever happens.
-type LazyFile struct {
- // FileName is path to the file to which genny will write.
- FileName string
- file *os.File
-}
-
-// Close closes the file if it was created; it returns nil otherwise.
-func (lw *LazyFile) Close() error {
- if lw.file != nil {
- return lw.file.Close()
- }
- return nil
-}
-
-// Write writes to the specified file, creating it (and its parent directory) the first time it is called.
-func (lw *LazyFile) Write(p []byte) (int, error) {
- if lw.file == nil {
- err := os.MkdirAll(path.Dir(lw.FileName), 0755)
- if err != nil {
- return 0, err
- }
- lw.file, err = os.Create(lw.FileName)
- if err != nil {
- return 0, err
- }
- }
- return lw.file.Write(p)
-}
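A short usage sketch of `LazyFile` (the output path is hypothetical). Because creation is deferred to the first `Write`, a generation run that fails before producing output leaves no empty file behind:

```go
package main

import (
	"fmt"

	"github.com/cheekybits/genny/out"
)

func main() {
	lf := &out.LazyFile{FileName: "generated/queue_string.go"}
	defer lf.Close()

	// Neither the file nor the "generated" directory exists yet; the
	// first Write creates both.
	if _, err := fmt.Fprintln(lf, "package queue"); err != nil {
		panic(err)
	}
}
```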
diff --git a/vendor/github.com/cheekybits/genny/parse/builtins.go b/vendor/github.com/cheekybits/genny/parse/builtins.go
deleted file mode 100644
index e0299544..00000000
--- a/vendor/github.com/cheekybits/genny/parse/builtins.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package parse
-
-// Builtins contains a slice of all built-in Go types.
-var Builtins = []string{
- "bool",
- "byte",
- "complex128",
- "complex64",
- "error",
- "float32",
- "float64",
- "int",
- "int16",
- "int32",
- "int64",
- "int8",
- "rune",
- "string",
- "uint",
- "uint16",
- "uint32",
- "uint64",
- "uint8",
- "uintptr",
-}
-
-// Numbers contains a slice of all built-in number types.
-var Numbers = []string{
- "float32",
- "float64",
- "int",
- "int16",
- "int32",
- "int64",
- "int8",
- "uint",
- "uint16",
- "uint32",
- "uint64",
- "uint8",
-}
diff --git a/vendor/github.com/cheekybits/genny/parse/doc.go b/vendor/github.com/cheekybits/genny/parse/doc.go
deleted file mode 100644
index 1be4fed8..00000000
--- a/vendor/github.com/cheekybits/genny/parse/doc.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Package parse contains the generic code generation capabilities
-// that power genny.
-//
-// genny gen "{types}"
-//
-// gen - generates type specific code (to stdout) from generic code (via stdin)
-//
-// {types} - (required) Specific types for each generic type in the source
-// {types} format: {generic}={specific}[,another][ {generic2}={specific2}]
-// Examples:
-// Generic=Specific
-// Generic1=Specific1 Generic2=Specific2
-// Generic1=Specific1,Specific2 Generic2=Specific3,Specific4
-package parse
diff --git a/vendor/github.com/cheekybits/genny/parse/errors.go b/vendor/github.com/cheekybits/genny/parse/errors.go
deleted file mode 100644
index ab812bf9..00000000
--- a/vendor/github.com/cheekybits/genny/parse/errors.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package parse
-
-import (
- "errors"
-)
-
-// errMissingSpecificType represents an error when a generic type is not
-// satisfied by a specific type.
-type errMissingSpecificType struct {
- GenericType string
-}
-
-// Error gets a human readable string describing this error.
-func (e errMissingSpecificType) Error() string {
- return "Missing specific type for '" + e.GenericType + "' generic type"
-}
-
-// errImports represents an error from goimports.
-type errImports struct {
- Err error
-}
-
-// Error gets a human readable string describing this error.
-func (e errImports) Error() string {
- return "Failed to goimports the generated code: " + e.Err.Error()
-}
-
-// errSource represents an error with the source file.
-type errSource struct {
- Err error
-}
-
-// Error gets a human readable string describing this error.
-func (e errSource) Error() string {
- return "Failed to parse source file: " + e.Err.Error()
-}
-
-type errBadTypeArgs struct {
- Message string
- Arg string
-}
-
-func (e errBadTypeArgs) Error() string {
- return "\"" + e.Arg + "\" is bad: " + e.Message
-}
-
-var errMissingTypeInformation = errors.New("No type arguments were specified and no \"// +gogen\" tag was found in the source.")
diff --git a/vendor/github.com/cheekybits/genny/parse/parse.go b/vendor/github.com/cheekybits/genny/parse/parse.go
deleted file mode 100644
index 08eb48b1..00000000
--- a/vendor/github.com/cheekybits/genny/parse/parse.go
+++ /dev/null
@@ -1,298 +0,0 @@
-package parse
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "io"
- "os"
- "strings"
- "unicode"
-
- "golang.org/x/tools/imports"
-)
-
-var header = []byte(`
-
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
-`)
-
-var (
- packageKeyword = []byte("package")
- importKeyword = []byte("import")
- openBrace = []byte("(")
- closeBrace = []byte(")")
- genericPackage = "generic"
- genericType = "generic.Type"
- genericNumber = "generic.Number"
- linefeed = "\r\n"
-)
-var unwantedLinePrefixes = [][]byte{
- []byte("//go:generate genny "),
-}
-
-func subIntoLiteral(lit, typeTemplate, specificType string) string {
- if lit == typeTemplate {
- return specificType
- }
- if !strings.Contains(lit, typeTemplate) {
- return lit
- }
- specificLg := wordify(specificType, true)
- specificSm := wordify(specificType, false)
- result := strings.Replace(lit, typeTemplate, specificLg, -1)
- if strings.HasPrefix(result, specificLg) && !isExported(lit) {
- return strings.Replace(result, specificLg, specificSm, 1)
- }
- return result
-}
-
-func subTypeIntoComment(line, typeTemplate, specificType string) string {
- var subbed string
- for _, w := range strings.Fields(line) {
- subbed = subbed + subIntoLiteral(w, typeTemplate, specificType) + " "
- }
- return subbed
-}
-
-// Does the heavy lifting of taking a line of our code and
-// substituting a specific type in place of our generic type
-func subTypeIntoLine(line, typeTemplate, specificType string) string {
- src := []byte(line)
- var s scanner.Scanner
- fset := token.NewFileSet()
- file := fset.AddFile("", fset.Base(), len(src))
- s.Init(file, src, nil, scanner.ScanComments)
- output := ""
- for {
- _, tok, lit := s.Scan()
- if tok == token.EOF {
- break
- } else if tok == token.COMMENT {
- subbed := subTypeIntoComment(lit, typeTemplate, specificType)
- output = output + subbed + " "
- } else if tok.IsLiteral() {
- subbed := subIntoLiteral(lit, typeTemplate, specificType)
- output = output + subbed + " "
- } else {
- output = output + tok.String() + " "
- }
- }
- return output
-}
-
-// typeSet looks like "KeyType: int, ValueType: string"
-func generateSpecific(filename string, in io.ReadSeeker, typeSet map[string]string) ([]byte, error) {
-
- // ensure we are at the beginning of the file
- in.Seek(0, os.SEEK_SET)
-
- // parse the source file
- fs := token.NewFileSet()
- file, err := parser.ParseFile(fs, filename, in, 0)
- if err != nil {
- return nil, &errSource{Err: err}
- }
-
- // make sure every generic.Type is represented in the types
- // argument.
- for _, decl := range file.Decls {
- switch it := decl.(type) {
- case *ast.GenDecl:
- for _, spec := range it.Specs {
- ts, ok := spec.(*ast.TypeSpec)
- if !ok {
- continue
- }
- switch tt := ts.Type.(type) {
- case *ast.SelectorExpr:
- if name, ok := tt.X.(*ast.Ident); ok {
- if name.Name == genericPackage {
- if _, ok := typeSet[ts.Name.Name]; !ok {
- return nil, &errMissingSpecificType{GenericType: ts.Name.Name}
- }
- }
- }
- }
- }
- }
- }
-
- in.Seek(0, os.SEEK_SET)
-
- var buf bytes.Buffer
-
- comment := ""
- scanner := bufio.NewScanner(in)
- for scanner.Scan() {
-
- line := scanner.Text()
-
- // does this line contain generic.Type?
- if strings.Contains(line, genericType) || strings.Contains(line, genericNumber) {
- comment = ""
- continue
- }
-
- for t, specificType := range typeSet {
- if strings.Contains(line, t) {
- newLine := subTypeIntoLine(line, t, specificType)
- line = newLine
- }
- }
-
- if comment != "" {
- buf.WriteString(makeLine(comment))
- comment = ""
- }
-
- // is this line a comment?
- // TODO: should we handle /* */ comments?
- if strings.HasPrefix(line, "//") {
- // record this line to print later
- comment = line
- continue
- }
-
- // write the line
- buf.WriteString(makeLine(line))
- }
-
- // write it out
- return buf.Bytes(), nil
-}
-
-// Generics parses the source file and generates the bytes replacing the
-// generic types for the keys map with the specific types (its value).
-func Generics(filename, pkgName string, in io.ReadSeeker, typeSets []map[string]string) ([]byte, error) {
-
- totalOutput := header
-
- for _, typeSet := range typeSets {
-
- // generate the specifics
- parsed, err := generateSpecific(filename, in, typeSet)
- if err != nil {
- return nil, err
- }
-
- totalOutput = append(totalOutput, parsed...)
-
- }
-
- // clean up the code line by line
- packageFound := false
- insideImportBlock := false
- var cleanOutputLines []string
- scanner := bufio.NewScanner(bytes.NewReader(totalOutput))
- for scanner.Scan() {
-
- // end of imports block?
- if insideImportBlock {
- if bytes.HasSuffix(scanner.Bytes(), closeBrace) {
- insideImportBlock = false
- }
- continue
- }
-
- if bytes.HasPrefix(scanner.Bytes(), packageKeyword) {
- if packageFound {
- continue
- } else {
- packageFound = true
- }
- } else if bytes.HasPrefix(scanner.Bytes(), importKeyword) {
- if bytes.HasSuffix(scanner.Bytes(), openBrace) {
- insideImportBlock = true
- }
- continue
- }
-
- // check all unwantedLinePrefixes - and skip them
- skipline := false
- for _, prefix := range unwantedLinePrefixes {
- if bytes.HasPrefix(scanner.Bytes(), prefix) {
- skipline = true
- continue
- }
- }
-
- if skipline {
- continue
- }
-
- cleanOutputLines = append(cleanOutputLines, makeLine(scanner.Text()))
- }
-
- cleanOutput := strings.Join(cleanOutputLines, "")
-
- output := []byte(cleanOutput)
- var err error
-
- // change package name
- if pkgName != "" {
- output = changePackage(bytes.NewReader([]byte(output)), pkgName)
- }
- // fix the imports
- output, err = imports.Process(filename, output, nil)
- if err != nil {
- return nil, &errImports{Err: err}
- }
-
- return output, nil
-}
-
-func makeLine(s string) string {
- return fmt.Sprintln(strings.TrimRight(s, linefeed))
-}
-
-// isAlphaNumeric gets whether the rune is alphanumeric or _.
-func isAlphaNumeric(r rune) bool {
- return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}
-
-// wordify turns a type into a nice word for function and type
-// names etc.
-func wordify(s string, exported bool) string {
- s = strings.TrimRight(s, "{}")
- s = strings.TrimLeft(s, "*&")
- s = strings.Replace(s, ".", "", -1)
- if !exported {
- return s
- }
- return strings.ToUpper(string(s[0])) + s[1:]
-}
-
-func changePackage(r io.Reader, pkgName string) []byte {
- var out bytes.Buffer
- sc := bufio.NewScanner(r)
- done := false
-
- for sc.Scan() {
- s := sc.Text()
-
- if !done && strings.HasPrefix(s, "package") {
- parts := strings.Split(s, " ")
- parts[1] = pkgName
- s = strings.Join(parts, " ")
- done = true
- }
-
- fmt.Fprintln(&out, s)
- }
- return out.Bytes()
-}
-
-func isExported(lit string) bool {
- if len(lit) == 0 {
- return false
- }
- return unicode.IsUpper(rune(lit[0]))
-}
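`wordify` is unexported, so here is a verbatim copy wrapped in a runnable sketch, purely to show how specific types become identifier-friendly words during the substitution above:

```go
package main

import (
	"fmt"
	"strings"
)

// wordify is copied from the parse package above: it strips pointer,
// brace, and dot characters so a specific type can be spliced into
// identifiers, optionally upper-casing the first rune for export.
func wordify(s string, exported bool) string {
	s = strings.TrimRight(s, "{}")
	s = strings.TrimLeft(s, "*&")
	s = strings.Replace(s, ".", "", -1)
	if !exported {
		return s
	}
	return strings.ToUpper(string(s[0])) + s[1:]
}

func main() {
	fmt.Println(wordify("*MyType", true))        // MyType
	fmt.Println(wordify("interface{}", false))   // interface
	fmt.Println(wordify("sql.NullString", true)) // SqlNullString
}
```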
diff --git a/vendor/github.com/cheekybits/genny/parse/typesets.go b/vendor/github.com/cheekybits/genny/parse/typesets.go
deleted file mode 100644
index c30b9728..00000000
--- a/vendor/github.com/cheekybits/genny/parse/typesets.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package parse
-
-import "strings"
-
-const (
- typeSep = " "
- keyValueSep = "="
- valuesSep = ","
- builtins = "BUILTINS"
- numbers = "NUMBERS"
-)
-
-// TypeSet turns a type string into a []map[string]string
-// that can be given to parse.Generics for it to do its magic.
-//
-// Acceptable args are:
-//
-// Person=man
-// Person=man Animal=dog
-// Person=man Animal=dog Animal2=cat
-// Person=man,woman Animal=dog,cat
-// Person=man,woman,child Animal=dog,cat Place=london,paris
-func TypeSet(arg string) ([]map[string]string, error) {
-
- types := make(map[string][]string)
- var keys []string
- for _, pair := range strings.Split(arg, typeSep) {
- segs := strings.Split(pair, keyValueSep)
- if len(segs) != 2 {
- return nil, &errBadTypeArgs{Arg: arg, Message: "Generic=Specific expected"}
- }
- key := segs[0]
- keys = append(keys, key)
- types[key] = make([]string, 0)
- for _, t := range strings.Split(segs[1], valuesSep) {
- if t == builtins {
- types[key] = append(types[key], Builtins...)
- } else if t == numbers {
- types[key] = append(types[key], Numbers...)
- } else {
- types[key] = append(types[key], t)
- }
- }
- }
-
- cursors := make(map[string]int)
- for _, key := range keys {
- cursors[key] = 0
- }
-
- outChan := make(chan map[string]string)
- go func() {
- buildTypeSet(keys, 0, cursors, types, outChan)
- close(outChan)
- }()
-
- var typeSets []map[string]string
- for typeSet := range outChan {
- typeSets = append(typeSets, typeSet)
- }
-
- return typeSets, nil
-
-}
-
-func buildTypeSet(keys []string, keyI int, cursors map[string]int, types map[string][]string, out chan<- map[string]string) {
- key := keys[keyI]
- for cursors[key] < len(types[key]) {
- if keyI < len(keys)-1 {
- buildTypeSet(keys, keyI+1, copycursors(cursors), types, out)
- } else {
- // build the typeset for this combination
- ts := make(map[string]string)
- for k, vals := range types {
- ts[k] = vals[cursors[k]]
- }
- out <- ts
- }
- cursors[key]++
- }
-}
-
-func copycursors(source map[string]int) map[string]int {
- copy := make(map[string]int)
- for k, v := range source {
- copy[k] = v
- }
- return copy
-}
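A worked example of `TypeSet`: two keys with two values each expand, through the recursive cursor walk in `buildTypeSet`, into the full cartesian product of four type sets:

```go
package main

import (
	"fmt"

	"github.com/cheekybits/genny/parse"
)

func main() {
	sets, err := parse.TypeSet("Person=man,woman Animal=dog,cat")
	if err != nil {
		panic(err)
	}
	for _, ts := range sets {
		fmt.Println(ts)
	}
	// map[Animal:dog Person:man]
	// map[Animal:cat Person:man]
	// map[Animal:dog Person:woman]
	// map[Animal:cat Person:woman]
}
```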
diff --git a/vendor/github.com/chzyer/readline/.gitignore b/vendor/github.com/chzyer/readline/.gitignore
deleted file mode 100644
index a3062bea..00000000
--- a/vendor/github.com/chzyer/readline/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.vscode/*
diff --git a/vendor/github.com/chzyer/readline/.travis.yml b/vendor/github.com/chzyer/readline/.travis.yml
deleted file mode 100644
index 9c359554..00000000
--- a/vendor/github.com/chzyer/readline/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-go:
- - 1.x
-script:
- - GOOS=windows go install github.com/chzyer/readline/example/...
- - GOOS=linux go install github.com/chzyer/readline/example/...
- - GOOS=darwin go install github.com/chzyer/readline/example/...
- - go test -race -v
diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md
deleted file mode 100644
index 14ff5be1..00000000
--- a/vendor/github.com/chzyer/readline/CHANGELOG.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# ChangeLog
-
-### 1.4 - 2016-07-25
-
-* [#60][60] Support dynamic autocompletion
-* Fix ANSI parser on Windows
-* Fix wrong column width in complete mode on Windows
-* Remove the dependency on package "golang.org/x/crypto/ssh/terminal"
-
-### 1.3 - 2016-05-09
-
-* [#38][38] add SetChildren for prefix completer interface
-* [#42][42] improve multi-line compatibility
-* [#43][43] remove the sub-package (runes) for gopkg compatibility
-* [#46][46] auto-complete space-prefixed lines
-* [#48][48] support suspending the process (Ctrl+Z)
-* [#49][49] fix a bug when checking equality with the previous command
-* [#53][53] fix a bug that caused an integer divide-by-zero panic when the input buffer is empty
-
-### 1.2 - 2016-03-05
-
-* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), written by [@sahib](https://github.com/sahib)
-* [#23][23], support stdin remapping
-* [#27][27], add a `UniqueEditLine` to `Config`, which will erase the editing line after the user submits it; usually used in IM.
-* Add a demo for multiline [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go) which can submit one SQL statement across multiple lines.
-* Works even when stdin/stdout is not a tty.
-* Add new simple APIs for single-instance use; see [here](https://github.com/chzyer/readline/blob/master/std.go). History needs to be saved manually when using these APIs.
-* [#28][28], fixes history not working as expected.
-* [#33][33], vim mode now supports `c`, `d`, `x (delete character)`, `r (replace character)`
-
-### 1.1 - 2015-11-20
-
-* [#12][12] Add support for key `<Delete>`/`<Home>`/`<End>`
-* Only enter raw mode as needed (when calling `Readline()`); the program will receive signals (e.g. Ctrl+C) when not interacting with `readline`.
-* Bugs fixed for `PrefixCompleter`
-* Pressing `Ctrl+D` on an empty line returns `io.EOF`; pressing `Ctrl+C` at any time returns `ErrInterrupt` instead of `io.EOF`. This provides a shell-like user experience.
-* Customizable Interrupt/EOF prompt in `Config`
-* [#17][17] Change the atomic package to use 32-bit functions so it can run on 32-bit ARM devices
-* Provides a new password user experience (`readline.ReadPasswordEx()`).
-
-### 1.0 - 2015-10-14
-
-* Initial public release.
-
-[12]: https://github.com/chzyer/readline/pull/12
-[17]: https://github.com/chzyer/readline/pull/17
-[23]: https://github.com/chzyer/readline/pull/23
-[27]: https://github.com/chzyer/readline/pull/27
-[28]: https://github.com/chzyer/readline/pull/28
-[33]: https://github.com/chzyer/readline/pull/33
-[38]: https://github.com/chzyer/readline/pull/38
-[42]: https://github.com/chzyer/readline/pull/42
-[43]: https://github.com/chzyer/readline/pull/43
-[46]: https://github.com/chzyer/readline/pull/46
-[48]: https://github.com/chzyer/readline/pull/48
-[49]: https://github.com/chzyer/readline/pull/49
-[53]: https://github.com/chzyer/readline/pull/53
-[60]: https://github.com/chzyer/readline/pull/60
diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE
deleted file mode 100644
index c9afab3d..00000000
--- a/vendor/github.com/chzyer/readline/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Chzyer
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md
deleted file mode 100644
index fab974b7..00000000
--- a/vendor/github.com/chzyer/readline/README.md
+++ /dev/null
@@ -1,114 +0,0 @@
-[![Build Status](https://travis-ci.org/chzyer/readline.svg?branch=master)](https://travis-ci.org/chzyer/readline)
-[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.md)
-[![Version](https://img.shields.io/github/tag/chzyer/readline.svg)](https://github.com/chzyer/readline/releases)
-[![GoDoc](https://godoc.org/github.com/chzyer/readline?status.svg)](https://godoc.org/github.com/chzyer/readline)
-[![OpenCollective](https://opencollective.com/readline/badge/backers.svg)](#backers)
-[![OpenCollective](https://opencollective.com/readline/badge/sponsors.svg)](#sponsors)
-
-
-
-A powerful readline library for `Linux`, `macOS`, `Windows`, and `Solaris`
-
-## Guide
-
-* [Demo](example/readline-demo/readline-demo.go)
-* [Shortcut](doc/shortcut.md)
-
-## Repos using readline
-
-[![cockroachdb](https://img.shields.io/github/stars/cockroachdb/cockroach.svg?label=cockroachdb/cockroach)](https://github.com/cockroachdb/cockroach)
-[![robertkrimen/otto](https://img.shields.io/github/stars/robertkrimen/otto.svg?label=robertkrimen/otto)](https://github.com/robertkrimen/otto)
-[![empire](https://img.shields.io/github/stars/remind101/empire.svg?label=remind101/empire)](https://github.com/remind101/empire)
-[![mehrdadrad/mylg](https://img.shields.io/github/stars/mehrdadrad/mylg.svg?label=mehrdadrad/mylg)](https://github.com/mehrdadrad/mylg)
-[![knq/usql](https://img.shields.io/github/stars/knq/usql.svg?label=knq/usql)](https://github.com/knq/usql)
-[![youtube/doorman](https://img.shields.io/github/stars/youtube/doorman.svg?label=youtube/doorman)](https://github.com/youtube/doorman)
-[![bom-d-van/harp](https://img.shields.io/github/stars/bom-d-van/harp.svg?label=bom-d-van/harp)](https://github.com/bom-d-van/harp)
-[![abiosoft/ishell](https://img.shields.io/github/stars/abiosoft/ishell.svg?label=abiosoft/ishell)](https://github.com/abiosoft/ishell)
-[![Netflix/hal-9001](https://img.shields.io/github/stars/Netflix/hal-9001.svg?label=Netflix/hal-9001)](https://github.com/Netflix/hal-9001)
-[![docker/go-p9p](https://img.shields.io/github/stars/docker/go-p9p.svg?label=docker/go-p9p)](https://github.com/docker/go-p9p)
-
-
-## Feedback
-
-If you have any questions, please submit a GitHub issue; any pull request is welcome :)
-
-* [https://twitter.com/chzyer](https://twitter.com/chzyer)
-* [http://weibo.com/2145262190](http://weibo.com/2145262190)
-
-
-## Backers
-
-Love Readline? Help me keep it alive by donating funds to cover project expenses!
-[[Become a backer](https://opencollective.com/readline#backer)]
-
-## Sponsors
-
-Become a sponsor and get your logo here on our Github page. [[Become a sponsor](https://opencollective.com/readline#sponsor)]
-
diff --git a/vendor/github.com/chzyer/readline/ansi_windows.go b/vendor/github.com/chzyer/readline/ansi_windows.go
deleted file mode 100644
index 63b908c1..00000000
--- a/vendor/github.com/chzyer/readline/ansi_windows.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// +build windows
-
-package readline
-
-import (
- "bufio"
- "io"
- "strconv"
- "strings"
- "sync"
- "unicode/utf8"
- "unsafe"
-)
-
-const (
- _ = uint16(0)
- COLOR_FBLUE = 0x0001
- COLOR_FGREEN = 0x0002
- COLOR_FRED = 0x0004
- COLOR_FINTENSITY = 0x0008
-
- COLOR_BBLUE = 0x0010
- COLOR_BGREEN = 0x0020
- COLOR_BRED = 0x0040
- COLOR_BINTENSITY = 0x0080
-
- COMMON_LVB_UNDERSCORE = 0x8000
- COMMON_LVB_BOLD = 0x0007
-)
-
-var ColorTableFg = []word{
- 0, // 30: Black
- COLOR_FRED, // 31: Red
- COLOR_FGREEN, // 32: Green
- COLOR_FRED | COLOR_FGREEN, // 33: Yellow
- COLOR_FBLUE, // 34: Blue
- COLOR_FRED | COLOR_FBLUE, // 35: Magenta
- COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan
- COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White
-}
-
-var ColorTableBg = []word{
- 0, // 40: Black
- COLOR_BRED, // 41: Red
- COLOR_BGREEN, // 42: Green
- COLOR_BRED | COLOR_BGREEN, // 43: Yellow
- COLOR_BBLUE, // 44: Blue
- COLOR_BRED | COLOR_BBLUE, // 45: Magenta
- COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan
- COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White
-}
-
-type ANSIWriter struct {
- target io.Writer
- wg sync.WaitGroup
- ctx *ANSIWriterCtx
- sync.Mutex
-}
-
-func NewANSIWriter(w io.Writer) *ANSIWriter {
- a := &ANSIWriter{
- target: w,
- ctx: NewANSIWriterCtx(w),
- }
- return a
-}
-
-func (a *ANSIWriter) Close() error {
- a.wg.Wait()
- return nil
-}
-
-type ANSIWriterCtx struct {
- isEsc bool
- isEscSeq bool
- arg []string
- target *bufio.Writer
- wantFlush bool
-}
-
-func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx {
- return &ANSIWriterCtx{
- target: bufio.NewWriter(target),
- }
-}
-
-func (a *ANSIWriterCtx) Flush() {
- a.target.Flush()
-}
-
-func (a *ANSIWriterCtx) process(r rune) bool {
- if a.wantFlush {
- if r == 0 || r == CharEsc {
- a.wantFlush = false
- a.target.Flush()
- }
- }
- if a.isEscSeq {
- a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg)
- return true
- }
-
- switch r {
- case CharEsc:
- a.isEsc = true
- case '[':
- if a.isEsc {
- a.arg = nil
- a.isEscSeq = true
- a.isEsc = false
- break
- }
- fallthrough
- default:
- a.target.WriteRune(r)
- a.wantFlush = true
- }
- return true
-}
-
-func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool {
- arg := *argptr
- var err error
-
- if r >= 'A' && r <= 'D' {
- count := short(GetInt(arg, 1))
- info, err := GetConsoleScreenBufferInfo()
- if err != nil {
- return false
- }
- switch r {
- case 'A': // up
- info.dwCursorPosition.y -= count
- case 'B': // down
- info.dwCursorPosition.y += count
- case 'C': // right
- info.dwCursorPosition.x += count
- case 'D': // left
- info.dwCursorPosition.x -= count
- }
- SetConsoleCursorPosition(&info.dwCursorPosition)
- return false
- }
-
- switch r {
- case 'J':
- killLines()
- case 'K':
- eraseLine()
- case 'm':
- color := word(0)
- for _, item := range arg {
- var c int
- c, err = strconv.Atoi(item)
- if err != nil {
- w.WriteString("[" + strings.Join(arg, ";") + "m")
- break
- }
- if c >= 30 && c < 40 {
- color ^= COLOR_FINTENSITY
- color |= ColorTableFg[c-30]
- } else if c >= 40 && c < 50 {
- color ^= COLOR_BINTENSITY
- color |= ColorTableBg[c-40]
- } else if c == 4 {
- color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7]
- } else if c == 1 {
- color |= COMMON_LVB_BOLD | COLOR_FINTENSITY
-			} else { // treat unknown codes as reset
- color = ColorTableFg[7]
- }
- }
- if err != nil {
- break
- }
- kernel.SetConsoleTextAttribute(stdout, uintptr(color))
- case '\007': // set title
- case ';':
- if len(arg) == 0 || arg[len(arg)-1] != "" {
- arg = append(arg, "")
- *argptr = arg
- }
- return true
- default:
- if len(arg) == 0 {
- arg = append(arg, "")
- }
- arg[len(arg)-1] += string(r)
- *argptr = arg
- return true
- }
- *argptr = nil
- return false
-}
-
-func (a *ANSIWriter) Write(b []byte) (int, error) {
- a.Lock()
- defer a.Unlock()
-
- off := 0
- for len(b) > off {
- r, size := utf8.DecodeRune(b[off:])
- if size == 0 {
- return off, io.ErrShortWrite
- }
- off += size
- a.ctx.process(r)
- }
- a.ctx.Flush()
- return off, nil
-}
-
-func killLines() error {
- sbi, err := GetConsoleScreenBufferInfo()
- if err != nil {
- return err
- }
-
- size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x
- size += sbi.dwCursorPosition.x
-
- var written int
- kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]),
- uintptr(size),
- sbi.dwCursorPosition.ptr(),
- uintptr(unsafe.Pointer(&written)),
- )
- return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '),
- uintptr(size),
- sbi.dwCursorPosition.ptr(),
- uintptr(unsafe.Pointer(&written)),
- )
-}
-
-func eraseLine() error {
- sbi, err := GetConsoleScreenBufferInfo()
- if err != nil {
- return err
- }
-
- size := sbi.dwSize.x
- sbi.dwCursorPosition.x = 0
- var written int
- return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '),
- uintptr(size),
- sbi.dwCursorPosition.ptr(),
- uintptr(unsafe.Pointer(&written)),
- )
-}
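The `'m'` branch of `ioloopEscSeq` above translates ANSI SGR color codes into Windows console attribute words. A standalone sketch of that mapping for a single foreground code, with the constants copied from this file (`sgrToAttr` is an illustrative name):

```go
package main

import "fmt"

// Console attribute bits, copied from ansi_windows.go above.
const (
	colorFBlue      = 0x0001
	colorFGreen     = 0x0002
	colorFRed       = 0x0004
	colorFIntensity = 0x0008
)

// colorTableFg maps SGR codes 30-37 to attribute words, mirroring ColorTableFg.
var colorTableFg = []uint16{
	0,                                    // 30: black
	colorFRed,                            // 31: red
	colorFGreen,                          // 32: green
	colorFRed | colorFGreen,              // 33: yellow
	colorFBlue,                           // 34: blue
	colorFRed | colorFBlue,               // 35: magenta
	colorFGreen | colorFBlue,             // 36: cyan
	colorFRed | colorFGreen | colorFBlue, // 37: white
}

// sgrToAttr mirrors the 'm' branch for one foreground code: XOR the
// intensity bit, then OR in the color bits.
func sgrToAttr(code int) uint16 {
	var attr uint16
	if code >= 30 && code <= 37 {
		attr ^= colorFIntensity
		attr |= colorTableFg[code-30]
	}
	return attr
}

func main() {
	fmt.Printf("ESC[31m -> attribute %#04x\n", sgrToAttr(31))
}
```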
diff --git a/vendor/github.com/chzyer/readline/complete.go b/vendor/github.com/chzyer/readline/complete.go
deleted file mode 100644
index c08c9941..00000000
--- a/vendor/github.com/chzyer/readline/complete.go
+++ /dev/null
@@ -1,285 +0,0 @@
-package readline
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
-)
-
-type AutoCompleter interface {
-	// Readline will pass the whole line and the current cursor offset to it.
-	// The completer must return all candidates, plus the number of characters
-	// in the line that they share.
- // Example:
- // [go, git, git-shell, grep]
- // Do("g", 1) => ["o", "it", "it-shell", "rep"], 1
- // Do("gi", 2) => ["t", "t-shell"], 2
- // Do("git", 3) => ["", "-shell"], 3
- Do(line []rune, pos int) (newLine [][]rune, length int)
-}
-
-type TabCompleter struct{}
-
-func (t *TabCompleter) Do([]rune, int) ([][]rune, int) {
- return [][]rune{[]rune("\t")}, 0
-}
-
-type opCompleter struct {
- w io.Writer
- op *Operation
- width int
-
- inCompleteMode bool
- inSelectMode bool
- candidate [][]rune
- candidateSource []rune
- candidateOff int
- candidateChoise int
- candidateColNum int
-}
-
-func newOpCompleter(w io.Writer, op *Operation, width int) *opCompleter {
- return &opCompleter{
- w: w,
- op: op,
- width: width,
- }
-}
-
-func (o *opCompleter) doSelect() {
- if len(o.candidate) == 1 {
- o.op.buf.WriteRunes(o.candidate[0])
- o.ExitCompleteMode(false)
- return
- }
- o.nextCandidate(1)
- o.CompleteRefresh()
-}
-
-func (o *opCompleter) nextCandidate(i int) {
- o.candidateChoise += i
- o.candidateChoise = o.candidateChoise % len(o.candidate)
- if o.candidateChoise < 0 {
- o.candidateChoise = len(o.candidate) + o.candidateChoise
- }
-}
-
-func (o *opCompleter) OnComplete() bool {
- if o.width == 0 {
- return false
- }
- if o.IsInCompleteSelectMode() {
- o.doSelect()
- return true
- }
-
- buf := o.op.buf
- rs := buf.Runes()
-
- if o.IsInCompleteMode() && o.candidateSource != nil && runes.Equal(rs, o.candidateSource) {
- o.EnterCompleteSelectMode()
- o.doSelect()
- return true
- }
-
- o.ExitCompleteSelectMode()
- o.candidateSource = rs
- newLines, offset := o.op.cfg.AutoComplete.Do(rs, buf.idx)
- if len(newLines) == 0 {
- o.ExitCompleteMode(false)
- return true
- }
-
- // only Aggregate candidates in non-complete mode
- if !o.IsInCompleteMode() {
- if len(newLines) == 1 {
- buf.WriteRunes(newLines[0])
- o.ExitCompleteMode(false)
- return true
- }
-
- same, size := runes.Aggregate(newLines)
- if size > 0 {
- buf.WriteRunes(same)
- o.ExitCompleteMode(false)
- return true
- }
- }
-
- o.EnterCompleteMode(offset, newLines)
- return true
-}
-
-func (o *opCompleter) IsInCompleteSelectMode() bool {
- return o.inSelectMode
-}
-
-func (o *opCompleter) IsInCompleteMode() bool {
- return o.inCompleteMode
-}
-
-func (o *opCompleter) HandleCompleteSelect(r rune) bool {
- next := true
- switch r {
- case CharEnter, CharCtrlJ:
- next = false
- o.op.buf.WriteRunes(o.op.candidate[o.op.candidateChoise])
- o.ExitCompleteMode(false)
- case CharLineStart:
- num := o.candidateChoise % o.candidateColNum
- o.nextCandidate(-num)
- case CharLineEnd:
- num := o.candidateColNum - o.candidateChoise%o.candidateColNum - 1
- o.candidateChoise += num
- if o.candidateChoise >= len(o.candidate) {
- o.candidateChoise = len(o.candidate) - 1
- }
- case CharBackspace:
- o.ExitCompleteSelectMode()
- next = false
- case CharTab, CharForward:
- o.doSelect()
- case CharBell, CharInterrupt:
- o.ExitCompleteMode(true)
- next = false
- case CharNext:
- tmpChoise := o.candidateChoise + o.candidateColNum
- if tmpChoise >= o.getMatrixSize() {
- tmpChoise -= o.getMatrixSize()
- } else if tmpChoise >= len(o.candidate) {
- tmpChoise += o.candidateColNum
- tmpChoise -= o.getMatrixSize()
- }
- o.candidateChoise = tmpChoise
- case CharBackward:
- o.nextCandidate(-1)
- case CharPrev:
- tmpChoise := o.candidateChoise - o.candidateColNum
- if tmpChoise < 0 {
- tmpChoise += o.getMatrixSize()
- if tmpChoise >= len(o.candidate) {
- tmpChoise -= o.candidateColNum
- }
- }
- o.candidateChoise = tmpChoise
- default:
- next = false
- o.ExitCompleteSelectMode()
- }
- if next {
- o.CompleteRefresh()
- return true
- }
- return false
-}
-
-func (o *opCompleter) getMatrixSize() int {
- line := len(o.candidate) / o.candidateColNum
- if len(o.candidate)%o.candidateColNum != 0 {
- line++
- }
- return line * o.candidateColNum
-}
-
-func (o *opCompleter) OnWidthChange(newWidth int) {
- o.width = newWidth
-}
-
-func (o *opCompleter) CompleteRefresh() {
- if !o.inCompleteMode {
- return
- }
- lineCnt := o.op.buf.CursorLineCount()
- colWidth := 0
- for _, c := range o.candidate {
- w := runes.WidthAll(c)
- if w > colWidth {
- colWidth = w
- }
- }
- colWidth += o.candidateOff + 1
- same := o.op.buf.RuneSlice(-o.candidateOff)
-
-	// -1 to avoid reaching the end of the line
- width := o.width - 1
- colNum := width / colWidth
- if colNum != 0 {
- colWidth += (width - (colWidth * colNum)) / colNum
- }
-
- o.candidateColNum = colNum
- buf := bufio.NewWriter(o.w)
- buf.Write(bytes.Repeat([]byte("\n"), lineCnt))
-
- colIdx := 0
- lines := 1
- buf.WriteString("\033[J")
- for idx, c := range o.candidate {
- inSelect := idx == o.candidateChoise && o.IsInCompleteSelectMode()
- if inSelect {
- buf.WriteString("\033[30;47m")
- }
- buf.WriteString(string(same))
- buf.WriteString(string(c))
- buf.Write(bytes.Repeat([]byte(" "), colWidth-runes.WidthAll(c)-runes.WidthAll(same)))
-
- if inSelect {
- buf.WriteString("\033[0m")
- }
-
- colIdx++
- if colIdx == colNum {
- buf.WriteString("\n")
- lines++
- colIdx = 0
- }
- }
-
- // move back
- fmt.Fprintf(buf, "\033[%dA\r", lineCnt-1+lines)
- fmt.Fprintf(buf, "\033[%dC", o.op.buf.idx+o.op.buf.PromptLen())
- buf.Flush()
-}
-
-func (o *opCompleter) aggCandidate(candidate [][]rune) int {
- offset := 0
- for i := 0; i < len(candidate[0]); i++ {
- for j := 0; j < len(candidate)-1; j++ {
- if i > len(candidate[j]) {
- goto aggregate
- }
- if candidate[j][i] != candidate[j+1][i] {
- goto aggregate
- }
- }
- offset = i
- }
-aggregate:
- return offset
-}
-
-func (o *opCompleter) EnterCompleteSelectMode() {
- o.inSelectMode = true
- o.candidateChoise = -1
- o.CompleteRefresh()
-}
-
-func (o *opCompleter) EnterCompleteMode(offset int, candidate [][]rune) {
- o.inCompleteMode = true
- o.candidate = candidate
- o.candidateOff = offset
- o.CompleteRefresh()
-}
-
-func (o *opCompleter) ExitCompleteSelectMode() {
- o.inSelectMode = false
- o.candidate = nil
- o.candidateChoise = -1
- o.candidateOff = -1
- o.candidateSource = nil
-}
-
-func (o *opCompleter) ExitCompleteMode(revent bool) {
- o.inCompleteMode = false
- o.ExitCompleteSelectMode()
-}
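The `AutoCompleter` interface documented at the top of this file is easy to satisfy. A minimal sketch reproducing the `Do("gi", 2)` example from the doc comment (`wordsCompleter` is an illustrative name):

```go
package main

import (
	"fmt"
	"strings"
)

// wordsCompleter is a minimal AutoCompleter: given the line and cursor
// offset, it returns each candidate's remaining suffix plus the length
// of the fragment of the line being completed.
type wordsCompleter struct {
	words []string
}

func (w *wordsCompleter) Do(line []rune, pos int) ([][]rune, int) {
	prefix := string(line[:pos])
	var out [][]rune
	for _, cand := range w.words {
		if strings.HasPrefix(cand, prefix) {
			out = append(out, []rune(cand[len(prefix):]))
		}
	}
	return out, len(prefix)
}

func main() {
	c := &wordsCompleter{words: []string{"go", "git", "git-shell", "grep"}}
	newLine, length := c.Do([]rune("gi"), 2)
	for _, s := range newLine {
		fmt.Printf("%q ", string(s)) // "t" "t-shell"
	}
	fmt.Println(length) // 2
}
```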
diff --git a/vendor/github.com/chzyer/readline/complete_helper.go b/vendor/github.com/chzyer/readline/complete_helper.go
deleted file mode 100644
index 58d72487..00000000
--- a/vendor/github.com/chzyer/readline/complete_helper.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package readline
-
-import (
- "bytes"
- "strings"
-)
-
-// DynamicCompleteFunc is the callback type used for dynamic completion.
-type DynamicCompleteFunc func(string) []string
-
-type PrefixCompleterInterface interface {
- Print(prefix string, level int, buf *bytes.Buffer)
- Do(line []rune, pos int) (newLine [][]rune, length int)
- GetName() []rune
- GetChildren() []PrefixCompleterInterface
- SetChildren(children []PrefixCompleterInterface)
-}
-
-type DynamicPrefixCompleterInterface interface {
- PrefixCompleterInterface
- IsDynamic() bool
- GetDynamicNames(line []rune) [][]rune
-}
-
-type PrefixCompleter struct {
- Name []rune
- Dynamic bool
- Callback DynamicCompleteFunc
- Children []PrefixCompleterInterface
-}
-
-func (p *PrefixCompleter) Tree(prefix string) string {
- buf := bytes.NewBuffer(nil)
- p.Print(prefix, 0, buf)
- return buf.String()
-}
-
-func Print(p PrefixCompleterInterface, prefix string, level int, buf *bytes.Buffer) {
- if strings.TrimSpace(string(p.GetName())) != "" {
- buf.WriteString(prefix)
- if level > 0 {
- buf.WriteString("├")
- buf.WriteString(strings.Repeat("─", (level*4)-2))
- buf.WriteString(" ")
- }
- buf.WriteString(string(p.GetName()) + "\n")
- level++
- }
- for _, ch := range p.GetChildren() {
- ch.Print(prefix, level, buf)
- }
-}
-
-func (p *PrefixCompleter) Print(prefix string, level int, buf *bytes.Buffer) {
- Print(p, prefix, level, buf)
-}
-
-func (p *PrefixCompleter) IsDynamic() bool {
- return p.Dynamic
-}
-
-func (p *PrefixCompleter) GetName() []rune {
- return p.Name
-}
-
-func (p *PrefixCompleter) GetDynamicNames(line []rune) [][]rune {
- var names = [][]rune{}
- for _, name := range p.Callback(string(line)) {
- names = append(names, []rune(name+" "))
- }
- return names
-}
-
-func (p *PrefixCompleter) GetChildren() []PrefixCompleterInterface {
- return p.Children
-}
-
-func (p *PrefixCompleter) SetChildren(children []PrefixCompleterInterface) {
- p.Children = children
-}
-
-func NewPrefixCompleter(pc ...PrefixCompleterInterface) *PrefixCompleter {
- return PcItem("", pc...)
-}
-
-func PcItem(name string, pc ...PrefixCompleterInterface) *PrefixCompleter {
- name += " "
- return &PrefixCompleter{
- Name: []rune(name),
- Dynamic: false,
- Children: pc,
- }
-}
-
-func PcItemDynamic(callback DynamicCompleteFunc, pc ...PrefixCompleterInterface) *PrefixCompleter {
- return &PrefixCompleter{
- Callback: callback,
- Dynamic: true,
- Children: pc,
- }
-}
-
-func (p *PrefixCompleter) Do(line []rune, pos int) (newLine [][]rune, offset int) {
- return doInternal(p, line, pos, line)
-}
-
-func Do(p PrefixCompleterInterface, line []rune, pos int) (newLine [][]rune, offset int) {
- return doInternal(p, line, pos, line)
-}
-
-func doInternal(p PrefixCompleterInterface, line []rune, pos int, origLine []rune) (newLine [][]rune, offset int) {
- line = runes.TrimSpaceLeft(line[:pos])
- goNext := false
- var lineCompleter PrefixCompleterInterface
- for _, child := range p.GetChildren() {
- childNames := make([][]rune, 1)
-
- childDynamic, ok := child.(DynamicPrefixCompleterInterface)
- if ok && childDynamic.IsDynamic() {
- childNames = childDynamic.GetDynamicNames(origLine)
- } else {
- childNames[0] = child.GetName()
- }
-
- for _, childName := range childNames {
- if len(line) >= len(childName) {
- if runes.HasPrefix(line, childName) {
- if len(line) == len(childName) {
- newLine = append(newLine, []rune{' '})
- } else {
- newLine = append(newLine, childName)
- }
- offset = len(childName)
- lineCompleter = child
- goNext = true
- }
- } else {
- if runes.HasPrefix(childName, line) {
- newLine = append(newLine, childName[len(line):])
- offset = len(line)
- lineCompleter = child
- }
- }
- }
- }
-
- if len(newLine) != 1 {
- return
- }
-
- tmpLine := make([]rune, 0, len(line))
- for i := offset; i < len(line); i++ {
- if line[i] == ' ' {
- continue
- }
-
- tmpLine = append(tmpLine, line[i:]...)
- return doInternal(lineCompleter, tmpLine, len(tmpLine), origLine)
- }
-
- if goNext {
- return doInternal(lineCompleter, nil, 0, origLine)
- }
- return
-}
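
For reference, callers consumed the completer deleted above roughly as follows. This is a hedged sketch against the exported API visible in this hunk (`NewPrefixCompleter`, `PcItem`, `PcItemDynamic`, and the `Config.AutoComplete` hook); the command names and dynamic candidates are made up:

```go
package main

import "github.com/chzyer/readline"

func main() {
	// Static items complete fixed words; a dynamic item derives its
	// candidates from the line typed so far.
	completer := readline.NewPrefixCompleter(
		readline.PcItem("mode",
			readline.PcItem("vi"),
			readline.PcItem("emacs"),
		),
		readline.PcItemDynamic(func(line string) []string {
			return []string{"alpha", "beta"} // hypothetical candidates
		}),
	)

	rl, err := readline.NewEx(&readline.Config{
		Prompt:       "> ",
		AutoComplete: completer,
	})
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	for {
		line, err := rl.Readline()
		if err != nil { // io.EOF or readline.ErrInterrupt
			break
		}
		_ = line // application logic goes here
	}
}
```
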
diff --git a/vendor/github.com/chzyer/readline/complete_segment.go b/vendor/github.com/chzyer/readline/complete_segment.go
deleted file mode 100644
index 5ceadd80..00000000
--- a/vendor/github.com/chzyer/readline/complete_segment.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package readline
-
-type SegmentCompleter interface {
- // a
- // |- a1
- // |--- a11
- // |- a2
- // b
- // input:
-	// DoSegment([], 0) [a, b]
-	// DoSegment([a], 1) [a]
-	// DoSegment([a, ], 0) [a1, a2]
-	// DoSegment([a, a], 1) [a1, a2]
-	// DoSegment([a, a1], 2) [a1]
-	// DoSegment([a, a1, ], 0) [a11]
-	// DoSegment([a, a1, a], 1) [a11]
- DoSegment([][]rune, int) [][]rune
-}
-
-type dumpSegmentCompleter struct {
- f func([][]rune, int) [][]rune
-}
-
-func (d *dumpSegmentCompleter) DoSegment(segment [][]rune, n int) [][]rune {
- return d.f(segment, n)
-}
-
-func SegmentFunc(f func([][]rune, int) [][]rune) AutoCompleter {
- return &SegmentComplete{&dumpSegmentCompleter{f}}
-}
-
-func SegmentAutoComplete(completer SegmentCompleter) *SegmentComplete {
- return &SegmentComplete{
- SegmentCompleter: completer,
- }
-}
-
-type SegmentComplete struct {
- SegmentCompleter
-}
-
-func RetSegment(segments [][]rune, cands [][]rune, idx int) ([][]rune, int) {
- ret := make([][]rune, 0, len(cands))
- lastSegment := segments[len(segments)-1]
- for _, cand := range cands {
- if !runes.HasPrefix(cand, lastSegment) {
- continue
- }
- ret = append(ret, cand[len(lastSegment):])
- }
- return ret, idx
-}
-
-func SplitSegment(line []rune, pos int) ([][]rune, int) {
- segs := [][]rune{}
- lastIdx := -1
- line = line[:pos]
- pos = 0
- for idx, l := range line {
- if l == ' ' {
- pos = 0
- segs = append(segs, line[lastIdx+1:idx])
- lastIdx = idx
- } else {
- pos++
- }
- }
- segs = append(segs, line[lastIdx+1:])
- return segs, pos
-}
-
-func (c *SegmentComplete) Do(line []rune, pos int) (newLine [][]rune, offset int) {
-
- segment, idx := SplitSegment(line, pos)
-
- cands := c.DoSegment(segment, idx)
- newLine, offset = RetSegment(segment, cands, idx)
- for idx := range newLine {
- newLine[idx] = append(newLine[idx], ' ')
- }
- return newLine, offset
-}
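
The segment completer removed here splits the line on spaces and asks the callback for candidates for the segment under the cursor. A minimal fragment showing that contract (the candidate words are hypothetical):

```go
// SegmentFunc receives every space-separated segment typed so far plus the
// cursor offset within the last one, and returns completions for that segment.
var completer readline.AutoCompleter = readline.SegmentFunc(
	func(segments [][]rune, pos int) [][]rune {
		if len(segments) == 1 {
			return [][]rune{[]rune("status"), []rune("stop")} // hypothetical verbs
		}
		return [][]rune{[]rune("server-a"), []rune("server-b")} // hypothetical args
	})
```
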
diff --git a/vendor/github.com/chzyer/readline/history.go b/vendor/github.com/chzyer/readline/history.go
deleted file mode 100644
index 6b17c464..00000000
--- a/vendor/github.com/chzyer/readline/history.go
+++ /dev/null
@@ -1,330 +0,0 @@
-package readline
-
-import (
- "bufio"
- "container/list"
- "fmt"
- "os"
- "strings"
- "sync"
-)
-
-type hisItem struct {
- Source []rune
- Version int64
- Tmp []rune
-}
-
-func (h *hisItem) Clean() {
- h.Source = nil
- h.Tmp = nil
-}
-
-type opHistory struct {
- cfg *Config
- history *list.List
- historyVer int64
- current *list.Element
- fd *os.File
- fdLock sync.Mutex
- enable bool
-}
-
-func newOpHistory(cfg *Config) (o *opHistory) {
- o = &opHistory{
- cfg: cfg,
- history: list.New(),
- enable: true,
- }
- return o
-}
-
-func (o *opHistory) Reset() {
- o.history = list.New()
- o.current = nil
-}
-
-func (o *opHistory) IsHistoryClosed() bool {
- o.fdLock.Lock()
- defer o.fdLock.Unlock()
- return o.fd.Fd() == ^(uintptr(0))
-}
-
-func (o *opHistory) Init() {
- if o.IsHistoryClosed() {
- o.initHistory()
- }
-}
-
-func (o *opHistory) initHistory() {
- if o.cfg.HistoryFile != "" {
- o.historyUpdatePath(o.cfg.HistoryFile)
- }
-}
-
-// only called by initHistory
-func (o *opHistory) historyUpdatePath(path string) {
- o.fdLock.Lock()
- defer o.fdLock.Unlock()
- f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
- if err != nil {
- return
- }
- o.fd = f
- r := bufio.NewReader(o.fd)
- total := 0
- for ; ; total++ {
- line, err := r.ReadString('\n')
- if err != nil {
- break
- }
- // ignore the empty line
- line = strings.TrimSpace(line)
- if len(line) == 0 {
- continue
- }
- o.Push([]rune(line))
- o.Compact()
- }
- if total > o.cfg.HistoryLimit {
- o.rewriteLocked()
- }
- o.historyVer++
- o.Push(nil)
- return
-}
-
-func (o *opHistory) Compact() {
- for o.history.Len() > o.cfg.HistoryLimit && o.history.Len() > 0 {
- o.history.Remove(o.history.Front())
- }
-}
-
-func (o *opHistory) Rewrite() {
- o.fdLock.Lock()
- defer o.fdLock.Unlock()
- o.rewriteLocked()
-}
-
-func (o *opHistory) rewriteLocked() {
- if o.cfg.HistoryFile == "" {
- return
- }
-
- tmpFile := o.cfg.HistoryFile + ".tmp"
- fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0666)
- if err != nil {
- return
- }
-
- buf := bufio.NewWriter(fd)
- for elem := o.history.Front(); elem != nil; elem = elem.Next() {
- buf.WriteString(string(elem.Value.(*hisItem).Source) + "\n")
- }
- buf.Flush()
-
- // replace history file
- if err = os.Rename(tmpFile, o.cfg.HistoryFile); err != nil {
- fd.Close()
- return
- }
-
- if o.fd != nil {
- o.fd.Close()
- }
-	// fd is write-only, which satisfies what we need.
- o.fd = fd
-}
-
-func (o *opHistory) Close() {
- o.fdLock.Lock()
- defer o.fdLock.Unlock()
- if o.fd != nil {
- o.fd.Close()
- }
-}
-
-func (o *opHistory) FindBck(isNewSearch bool, rs []rune, start int) (int, *list.Element) {
- for elem := o.current; elem != nil; elem = elem.Prev() {
- item := o.showItem(elem.Value)
- if isNewSearch {
- start += len(rs)
- }
- if elem == o.current {
- if len(item) >= start {
- item = item[:start]
- }
- }
- idx := runes.IndexAllBckEx(item, rs, o.cfg.HistorySearchFold)
- if idx < 0 {
- continue
- }
- return idx, elem
- }
- return -1, nil
-}
-
-func (o *opHistory) FindFwd(isNewSearch bool, rs []rune, start int) (int, *list.Element) {
- for elem := o.current; elem != nil; elem = elem.Next() {
- item := o.showItem(elem.Value)
- if isNewSearch {
- start -= len(rs)
- if start < 0 {
- start = 0
- }
- }
- if elem == o.current {
- if len(item)-1 >= start {
- item = item[start:]
- } else {
- continue
- }
- }
- idx := runes.IndexAllEx(item, rs, o.cfg.HistorySearchFold)
- if idx < 0 {
- continue
- }
- if elem == o.current {
- idx += start
- }
- return idx, elem
- }
- return -1, nil
-}
-
-func (o *opHistory) showItem(obj interface{}) []rune {
- item := obj.(*hisItem)
- if item.Version == o.historyVer {
- return item.Tmp
- }
- return item.Source
-}
-
-func (o *opHistory) Prev() []rune {
- if o.current == nil {
- return nil
- }
- current := o.current.Prev()
- if current == nil {
- return nil
- }
- o.current = current
- return runes.Copy(o.showItem(current.Value))
-}
-
-func (o *opHistory) Next() ([]rune, bool) {
- if o.current == nil {
- return nil, false
- }
- current := o.current.Next()
- if current == nil {
- return nil, false
- }
-
- o.current = current
- return runes.Copy(o.showItem(current.Value)), true
-}
-
-// Disable the current history
-func (o *opHistory) Disable() {
- o.enable = false
-}
-
-// Enable the current history
-func (o *opHistory) Enable() {
- o.enable = true
-}
-
-func (o *opHistory) debug() {
- Debug("-------")
- for item := o.history.Front(); item != nil; item = item.Next() {
- Debug(fmt.Sprintf("%+v", item.Value))
- }
-}
-
-// save history
-func (o *opHistory) New(current []rune) (err error) {
-
- // history deactivated
- if !o.enable {
- return nil
- }
-
- current = runes.Copy(current)
-
-	// if the last command is reused without modification,
-	// just clean the latest history entry
- if back := o.history.Back(); back != nil {
- prev := back.Prev()
- if prev != nil {
- if runes.Equal(current, prev.Value.(*hisItem).Source) {
- o.current = o.history.Back()
- o.current.Value.(*hisItem).Clean()
- o.historyVer++
- return nil
- }
- }
- }
-
- if len(current) == 0 {
- o.current = o.history.Back()
- if o.current != nil {
- o.current.Value.(*hisItem).Clean()
- o.historyVer++
- return nil
- }
- }
-
- if o.current != o.history.Back() {
- // move history item to current command
- currentItem := o.current.Value.(*hisItem)
- // set current to last item
- o.current = o.history.Back()
-
- current = runes.Copy(currentItem.Tmp)
- }
-
-	// err can only be an IO error; just report it
- err = o.Update(current, true)
-
- // push a new one to commit current command
- o.historyVer++
- o.Push(nil)
- return
-}
-
-func (o *opHistory) Revert() {
- o.historyVer++
- o.current = o.history.Back()
-}
-
-func (o *opHistory) Update(s []rune, commit bool) (err error) {
- o.fdLock.Lock()
- defer o.fdLock.Unlock()
- s = runes.Copy(s)
- if o.current == nil {
- o.Push(s)
- o.Compact()
- return
- }
- r := o.current.Value.(*hisItem)
- r.Version = o.historyVer
- if commit {
- r.Source = s
- if o.fd != nil {
- // just report the error
- _, err = o.fd.Write([]byte(string(r.Source) + "\n"))
- }
- } else {
- r.Tmp = append(r.Tmp[:0], s...)
- }
- o.current.Value = r
- o.Compact()
- return
-}
-
-func (o *opHistory) Push(s []rune) {
- s = runes.Copy(s)
- elem := o.history.PushBack(&hisItem{Source: s})
- o.current = elem
-}
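
The history machinery deleted here is driven entirely through `Config`; a short fragment showing the knobs it honors (the file path is a placeholder):

```go
rl, err := readline.NewEx(&readline.Config{
	Prompt:            "> ",
	HistoryFile:       "/tmp/demo.history", // loaded and appended to by opHistory
	HistoryLimit:      500,                 // Compact() trims entries beyond this
	HistorySearchFold: true,                // case-insensitive Ctrl-R / Ctrl-S search
})
if err != nil {
	panic(err)
}
defer rl.Close()

// With DisableAutoSaveHistory set, entries are committed manually instead:
_ = rl.SaveHistory("only this line is remembered")
```
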
diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go
deleted file mode 100644
index 4c31624f..00000000
--- a/vendor/github.com/chzyer/readline/operation.go
+++ /dev/null
@@ -1,531 +0,0 @@
-package readline
-
-import (
- "errors"
- "io"
- "sync"
-)
-
-var (
- ErrInterrupt = errors.New("Interrupt")
-)
-
-type InterruptError struct {
- Line []rune
-}
-
-func (*InterruptError) Error() string {
- return "Interrupted"
-}
-
-type Operation struct {
- m sync.Mutex
- cfg *Config
- t *Terminal
- buf *RuneBuffer
- outchan chan []rune
- errchan chan error
- w io.Writer
-
- history *opHistory
- *opSearch
- *opCompleter
- *opPassword
- *opVim
-}
-
-func (o *Operation) SetBuffer(what string) {
- o.buf.Set([]rune(what))
-}
-
-type wrapWriter struct {
- r *Operation
- t *Terminal
- target io.Writer
-}
-
-func (w *wrapWriter) Write(b []byte) (int, error) {
- if !w.t.IsReading() {
- return w.target.Write(b)
- }
-
- var (
- n int
- err error
- )
- w.r.buf.Refresh(func() {
- n, err = w.target.Write(b)
- })
-
- if w.r.IsSearchMode() {
- w.r.SearchRefresh(-1)
- }
- if w.r.IsInCompleteMode() {
- w.r.CompleteRefresh()
- }
- return n, err
-}
-
-func NewOperation(t *Terminal, cfg *Config) *Operation {
- width := cfg.FuncGetWidth()
- op := &Operation{
- t: t,
- buf: NewRuneBuffer(t, cfg.Prompt, cfg, width),
- outchan: make(chan []rune),
- errchan: make(chan error, 1),
- }
- op.w = op.buf.w
- op.SetConfig(cfg)
- op.opVim = newVimMode(op)
- op.opCompleter = newOpCompleter(op.buf.w, op, width)
- op.opPassword = newOpPassword(op)
- op.cfg.FuncOnWidthChanged(func() {
- newWidth := cfg.FuncGetWidth()
- op.opCompleter.OnWidthChange(newWidth)
- op.opSearch.OnWidthChange(newWidth)
- op.buf.OnWidthChange(newWidth)
- })
- go op.ioloop()
- return op
-}
-
-func (o *Operation) SetPrompt(s string) {
- o.buf.SetPrompt(s)
-}
-
-func (o *Operation) SetMaskRune(r rune) {
- o.buf.SetMask(r)
-}
-
-func (o *Operation) GetConfig() *Config {
- o.m.Lock()
- cfg := *o.cfg
- o.m.Unlock()
- return &cfg
-}
-
-func (o *Operation) ioloop() {
- for {
- keepInSearchMode := false
- keepInCompleteMode := false
- r := o.t.ReadRune()
- if o.GetConfig().FuncFilterInputRune != nil {
- var process bool
- r, process = o.GetConfig().FuncFilterInputRune(r)
- if !process {
- o.buf.Refresh(nil) // to refresh the line
- continue // ignore this rune
- }
- }
-
- if r == 0 { // io.EOF
- if o.buf.Len() == 0 {
- o.buf.Clean()
- select {
- case o.errchan <- io.EOF:
- }
- break
- } else {
-			// if stdin got io.EOF and there is something left in the buffer,
-			// flush it by sending CharEnter; we will get io.EOF
-			// in the next loop.
- r = CharEnter
- }
- }
- isUpdateHistory := true
-
- if o.IsInCompleteSelectMode() {
- keepInCompleteMode = o.HandleCompleteSelect(r)
- if keepInCompleteMode {
- continue
- }
-
- o.buf.Refresh(nil)
- switch r {
- case CharEnter, CharCtrlJ:
- o.history.Update(o.buf.Runes(), false)
- fallthrough
- case CharInterrupt:
- o.t.KickRead()
- fallthrough
- case CharBell:
- continue
- }
- }
-
- if o.IsEnableVimMode() {
- r = o.HandleVim(r, o.t.ReadRune)
- if r == 0 {
- continue
- }
- }
-
- switch r {
- case CharBell:
- if o.IsSearchMode() {
- o.ExitSearchMode(true)
- o.buf.Refresh(nil)
- }
- if o.IsInCompleteMode() {
- o.ExitCompleteMode(true)
- o.buf.Refresh(nil)
- }
- case CharTab:
- if o.GetConfig().AutoComplete == nil {
- o.t.Bell()
- break
- }
- if o.OnComplete() {
- keepInCompleteMode = true
- } else {
- o.t.Bell()
- break
- }
-
- case CharBckSearch:
- if !o.SearchMode(S_DIR_BCK) {
- o.t.Bell()
- break
- }
- keepInSearchMode = true
- case CharCtrlU:
- o.buf.KillFront()
- case CharFwdSearch:
- if !o.SearchMode(S_DIR_FWD) {
- o.t.Bell()
- break
- }
- keepInSearchMode = true
- case CharKill:
- o.buf.Kill()
- keepInCompleteMode = true
- case MetaForward:
- o.buf.MoveToNextWord()
- case CharTranspose:
- o.buf.Transpose()
- case MetaBackward:
- o.buf.MoveToPrevWord()
- case MetaDelete:
- o.buf.DeleteWord()
- case CharLineStart:
- o.buf.MoveToLineStart()
- case CharLineEnd:
- o.buf.MoveToLineEnd()
- case CharBackspace, CharCtrlH:
- if o.IsSearchMode() {
- o.SearchBackspace()
- keepInSearchMode = true
- break
- }
-
- if o.buf.Len() == 0 {
- o.t.Bell()
- break
- }
- o.buf.Backspace()
- if o.IsInCompleteMode() {
- o.OnComplete()
- }
- case CharCtrlZ:
- o.buf.Clean()
- o.t.SleepToResume()
- o.Refresh()
- case CharCtrlL:
- ClearScreen(o.w)
- o.Refresh()
- case MetaBackspace, CharCtrlW:
- o.buf.BackEscapeWord()
- case CharCtrlY:
- o.buf.Yank()
- case CharEnter, CharCtrlJ:
- if o.IsSearchMode() {
- o.ExitSearchMode(false)
- }
- o.buf.MoveToLineEnd()
- var data []rune
- if !o.GetConfig().UniqueEditLine {
- o.buf.WriteRune('\n')
- data = o.buf.Reset()
- data = data[:len(data)-1] // trim \n
- } else {
- o.buf.Clean()
- data = o.buf.Reset()
- }
- o.outchan <- data
- if !o.GetConfig().DisableAutoSaveHistory {
- // ignore IO error
- _ = o.history.New(data)
- } else {
- isUpdateHistory = false
- }
- case CharBackward:
- o.buf.MoveBackward()
- case CharForward:
- o.buf.MoveForward()
- case CharPrev:
- buf := o.history.Prev()
- if buf != nil {
- o.buf.Set(buf)
- } else {
- o.t.Bell()
- }
- case CharNext:
- buf, ok := o.history.Next()
- if ok {
- o.buf.Set(buf)
- } else {
- o.t.Bell()
- }
- case CharDelete:
- if o.buf.Len() > 0 || !o.IsNormalMode() {
- o.t.KickRead()
- if !o.buf.Delete() {
- o.t.Bell()
- }
- break
- }
-
- // treat as EOF
- if !o.GetConfig().UniqueEditLine {
- o.buf.WriteString(o.GetConfig().EOFPrompt + "\n")
- }
- o.buf.Reset()
- isUpdateHistory = false
- o.history.Revert()
- o.errchan <- io.EOF
- if o.GetConfig().UniqueEditLine {
- o.buf.Clean()
- }
- case CharInterrupt:
- if o.IsSearchMode() {
- o.t.KickRead()
- o.ExitSearchMode(true)
- break
- }
- if o.IsInCompleteMode() {
- o.t.KickRead()
- o.ExitCompleteMode(true)
- o.buf.Refresh(nil)
- break
- }
- o.buf.MoveToLineEnd()
- o.buf.Refresh(nil)
- hint := o.GetConfig().InterruptPrompt + "\n"
- if !o.GetConfig().UniqueEditLine {
- o.buf.WriteString(hint)
- }
- remain := o.buf.Reset()
- if !o.GetConfig().UniqueEditLine {
- remain = remain[:len(remain)-len([]rune(hint))]
- }
- isUpdateHistory = false
- o.history.Revert()
- o.errchan <- &InterruptError{remain}
- default:
- if o.IsSearchMode() {
- o.SearchChar(r)
- keepInSearchMode = true
- break
- }
- o.buf.WriteRune(r)
- if o.IsInCompleteMode() {
- o.OnComplete()
- keepInCompleteMode = true
- }
- }
-
- listener := o.GetConfig().Listener
- if listener != nil {
- newLine, newPos, ok := listener.OnChange(o.buf.Runes(), o.buf.Pos(), r)
- if ok {
- o.buf.SetWithIdx(newPos, newLine)
- }
- }
-
- o.m.Lock()
- if !keepInSearchMode && o.IsSearchMode() {
- o.ExitSearchMode(false)
- o.buf.Refresh(nil)
- } else if o.IsInCompleteMode() {
- if !keepInCompleteMode {
- o.ExitCompleteMode(false)
- o.Refresh()
- } else {
- o.buf.Refresh(nil)
- o.CompleteRefresh()
- }
- }
- if isUpdateHistory && !o.IsSearchMode() {
-			// skipped during search mode: it would cause a null history entry
- o.history.Update(o.buf.Runes(), false)
- }
- o.m.Unlock()
- }
-}
-
-func (o *Operation) Stderr() io.Writer {
- return &wrapWriter{target: o.GetConfig().Stderr, r: o, t: o.t}
-}
-
-func (o *Operation) Stdout() io.Writer {
- return &wrapWriter{target: o.GetConfig().Stdout, r: o, t: o.t}
-}
-
-func (o *Operation) String() (string, error) {
- r, err := o.Runes()
- return string(r), err
-}
-
-func (o *Operation) Runes() ([]rune, error) {
- o.t.EnterRawMode()
- defer o.t.ExitRawMode()
-
- listener := o.GetConfig().Listener
- if listener != nil {
- listener.OnChange(nil, 0, 0)
- }
-
- o.buf.Refresh(nil) // print prompt
- o.t.KickRead()
- select {
- case r := <-o.outchan:
- return r, nil
- case err := <-o.errchan:
- if e, ok := err.(*InterruptError); ok {
- return e.Line, ErrInterrupt
- }
- return nil, err
- }
-}
-
-func (o *Operation) PasswordEx(prompt string, l Listener) ([]byte, error) {
- cfg := o.GenPasswordConfig()
- cfg.Prompt = prompt
- cfg.Listener = l
- return o.PasswordWithConfig(cfg)
-}
-
-func (o *Operation) GenPasswordConfig() *Config {
- return o.opPassword.PasswordConfig()
-}
-
-func (o *Operation) PasswordWithConfig(cfg *Config) ([]byte, error) {
- if err := o.opPassword.EnterPasswordMode(cfg); err != nil {
- return nil, err
- }
- defer o.opPassword.ExitPasswordMode()
- return o.Slice()
-}
-
-func (o *Operation) Password(prompt string) ([]byte, error) {
- return o.PasswordEx(prompt, nil)
-}
-
-func (o *Operation) SetTitle(t string) {
- o.w.Write([]byte("\033[2;" + t + "\007"))
-}
-
-func (o *Operation) Slice() ([]byte, error) {
- r, err := o.Runes()
- if err != nil {
- return nil, err
- }
- return []byte(string(r)), nil
-}
-
-func (o *Operation) Close() {
- o.history.Close()
-}
-
-func (o *Operation) SetHistoryPath(path string) {
- if o.history != nil {
- o.history.Close()
- }
- o.cfg.HistoryFile = path
- o.history = newOpHistory(o.cfg)
-}
-
-func (o *Operation) IsNormalMode() bool {
- return !o.IsInCompleteMode() && !o.IsSearchMode()
-}
-
-func (op *Operation) SetConfig(cfg *Config) (*Config, error) {
- op.m.Lock()
- defer op.m.Unlock()
- if op.cfg == cfg {
- return op.cfg, nil
- }
- if err := cfg.Init(); err != nil {
- return op.cfg, err
- }
- old := op.cfg
- op.cfg = cfg
- op.SetPrompt(cfg.Prompt)
- op.SetMaskRune(cfg.MaskRune)
- op.buf.SetConfig(cfg)
- width := op.cfg.FuncGetWidth()
-
- if cfg.opHistory == nil {
- op.SetHistoryPath(cfg.HistoryFile)
- cfg.opHistory = op.history
- cfg.opSearch = newOpSearch(op.buf.w, op.buf, op.history, cfg, width)
- }
- op.history = cfg.opHistory
-
-	// SetHistoryPath closes any opHistory that already exists,
-	// so before the next use it must be reopened via `Init()`
- op.history.Init()
-
- if op.cfg.AutoComplete != nil {
- op.opCompleter = newOpCompleter(op.buf.w, op, width)
- }
-
- op.opSearch = cfg.opSearch
- return old, nil
-}
-
-func (o *Operation) ResetHistory() {
- o.history.Reset()
-}
-
-// a non-nil err only means the write to the history file failed;
-// everything else went fine.
-func (o *Operation) SaveHistory(content string) error {
- return o.history.New([]rune(content))
-}
-
-func (o *Operation) Refresh() {
- if o.t.IsReading() {
- o.buf.Refresh(nil)
- }
-}
-
-func (o *Operation) Clean() {
- o.buf.Clean()
-}
-
-func FuncListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) Listener {
- return &DumpListener{f: f}
-}
-
-type DumpListener struct {
- f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)
-}
-
-func (d *DumpListener) OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) {
- return d.f(line, pos, key)
-}
-
-type Listener interface {
- OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)
-}
-
-type Painter interface {
- Paint(line []rune, pos int) []rune
-}
-
-type defaultPainter struct{}
-
-func (p *defaultPainter) Paint(line []rune, _ int) []rune {
- return line
-}
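
Two of the hooks the ioloop above dispatches are easiest to see in isolation: the per-keypress `Listener` and the `FuncFilterInputRune` pre-filter. A hedged fragment (the filtering policy itself is made up):

```go
cfg := &readline.Config{Prompt: "> "}

// Observe (and optionally rewrite) the buffer on every keypress;
// returning ok=false leaves the line untouched.
cfg.Listener = readline.FuncListener(func(line []rune, pos int, key rune) ([]rune, int, bool) {
	return nil, 0, false
})

// Drop or translate runes before the ioloop sees them; here Ctrl-Z is
// swallowed so the process is never suspended.
cfg.FuncFilterInputRune = func(r rune) (rune, bool) {
	if r == readline.CharCtrlZ {
		return r, false // false: skip processing this rune
	}
	return r, true
}
```
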
diff --git a/vendor/github.com/chzyer/readline/password.go b/vendor/github.com/chzyer/readline/password.go
deleted file mode 100644
index 414288c2..00000000
--- a/vendor/github.com/chzyer/readline/password.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package readline
-
-type opPassword struct {
- o *Operation
- backupCfg *Config
-}
-
-func newOpPassword(o *Operation) *opPassword {
- return &opPassword{o: o}
-}
-
-func (o *opPassword) ExitPasswordMode() {
- o.o.SetConfig(o.backupCfg)
- o.backupCfg = nil
-}
-
-func (o *opPassword) EnterPasswordMode(cfg *Config) (err error) {
- o.backupCfg, err = o.o.SetConfig(cfg)
- return
-}
-
-func (o *opPassword) PasswordConfig() *Config {
- return &Config{
- EnableMask: true,
- InterruptPrompt: "\n",
- EOFPrompt: "\n",
- HistoryLimit: -1,
- Painter: &defaultPainter{},
-
- Stdout: o.o.cfg.Stdout,
- Stderr: o.o.cfg.Stderr,
- }
-}
diff --git a/vendor/github.com/chzyer/readline/rawreader_windows.go b/vendor/github.com/chzyer/readline/rawreader_windows.go
deleted file mode 100644
index 073ef150..00000000
--- a/vendor/github.com/chzyer/readline/rawreader_windows.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// +build windows
-
-package readline
-
-import "unsafe"
-
-const (
- VK_CANCEL = 0x03
- VK_BACK = 0x08
- VK_TAB = 0x09
- VK_RETURN = 0x0D
- VK_SHIFT = 0x10
- VK_CONTROL = 0x11
- VK_MENU = 0x12
- VK_ESCAPE = 0x1B
- VK_LEFT = 0x25
- VK_UP = 0x26
- VK_RIGHT = 0x27
- VK_DOWN = 0x28
- VK_DELETE = 0x2E
- VK_LSHIFT = 0xA0
- VK_RSHIFT = 0xA1
- VK_LCONTROL = 0xA2
- VK_RCONTROL = 0xA3
-)
-
-// RawReader translates input records to ANSI escape sequences
-// to provide the same behavior as a unix terminal.
-type RawReader struct {
- ctrlKey bool
- altKey bool
-}
-
-func NewRawReader() *RawReader {
- r := new(RawReader)
- return r
-}
-
-// only process one action in one read
-func (r *RawReader) Read(buf []byte) (int, error) {
- ir := new(_INPUT_RECORD)
- var read int
- var err error
-next:
- err = kernel.ReadConsoleInputW(stdin,
- uintptr(unsafe.Pointer(ir)),
- 1,
- uintptr(unsafe.Pointer(&read)),
- )
- if err != nil {
- return 0, err
- }
- if ir.EventType != EVENT_KEY {
- goto next
- }
- ker := (*_KEY_EVENT_RECORD)(unsafe.Pointer(&ir.Event[0]))
- if ker.bKeyDown == 0 { // keyup
- if r.ctrlKey || r.altKey {
- switch ker.wVirtualKeyCode {
- case VK_RCONTROL, VK_LCONTROL:
- r.ctrlKey = false
- case VK_MENU: //alt
- r.altKey = false
- }
- }
- goto next
- }
-
- if ker.unicodeChar == 0 {
- var target rune
- switch ker.wVirtualKeyCode {
- case VK_RCONTROL, VK_LCONTROL:
- r.ctrlKey = true
- case VK_MENU: //alt
- r.altKey = true
- case VK_LEFT:
- target = CharBackward
- case VK_RIGHT:
- target = CharForward
- case VK_UP:
- target = CharPrev
- case VK_DOWN:
- target = CharNext
- }
- if target != 0 {
- return r.write(buf, target)
- }
- goto next
- }
- char := rune(ker.unicodeChar)
- if r.ctrlKey {
- switch char {
- case 'A':
- char = CharLineStart
- case 'E':
- char = CharLineEnd
- case 'R':
- char = CharBckSearch
- case 'S':
- char = CharFwdSearch
- }
- } else if r.altKey {
- switch char {
- case VK_BACK:
- char = CharBackspace
- }
- return r.writeEsc(buf, char)
- }
- return r.write(buf, char)
-}
-
-func (r *RawReader) writeEsc(b []byte, char rune) (int, error) {
- b[0] = '\033'
- n := copy(b[1:], []byte(string(char)))
- return n + 1, nil
-}
-
-func (r *RawReader) write(b []byte, char rune) (int, error) {
- n := copy(b, []byte(string(char)))
- return n, nil
-}
-
-func (r *RawReader) Close() error {
- return nil
-}
diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go
deleted file mode 100644
index 0e7aca06..00000000
--- a/vendor/github.com/chzyer/readline/readline.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// Readline is a pure Go implementation of a GNU Readline-style library.
-//
-// example:
-// rl, err := readline.New("> ")
-// if err != nil {
-// panic(err)
-// }
-// defer rl.Close()
-//
-// for {
-// line, err := rl.Readline()
-// if err != nil { // io.EOF
-// break
-// }
-// println(line)
-// }
-//
-package readline
-
-import "io"
-
-type Instance struct {
- Config *Config
- Terminal *Terminal
- Operation *Operation
-}
-
-type Config struct {
- // prompt supports ANSI escape sequence, so we can color some characters even in windows
- Prompt string
-
-	// readline will persist history to the file specified by HistoryFile
- HistoryFile string
-	// specify the max number of history entries; 500 by default, set to -1 to disable history
- HistoryLimit int
- DisableAutoSaveHistory bool
- // enable case-insensitive history searching
- HistorySearchFold bool
-
-	// AutoCompleter will be called once the user presses TAB
- AutoComplete AutoCompleter
-
-	// Any key press will be passed to the Listener
- // NOTE: Listener will be triggered by (nil, 0, 0) immediately
- Listener Listener
-
- Painter Painter
-
-	// If VimMode is true, readline will be in vim insert mode by default
- VimMode bool
-
- InterruptPrompt string
- EOFPrompt string
-
- FuncGetWidth func() int
-
- Stdin io.ReadCloser
- StdinWriter io.Writer
- Stdout io.Writer
- Stderr io.Writer
-
- EnableMask bool
- MaskRune rune
-
-	// erase the editing line after the user submits it;
-	// usually used in IM applications.
- UniqueEditLine bool
-
-	// filter input runes (may be used to disable CtrlZ or to translate some keys to different actions)
-	// -> output = the new (translated) rune and true/false for whether to continue processing it
- FuncFilterInputRune func(rune) (rune, bool)
-
-	// force interactive mode even if stdout is not a tty
- FuncIsTerminal func() bool
- FuncMakeRaw func() error
- FuncExitRaw func() error
- FuncOnWidthChanged func(func())
- ForceUseInteractive bool
-
- // private fields
- inited bool
- opHistory *opHistory
- opSearch *opSearch
-}
-
-func (c *Config) useInteractive() bool {
- if c.ForceUseInteractive {
- return true
- }
- return c.FuncIsTerminal()
-}
-
-func (c *Config) Init() error {
- if c.inited {
- return nil
- }
- c.inited = true
- if c.Stdin == nil {
- c.Stdin = NewCancelableStdin(Stdin)
- }
-
- c.Stdin, c.StdinWriter = NewFillableStdin(c.Stdin)
-
- if c.Stdout == nil {
- c.Stdout = Stdout
- }
- if c.Stderr == nil {
- c.Stderr = Stderr
- }
- if c.HistoryLimit == 0 {
- c.HistoryLimit = 500
- }
-
- if c.InterruptPrompt == "" {
- c.InterruptPrompt = "^C"
- } else if c.InterruptPrompt == "\n" {
- c.InterruptPrompt = ""
- }
- if c.EOFPrompt == "" {
- c.EOFPrompt = "^D"
- } else if c.EOFPrompt == "\n" {
- c.EOFPrompt = ""
- }
-
- if c.AutoComplete == nil {
- c.AutoComplete = &TabCompleter{}
- }
- if c.FuncGetWidth == nil {
- c.FuncGetWidth = GetScreenWidth
- }
- if c.FuncIsTerminal == nil {
- c.FuncIsTerminal = DefaultIsTerminal
- }
- rm := new(RawMode)
- if c.FuncMakeRaw == nil {
- c.FuncMakeRaw = rm.Enter
- }
- if c.FuncExitRaw == nil {
- c.FuncExitRaw = rm.Exit
- }
- if c.FuncOnWidthChanged == nil {
- c.FuncOnWidthChanged = DefaultOnWidthChanged
- }
-
- return nil
-}
-
-func (c Config) Clone() *Config {
- c.opHistory = nil
- c.opSearch = nil
- return &c
-}
-
-func (c *Config) SetListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) {
- c.Listener = FuncListener(f)
-}
-
-func (c *Config) SetPainter(p Painter) {
- c.Painter = p
-}
-
-func NewEx(cfg *Config) (*Instance, error) {
- t, err := NewTerminal(cfg)
- if err != nil {
- return nil, err
- }
- rl := t.Readline()
- if cfg.Painter == nil {
- cfg.Painter = &defaultPainter{}
- }
- return &Instance{
- Config: cfg,
- Terminal: t,
- Operation: rl,
- }, nil
-}
-
-func New(prompt string) (*Instance, error) {
- return NewEx(&Config{Prompt: prompt})
-}
-
-func (i *Instance) ResetHistory() {
- i.Operation.ResetHistory()
-}
-
-func (i *Instance) SetPrompt(s string) {
- i.Operation.SetPrompt(s)
-}
-
-func (i *Instance) SetMaskRune(r rune) {
- i.Operation.SetMaskRune(r)
-}
-
-// change history persistence at runtime
-func (i *Instance) SetHistoryPath(p string) {
- i.Operation.SetHistoryPath(p)
-}
-
-// readline will refresh automatically when writing through Stdout()
-func (i *Instance) Stdout() io.Writer {
- return i.Operation.Stdout()
-}
-
-// readline will refresh automatically when writing through Stderr()
-func (i *Instance) Stderr() io.Writer {
- return i.Operation.Stderr()
-}
-
-// switch VimMode at runtime
-func (i *Instance) SetVimMode(on bool) {
- i.Operation.SetVimMode(on)
-}
-
-func (i *Instance) IsVimMode() bool {
- return i.Operation.IsEnableVimMode()
-}
-
-func (i *Instance) GenPasswordConfig() *Config {
- return i.Operation.GenPasswordConfig()
-}
-
-// we can generate a config by `i.GenPasswordConfig()`
-func (i *Instance) ReadPasswordWithConfig(cfg *Config) ([]byte, error) {
- return i.Operation.PasswordWithConfig(cfg)
-}
-
-func (i *Instance) ReadPasswordEx(prompt string, l Listener) ([]byte, error) {
- return i.Operation.PasswordEx(prompt, l)
-}
-
-func (i *Instance) ReadPassword(prompt string) ([]byte, error) {
- return i.Operation.Password(prompt)
-}
-
-type Result struct {
- Line string
- Error error
-}
-
-func (l *Result) CanContinue() bool {
- return len(l.Line) != 0 && l.Error == ErrInterrupt
-}
-
-func (l *Result) CanBreak() bool {
- return !l.CanContinue() && l.Error != nil
-}
-
-func (i *Instance) Line() *Result {
- ret, err := i.Readline()
- return &Result{ret, err}
-}
-
-// err is one of (nil, io.EOF, readline.ErrInterrupt)
-func (i *Instance) Readline() (string, error) {
- return i.Operation.String()
-}
-
-func (i *Instance) ReadlineWithDefault(what string) (string, error) {
- i.Operation.SetBuffer(what)
- return i.Operation.String()
-}
-
-func (i *Instance) SaveHistory(content string) error {
- return i.Operation.SaveHistory(content)
-}
-
-// same as Readline() but returns a byte slice
-func (i *Instance) ReadSlice() ([]byte, error) {
- return i.Operation.Slice()
-}
-
-// we must make sure Close() is called before the process exits.
-func (i *Instance) Close() error {
- if err := i.Terminal.Close(); err != nil {
- return err
- }
- i.Config.Stdin.Close()
- i.Operation.Close()
- return nil
-}
-func (i *Instance) Clean() {
- i.Operation.Clean()
-}
-
-func (i *Instance) Write(b []byte) (int, error) {
- return i.Stdout().Write(b)
-}
-
-// WriteStdin prefills the next Stdin fetch.
-// The next time you call Readline() this value will be written before the user input,
-// e.g.:
-// i := readline.New()
-// i.WriteStdin([]byte("test"))
-// _, _= i.Readline()
-//
-// gives
-//
-// > test[cursor]
-func (i *Instance) WriteStdin(val []byte) (int, error) {
- return i.Terminal.WriteStdin(val)
-}
-
-func (i *Instance) SetConfig(cfg *Config) *Config {
- if i.Config == cfg {
- return cfg
- }
- old := i.Config
- i.Config = cfg
- i.Operation.SetConfig(cfg)
- i.Terminal.SetConfig(cfg)
- return old
-}
-
-func (i *Instance) Refresh() {
- i.Operation.Refresh()
-}
-
-// HistoryDisable disables saving commands into the history
-func (i *Instance) HistoryDisable() {
- i.Operation.history.Disable()
-}
-
-// HistoryEnable enables saving commands into the history (on by default)
-func (i *Instance) HistoryEnable() {
- i.Operation.history.Enable()
-}
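
The `Result` helpers above encode the usual shell loop policy: re-prompt when ^C interrupts a non-empty line, stop on EOF or an empty ^C. A sketch, assuming an existing `*readline.Instance`:

```go
func loop(rl *readline.Instance) {
	for {
		res := rl.Line()
		if res.CanContinue() { // ErrInterrupt with text on the line: re-prompt
			continue
		}
		if res.CanBreak() { // io.EOF, or ErrInterrupt on an empty line
			break
		}
		_ = res.Line // application logic goes here
	}
}
```
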
diff --git a/vendor/github.com/chzyer/readline/remote.go b/vendor/github.com/chzyer/readline/remote.go
deleted file mode 100644
index 74dbf569..00000000
--- a/vendor/github.com/chzyer/readline/remote.go
+++ /dev/null
@@ -1,475 +0,0 @@
-package readline
-
-import (
- "bufio"
- "bytes"
- "encoding/binary"
- "fmt"
- "io"
- "net"
- "os"
- "sync"
- "sync/atomic"
-)
-
-type MsgType int16
-
-const (
- T_DATA = MsgType(iota)
- T_WIDTH
- T_WIDTH_REPORT
- T_ISTTY_REPORT
- T_RAW
- T_ERAW // exit raw
- T_EOF
-)
-
-type RemoteSvr struct {
- eof int32
- closed int32
- width int32
- reciveChan chan struct{}
- writeChan chan *writeCtx
- conn net.Conn
- isTerminal bool
- funcWidthChan func()
- stopChan chan struct{}
-
- dataBufM sync.Mutex
- dataBuf bytes.Buffer
-}
-
-type writeReply struct {
- n int
- err error
-}
-
-type writeCtx struct {
- msg *Message
- reply chan *writeReply
-}
-
-func newWriteCtx(msg *Message) *writeCtx {
- return &writeCtx{
- msg: msg,
- reply: make(chan *writeReply),
- }
-}
-
-func NewRemoteSvr(conn net.Conn) (*RemoteSvr, error) {
- rs := &RemoteSvr{
- width: -1,
- conn: conn,
- writeChan: make(chan *writeCtx),
- reciveChan: make(chan struct{}),
- stopChan: make(chan struct{}),
- }
- buf := bufio.NewReader(rs.conn)
-
- if err := rs.init(buf); err != nil {
- return nil, err
- }
-
- go rs.readLoop(buf)
- go rs.writeLoop()
- return rs, nil
-}
-
-func (r *RemoteSvr) init(buf *bufio.Reader) error {
- m, err := ReadMessage(buf)
- if err != nil {
- return err
- }
- // receive isTerminal
- if m.Type != T_ISTTY_REPORT {
- return fmt.Errorf("unexpected init message")
- }
- r.GotIsTerminal(m.Data)
-
- // receive width
- m, err = ReadMessage(buf)
- if err != nil {
- return err
- }
- if m.Type != T_WIDTH_REPORT {
- return fmt.Errorf("unexpected init message")
- }
- r.GotReportWidth(m.Data)
-
- return nil
-}
-
-func (r *RemoteSvr) HandleConfig(cfg *Config) {
- cfg.Stderr = r
- cfg.Stdout = r
- cfg.Stdin = r
- cfg.FuncExitRaw = r.ExitRawMode
- cfg.FuncIsTerminal = r.IsTerminal
- cfg.FuncMakeRaw = r.EnterRawMode
- cfg.FuncExitRaw = r.ExitRawMode
- cfg.FuncGetWidth = r.GetWidth
- cfg.FuncOnWidthChanged = func(f func()) {
- r.funcWidthChan = f
- }
-}
-
-func (r *RemoteSvr) IsTerminal() bool {
- return r.isTerminal
-}
-
-func (r *RemoteSvr) checkEOF() error {
- if atomic.LoadInt32(&r.eof) == 1 {
- return io.EOF
- }
- return nil
-}
-
-func (r *RemoteSvr) Read(b []byte) (int, error) {
- r.dataBufM.Lock()
- n, err := r.dataBuf.Read(b)
- r.dataBufM.Unlock()
- if n == 0 {
- if err := r.checkEOF(); err != nil {
- return 0, err
- }
- }
-
- if n == 0 && err == io.EOF {
- <-r.reciveChan
- r.dataBufM.Lock()
- n, err = r.dataBuf.Read(b)
- r.dataBufM.Unlock()
- }
- if n == 0 {
- if err := r.checkEOF(); err != nil {
- return 0, err
- }
- }
-
- return n, err
-}
-
-func (r *RemoteSvr) writeMsg(m *Message) error {
- ctx := newWriteCtx(m)
- r.writeChan <- ctx
- reply := <-ctx.reply
- return reply.err
-}
-
-func (r *RemoteSvr) Write(b []byte) (int, error) {
- ctx := newWriteCtx(NewMessage(T_DATA, b))
- r.writeChan <- ctx
- reply := <-ctx.reply
- return reply.n, reply.err
-}
-
-func (r *RemoteSvr) EnterRawMode() error {
- return r.writeMsg(NewMessage(T_RAW, nil))
-}
-
-func (r *RemoteSvr) ExitRawMode() error {
- return r.writeMsg(NewMessage(T_ERAW, nil))
-}
-
-func (r *RemoteSvr) writeLoop() {
- defer r.Close()
-
-loop:
- for {
- select {
- case ctx, ok := <-r.writeChan:
- if !ok {
- break
- }
- n, err := ctx.msg.WriteTo(r.conn)
- ctx.reply <- &writeReply{n, err}
- case <-r.stopChan:
- break loop
- }
- }
-}
-
-func (r *RemoteSvr) Close() error {
- if atomic.CompareAndSwapInt32(&r.closed, 0, 1) {
- close(r.stopChan)
- r.conn.Close()
- }
- return nil
-}
-
-func (r *RemoteSvr) readLoop(buf *bufio.Reader) {
- defer r.Close()
- for {
- m, err := ReadMessage(buf)
- if err != nil {
- break
- }
- switch m.Type {
- case T_EOF:
- atomic.StoreInt32(&r.eof, 1)
- select {
- case r.reciveChan <- struct{}{}:
- default:
- }
- case T_DATA:
- r.dataBufM.Lock()
- r.dataBuf.Write(m.Data)
- r.dataBufM.Unlock()
- select {
- case r.reciveChan <- struct{}{}:
- default:
- }
- case T_WIDTH_REPORT:
- r.GotReportWidth(m.Data)
- case T_ISTTY_REPORT:
- r.GotIsTerminal(m.Data)
- }
- }
-}
-
-func (r *RemoteSvr) GotIsTerminal(data []byte) {
- if binary.BigEndian.Uint16(data) == 0 {
- r.isTerminal = false
- } else {
- r.isTerminal = true
- }
-}
-
-func (r *RemoteSvr) GotReportWidth(data []byte) {
- atomic.StoreInt32(&r.width, int32(binary.BigEndian.Uint16(data)))
- if r.funcWidthChan != nil {
- r.funcWidthChan()
- }
-}
-
-func (r *RemoteSvr) GetWidth() int {
- return int(atomic.LoadInt32(&r.width))
-}
-
-// -----------------------------------------------------------------------------
-
-type Message struct {
- Type MsgType
- Data []byte
-}
-
-func ReadMessage(r io.Reader) (*Message, error) {
- m := new(Message)
- var length int32
- if err := binary.Read(r, binary.BigEndian, &length); err != nil {
- return nil, err
- }
- if err := binary.Read(r, binary.BigEndian, &m.Type); err != nil {
- return nil, err
- }
- m.Data = make([]byte, int(length)-2)
- if _, err := io.ReadFull(r, m.Data); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func NewMessage(t MsgType, data []byte) *Message {
- return &Message{t, data}
-}
-
-func (m *Message) WriteTo(w io.Writer) (int, error) {
- buf := bytes.NewBuffer(make([]byte, 0, len(m.Data)+2+4))
- binary.Write(buf, binary.BigEndian, int32(len(m.Data)+2))
- binary.Write(buf, binary.BigEndian, m.Type)
- buf.Write(m.Data)
- n, err := buf.WriteTo(w)
- return int(n), err
-}
-
-// -----------------------------------------------------------------------------
-
-type RemoteCli struct {
- conn net.Conn
- raw RawMode
- receiveChan chan struct{}
- inited int32
- isTerminal *bool
-
- data bytes.Buffer
- dataM sync.Mutex
-}
-
-func NewRemoteCli(conn net.Conn) (*RemoteCli, error) {
- r := &RemoteCli{
- conn: conn,
- receiveChan: make(chan struct{}),
- }
- return r, nil
-}
-
-func (r *RemoteCli) MarkIsTerminal(is bool) {
- r.isTerminal = &is
-}
-
-func (r *RemoteCli) init() error {
- if !atomic.CompareAndSwapInt32(&r.inited, 0, 1) {
- return nil
- }
-
- if err := r.reportIsTerminal(); err != nil {
- return err
- }
-
- if err := r.reportWidth(); err != nil {
- return err
- }
-
- // register sig for width changed
- DefaultOnWidthChanged(func() {
- r.reportWidth()
- })
- return nil
-}
-
-func (r *RemoteCli) writeMsg(m *Message) error {
- r.dataM.Lock()
- _, err := m.WriteTo(r.conn)
- r.dataM.Unlock()
- return err
-}
-
-func (r *RemoteCli) Write(b []byte) (int, error) {
- m := NewMessage(T_DATA, b)
- r.dataM.Lock()
- _, err := m.WriteTo(r.conn)
- r.dataM.Unlock()
- return len(b), err
-}
-
-func (r *RemoteCli) reportWidth() error {
- screenWidth := GetScreenWidth()
- data := make([]byte, 2)
- binary.BigEndian.PutUint16(data, uint16(screenWidth))
- msg := NewMessage(T_WIDTH_REPORT, data)
-
- if err := r.writeMsg(msg); err != nil {
- return err
- }
- return nil
-}
-
-func (r *RemoteCli) reportIsTerminal() error {
- var isTerminal bool
- if r.isTerminal != nil {
- isTerminal = *r.isTerminal
- } else {
- isTerminal = DefaultIsTerminal()
- }
- data := make([]byte, 2)
- if isTerminal {
- binary.BigEndian.PutUint16(data, 1)
- } else {
- binary.BigEndian.PutUint16(data, 0)
- }
- msg := NewMessage(T_ISTTY_REPORT, data)
- if err := r.writeMsg(msg); err != nil {
- return err
- }
- return nil
-}
-
-func (r *RemoteCli) readLoop() {
- buf := bufio.NewReader(r.conn)
- for {
- msg, err := ReadMessage(buf)
- if err != nil {
- break
- }
- switch msg.Type {
- case T_ERAW:
- r.raw.Exit()
- case T_RAW:
- r.raw.Enter()
- case T_DATA:
- os.Stdout.Write(msg.Data)
- }
- }
-}
-
-func (r *RemoteCli) ServeBy(source io.Reader) error {
- if err := r.init(); err != nil {
- return err
- }
-
- go func() {
- defer r.Close()
- for {
- n, _ := io.Copy(r, source)
- if n == 0 {
- break
- }
- }
- }()
- defer r.raw.Exit()
- r.readLoop()
- return nil
-}
-
-func (r *RemoteCli) Close() {
- r.writeMsg(NewMessage(T_EOF, nil))
-}
-
-func (r *RemoteCli) Serve() error {
- return r.ServeBy(os.Stdin)
-}
-
-func ListenRemote(n, addr string, cfg *Config, h func(*Instance), onListen ...func(net.Listener) error) error {
- ln, err := net.Listen(n, addr)
- if err != nil {
- return err
- }
- if len(onListen) > 0 {
- if err := onListen[0](ln); err != nil {
- return err
- }
- }
- for {
- conn, err := ln.Accept()
- if err != nil {
- break
- }
- go func() {
- defer conn.Close()
- rl, err := HandleConn(*cfg, conn)
- if err != nil {
- return
- }
- h(rl)
- }()
- }
- return nil
-}
-
-func HandleConn(cfg Config, conn net.Conn) (*Instance, error) {
- r, err := NewRemoteSvr(conn)
- if err != nil {
- return nil, err
- }
- r.HandleConfig(&cfg)
-
- rl, err := NewEx(&cfg)
- if err != nil {
- return nil, err
- }
- return rl, nil
-}
-
-func DialRemote(n, addr string) error {
- conn, err := net.Dial(n, addr)
- if err != nil {
- return err
- }
- defer conn.Close()
-
- cli, err := NewRemoteCli(conn)
- if err != nil {
- return err
- }
- return cli.Serve()
-}
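
`ListenRemote` and `DialRemote` above pair a terminal-less server with a client that owns the real tty, shuttling the framed `Message`s defined in this file over the socket. A minimal sketch (address and handler are placeholders):

```go
// Server: each accepted connection gets its own Instance whose stdio is
// proxied over the socket by RemoteSvr.
go func() {
	err := readline.ListenRemote("tcp", "127.0.0.1:12344",
		&readline.Config{Prompt: "remote> "},
		func(rl *readline.Instance) {
			line, err := rl.Readline()
			if err != nil {
				return
			}
			rl.Write([]byte("echo: " + line + "\n"))
		})
	if err != nil {
		panic(err)
	}
}()

// Client: puts the local terminal into raw mode and relays it to the server.
if err := readline.DialRemote("tcp", "127.0.0.1:12344"); err != nil {
	panic(err)
}
```
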
diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go
deleted file mode 100644
index 81d2da50..00000000
--- a/vendor/github.com/chzyer/readline/runebuf.go
+++ /dev/null
@@ -1,629 +0,0 @@
-package readline
-
-import (
- "bufio"
- "bytes"
- "io"
- "strconv"
- "strings"
- "sync"
-)
-
-type runeBufferBck struct {
- buf []rune
- idx int
-}
-
-type RuneBuffer struct {
- buf []rune
- idx int
- prompt []rune
- w io.Writer
-
- hadClean bool
- interactive bool
- cfg *Config
-
- width int
-
- bck *runeBufferBck
-
- offset string
-
- lastKill []rune
-
- sync.Mutex
-}
-
-func (r *RuneBuffer) pushKill(text []rune) {
- r.lastKill = append([]rune{}, text...)
-}
-
-func (r *RuneBuffer) OnWidthChange(newWidth int) {
- r.Lock()
- r.width = newWidth
- r.Unlock()
-}
-
-func (r *RuneBuffer) Backup() {
- r.Lock()
- r.bck = &runeBufferBck{r.buf, r.idx}
- r.Unlock()
-}
-
-func (r *RuneBuffer) Restore() {
- r.Refresh(func() {
- if r.bck == nil {
- return
- }
- r.buf = r.bck.buf
- r.idx = r.bck.idx
- })
-}
-
-func NewRuneBuffer(w io.Writer, prompt string, cfg *Config, width int) *RuneBuffer {
- rb := &RuneBuffer{
- w: w,
- interactive: cfg.useInteractive(),
- cfg: cfg,
- width: width,
- }
- rb.SetPrompt(prompt)
- return rb
-}
-
-func (r *RuneBuffer) SetConfig(cfg *Config) {
- r.Lock()
- r.cfg = cfg
- r.interactive = cfg.useInteractive()
- r.Unlock()
-}
-
-func (r *RuneBuffer) SetMask(m rune) {
- r.Lock()
- r.cfg.MaskRune = m
- r.Unlock()
-}
-
-func (r *RuneBuffer) CurrentWidth(x int) int {
- r.Lock()
- defer r.Unlock()
- return runes.WidthAll(r.buf[:x])
-}
-
-func (r *RuneBuffer) PromptLen() int {
- r.Lock()
- width := r.promptLen()
- r.Unlock()
- return width
-}
-
-func (r *RuneBuffer) promptLen() int {
- return runes.WidthAll(runes.ColorFilter(r.prompt))
-}
-
-func (r *RuneBuffer) RuneSlice(i int) []rune {
- r.Lock()
- defer r.Unlock()
-
- if i > 0 {
- rs := make([]rune, i)
- copy(rs, r.buf[r.idx:r.idx+i])
- return rs
- }
- rs := make([]rune, -i)
- copy(rs, r.buf[r.idx+i:r.idx])
- return rs
-}
-
-func (r *RuneBuffer) Runes() []rune {
- r.Lock()
- newr := make([]rune, len(r.buf))
- copy(newr, r.buf)
- r.Unlock()
- return newr
-}
-
-func (r *RuneBuffer) Pos() int {
- r.Lock()
- defer r.Unlock()
- return r.idx
-}
-
-func (r *RuneBuffer) Len() int {
- r.Lock()
- defer r.Unlock()
- return len(r.buf)
-}
-
-func (r *RuneBuffer) MoveToLineStart() {
- r.Refresh(func() {
- if r.idx == 0 {
- return
- }
- r.idx = 0
- })
-}
-
-func (r *RuneBuffer) MoveBackward() {
- r.Refresh(func() {
- if r.idx == 0 {
- return
- }
- r.idx--
- })
-}
-
-func (r *RuneBuffer) WriteString(s string) {
- r.WriteRunes([]rune(s))
-}
-
-func (r *RuneBuffer) WriteRune(s rune) {
- r.WriteRunes([]rune{s})
-}
-
-func (r *RuneBuffer) WriteRunes(s []rune) {
- r.Refresh(func() {
- tail := append(s, r.buf[r.idx:]...)
- r.buf = append(r.buf[:r.idx], tail...)
- r.idx += len(s)
- })
-}
-
-func (r *RuneBuffer) MoveForward() {
- r.Refresh(func() {
- if r.idx == len(r.buf) {
- return
- }
- r.idx++
- })
-}
-
-func (r *RuneBuffer) IsCursorInEnd() bool {
- r.Lock()
- defer r.Unlock()
- return r.idx == len(r.buf)
-}
-
-func (r *RuneBuffer) Replace(ch rune) {
- r.Refresh(func() {
- r.buf[r.idx] = ch
- })
-}
-
-func (r *RuneBuffer) Erase() {
- r.Refresh(func() {
- r.idx = 0
- r.pushKill(r.buf[:])
- r.buf = r.buf[:0]
- })
-}
-
-func (r *RuneBuffer) Delete() (success bool) {
- r.Refresh(func() {
- if r.idx == len(r.buf) {
- return
- }
- r.pushKill(r.buf[r.idx : r.idx+1])
- r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...)
- success = true
- })
- return
-}
-
-func (r *RuneBuffer) DeleteWord() {
- if r.idx == len(r.buf) {
- return
- }
- init := r.idx
- for init < len(r.buf) && IsWordBreak(r.buf[init]) {
- init++
- }
- for i := init + 1; i < len(r.buf); i++ {
- if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
- r.pushKill(r.buf[r.idx:i-1])
- r.Refresh(func() {
- r.buf = append(r.buf[:r.idx], r.buf[i-1:]...)
- })
- return
- }
- }
- r.Kill()
-}
-
-func (r *RuneBuffer) MoveToPrevWord() (success bool) {
- r.Refresh(func() {
- if r.idx == 0 {
- return
- }
-
- for i := r.idx - 1; i > 0; i-- {
- if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
- r.idx = i
- success = true
- return
- }
- }
- r.idx = 0
- success = true
- })
- return
-}
-
-func (r *RuneBuffer) KillFront() {
- r.Refresh(func() {
- if r.idx == 0 {
- return
- }
-
- length := len(r.buf) - r.idx
- r.pushKill(r.buf[:r.idx])
- copy(r.buf[:length], r.buf[r.idx:])
- r.idx = 0
- r.buf = r.buf[:length]
- })
-}
-
-func (r *RuneBuffer) Kill() {
- r.Refresh(func() {
- r.pushKill(r.buf[r.idx:])
- r.buf = r.buf[:r.idx]
- })
-}
-
-func (r *RuneBuffer) Transpose() {
- r.Refresh(func() {
- if len(r.buf) == 1 {
- r.idx++
- }
-
- if len(r.buf) < 2 {
- return
- }
-
- if r.idx == 0 {
- r.idx = 1
- } else if r.idx >= len(r.buf) {
- r.idx = len(r.buf) - 1
- }
- r.buf[r.idx], r.buf[r.idx-1] = r.buf[r.idx-1], r.buf[r.idx]
- r.idx++
- })
-}
-
-func (r *RuneBuffer) MoveToNextWord() {
- r.Refresh(func() {
- for i := r.idx + 1; i < len(r.buf); i++ {
- if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
- r.idx = i
- return
- }
- }
-
- r.idx = len(r.buf)
- })
-}
-
-func (r *RuneBuffer) MoveToEndWord() {
- r.Refresh(func() {
- // already at the end, so do nothing
- if r.idx == len(r.buf) {
- return
- }
- // if we are at the end of a word already, go to next
- if !IsWordBreak(r.buf[r.idx]) && IsWordBreak(r.buf[r.idx+1]) {
- r.idx++
- }
-
- // keep going until at the end of a word
- for i := r.idx + 1; i < len(r.buf); i++ {
- if IsWordBreak(r.buf[i]) && !IsWordBreak(r.buf[i-1]) {
- r.idx = i - 1
- return
- }
- }
- r.idx = len(r.buf)
- })
-}
-
-func (r *RuneBuffer) BackEscapeWord() {
- r.Refresh(func() {
- if r.idx == 0 {
- return
- }
- for i := r.idx - 1; i > 0; i-- {
- if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
- r.pushKill(r.buf[i:r.idx])
- r.buf = append(r.buf[:i], r.buf[r.idx:]...)
- r.idx = i
- return
- }
- }
-
- r.buf = r.buf[:0]
- r.idx = 0
- })
-}
-
-func (r *RuneBuffer) Yank() {
- if len(r.lastKill) == 0 {
- return
- }
- r.Refresh(func() {
- buf := make([]rune, 0, len(r.buf) + len(r.lastKill))
- buf = append(buf, r.buf[:r.idx]...)
- buf = append(buf, r.lastKill...)
- buf = append(buf, r.buf[r.idx:]...)
- r.buf = buf
- r.idx += len(r.lastKill)
- })
-}
-
-func (r *RuneBuffer) Backspace() {
- r.Refresh(func() {
- if r.idx == 0 {
- return
- }
-
- r.idx--
- r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...)
- })
-}
-
-func (r *RuneBuffer) MoveToLineEnd() {
- r.Refresh(func() {
- if r.idx == len(r.buf) {
- return
- }
-
- r.idx = len(r.buf)
- })
-}
-
-func (r *RuneBuffer) LineCount(width int) int {
- if width == -1 {
- width = r.width
- }
- return LineCount(width,
- runes.WidthAll(r.buf)+r.PromptLen())
-}
-
-func (r *RuneBuffer) MoveTo(ch rune, prevChar, reverse bool) (success bool) {
- r.Refresh(func() {
- if reverse {
- for i := r.idx - 1; i >= 0; i-- {
- if r.buf[i] == ch {
- r.idx = i
- if prevChar {
- r.idx++
- }
- success = true
- return
- }
- }
- return
- }
- for i := r.idx + 1; i < len(r.buf); i++ {
- if r.buf[i] == ch {
- r.idx = i
- if prevChar {
- r.idx--
- }
- success = true
- return
- }
- }
- })
- return
-}
-
-func (r *RuneBuffer) isInLineEdge() bool {
- if isWindows {
- return false
- }
- sp := r.getSplitByLine(r.buf)
- return len(sp[len(sp)-1]) == 0
-}
-
-func (r *RuneBuffer) getSplitByLine(rs []rune) []string {
- return SplitByLine(r.promptLen(), r.width, rs)
-}
-
-func (r *RuneBuffer) IdxLine(width int) int {
- r.Lock()
- defer r.Unlock()
- return r.idxLine(width)
-}
-
-func (r *RuneBuffer) idxLine(width int) int {
- if width == 0 {
- return 0
- }
- sp := r.getSplitByLine(r.buf[:r.idx])
- return len(sp) - 1
-}
-
-func (r *RuneBuffer) CursorLineCount() int {
- return r.LineCount(r.width) - r.IdxLine(r.width)
-}
-
-func (r *RuneBuffer) Refresh(f func()) {
- r.Lock()
- defer r.Unlock()
-
- if !r.interactive {
- if f != nil {
- f()
- }
- return
- }
-
- r.clean()
- if f != nil {
- f()
- }
- r.print()
-}
-
-func (r *RuneBuffer) SetOffset(offset string) {
- r.Lock()
- r.offset = offset
- r.Unlock()
-}
-
-func (r *RuneBuffer) print() {
- r.w.Write(r.output())
- r.hadClean = false
-}
-
-func (r *RuneBuffer) output() []byte {
- buf := bytes.NewBuffer(nil)
- buf.WriteString(string(r.prompt))
- if r.cfg.EnableMask && len(r.buf) > 0 {
- buf.Write([]byte(strings.Repeat(string(r.cfg.MaskRune), len(r.buf)-1)))
- if r.buf[len(r.buf)-1] == '\n' {
- buf.Write([]byte{'\n'})
- } else {
- buf.Write([]byte(string(r.cfg.MaskRune)))
- }
- if len(r.buf) > r.idx {
- buf.Write(r.getBackspaceSequence())
- }
-
- } else {
- for _, e := range r.cfg.Painter.Paint(r.buf, r.idx) {
- if e == '\t' {
- buf.WriteString(strings.Repeat(" ", TabWidth))
- } else {
- buf.WriteRune(e)
- }
- }
- if r.isInLineEdge() {
- buf.Write([]byte(" \b"))
- }
- }
- // cursor position
- if len(r.buf) > r.idx {
- buf.Write(r.getBackspaceSequence())
- }
- return buf.Bytes()
-}
-
-func (r *RuneBuffer) getBackspaceSequence() []byte {
- var sep = map[int]bool{}
-
- var i int
- for {
- if i >= runes.WidthAll(r.buf) {
- break
- }
-
- if i == 0 {
- i -= r.promptLen()
- }
- i += r.width
-
- sep[i] = true
- }
- var buf []byte
-		// move back one character
- // move input to the left of one
- buf = append(buf, '\b')
- if sep[i] {
- // up one line, go to the start of the line and move cursor right to the end (r.width)
- buf = append(buf, "\033[A\r"+"\033["+strconv.Itoa(r.width)+"C"...)
- }
- }
-
- return buf
-
-}
-
-func (r *RuneBuffer) Reset() []rune {
- ret := runes.Copy(r.buf)
- r.buf = r.buf[:0]
- r.idx = 0
- return ret
-}
-
-func (r *RuneBuffer) calWidth(m int) int {
- if m > 0 {
- return runes.WidthAll(r.buf[r.idx : r.idx+m])
- }
- return runes.WidthAll(r.buf[r.idx+m : r.idx])
-}
-
-func (r *RuneBuffer) SetStyle(start, end int, style string) {
- if end < start {
- panic("end < start")
- }
-
- // goto start
- move := start - r.idx
- if move > 0 {
- r.w.Write([]byte(string(r.buf[r.idx : r.idx+move])))
- } else {
- r.w.Write(bytes.Repeat([]byte("\b"), r.calWidth(move)))
- }
- r.w.Write([]byte("\033[" + style + "m"))
- r.w.Write([]byte(string(r.buf[start:end])))
- r.w.Write([]byte("\033[0m"))
- // TODO: move back
-}
-
-func (r *RuneBuffer) SetWithIdx(idx int, buf []rune) {
- r.Refresh(func() {
- r.buf = buf
- r.idx = idx
- })
-}
-
-func (r *RuneBuffer) Set(buf []rune) {
- r.SetWithIdx(len(buf), buf)
-}
-
-func (r *RuneBuffer) SetPrompt(prompt string) {
- r.Lock()
- r.prompt = []rune(prompt)
- r.Unlock()
-}
-
-func (r *RuneBuffer) cleanOutput(w io.Writer, idxLine int) {
- buf := bufio.NewWriter(w)
-
- if r.width == 0 {
- buf.WriteString(strings.Repeat("\r\b", len(r.buf)+r.promptLen()))
- buf.Write([]byte("\033[J"))
- } else {
- buf.Write([]byte("\033[J")) // just like ^k :)
- if idxLine == 0 {
- buf.WriteString("\033[2K")
- buf.WriteString("\r")
- } else {
- for i := 0; i < idxLine; i++ {
- io.WriteString(buf, "\033[2K\r\033[A")
- }
- io.WriteString(buf, "\033[2K\r")
- }
- }
- buf.Flush()
- return
-}
-
-func (r *RuneBuffer) Clean() {
- r.Lock()
- r.clean()
- r.Unlock()
-}
-
-func (r *RuneBuffer) clean() {
- r.cleanWithIdxLine(r.idxLine(r.width))
-}
-
-func (r *RuneBuffer) cleanWithIdxLine(idxLine int) {
- if r.hadClean || !r.interactive {
- return
- }
- r.hadClean = true
- r.cleanOutput(r.w, idxLine)
-}
diff --git a/vendor/github.com/chzyer/readline/runes.go b/vendor/github.com/chzyer/readline/runes.go
deleted file mode 100644
index a669bc48..00000000
--- a/vendor/github.com/chzyer/readline/runes.go
+++ /dev/null
@@ -1,223 +0,0 @@
-package readline
-
-import (
- "bytes"
- "unicode"
- "unicode/utf8"
-)
-
-var runes = Runes{}
-var TabWidth = 4
-
-type Runes struct{}
-
-func (Runes) EqualRune(a, b rune, fold bool) bool {
- if a == b {
- return true
- }
- if !fold {
- return false
- }
- if a > b {
- a, b = b, a
- }
- if b < utf8.RuneSelf && 'A' <= a && a <= 'Z' {
- if b == a+'a'-'A' {
- return true
- }
- }
- return false
-}
-
-func (r Runes) EqualRuneFold(a, b rune) bool {
- return r.EqualRune(a, b, true)
-}
-
-func (r Runes) EqualFold(a, b []rune) bool {
- if len(a) != len(b) {
- return false
- }
- for i := 0; i < len(a); i++ {
- if r.EqualRuneFold(a[i], b[i]) {
- continue
- }
- return false
- }
-
- return true
-}
-
-func (Runes) Equal(a, b []rune) bool {
- if len(a) != len(b) {
- return false
- }
- for i := 0; i < len(a); i++ {
- if a[i] != b[i] {
- return false
- }
- }
- return true
-}
-
-func (rs Runes) IndexAllBckEx(r, sub []rune, fold bool) int {
- for i := len(r) - len(sub); i >= 0; i-- {
- found := true
- for j := 0; j < len(sub); j++ {
- if !rs.EqualRune(r[i+j], sub[j], fold) {
- found = false
- break
- }
- }
- if found {
- return i
- }
- }
- return -1
-}
-
-// Search in runes from end to front
-func (rs Runes) IndexAllBck(r, sub []rune) int {
- return rs.IndexAllBckEx(r, sub, false)
-}
-
-// Search in runes from front to end
-func (rs Runes) IndexAll(r, sub []rune) int {
- return rs.IndexAllEx(r, sub, false)
-}
-
-func (rs Runes) IndexAllEx(r, sub []rune, fold bool) int {
- for i := 0; i < len(r); i++ {
- found := true
- if len(r[i:]) < len(sub) {
- return -1
- }
- for j := 0; j < len(sub); j++ {
- if !rs.EqualRune(r[i+j], sub[j], fold) {
- found = false
- break
- }
- }
- if found {
- return i
- }
- }
- return -1
-}
-
-func (Runes) Index(r rune, rs []rune) int {
- for i := 0; i < len(rs); i++ {
- if rs[i] == r {
- return i
- }
- }
- return -1
-}
-
-func (Runes) ColorFilter(r []rune) []rune {
- newr := make([]rune, 0, len(r))
- for pos := 0; pos < len(r); pos++ {
- if r[pos] == '\033' && r[pos+1] == '[' {
- idx := runes.Index('m', r[pos+2:])
- if idx == -1 {
- continue
- }
- pos += idx + 2
- continue
- }
- newr = append(newr, r[pos])
- }
- return newr
-}
-
-var zeroWidth = []*unicode.RangeTable{
- unicode.Mn,
- unicode.Me,
- unicode.Cc,
- unicode.Cf,
-}
-
-var doubleWidth = []*unicode.RangeTable{
- unicode.Han,
- unicode.Hangul,
- unicode.Hiragana,
- unicode.Katakana,
-}
-
-func (Runes) Width(r rune) int {
- if r == '\t' {
- return TabWidth
- }
- if unicode.IsOneOf(zeroWidth, r) {
- return 0
- }
- if unicode.IsOneOf(doubleWidth, r) {
- return 2
- }
- return 1
-}
-
-func (Runes) WidthAll(r []rune) (length int) {
- for i := 0; i < len(r); i++ {
- length += runes.Width(r[i])
- }
- return
-}
-
-func (Runes) Backspace(r []rune) []byte {
- return bytes.Repeat([]byte{'\b'}, runes.WidthAll(r))
-}
-
-func (Runes) Copy(r []rune) []rune {
- n := make([]rune, len(r))
- copy(n, r)
- return n
-}
-
-func (Runes) HasPrefixFold(r, prefix []rune) bool {
- if len(r) < len(prefix) {
- return false
- }
- return runes.EqualFold(r[:len(prefix)], prefix)
-}
-
-func (Runes) HasPrefix(r, prefix []rune) bool {
- if len(r) < len(prefix) {
- return false
- }
- return runes.Equal(r[:len(prefix)], prefix)
-}
-
-func (Runes) Aggregate(candicate [][]rune) (same []rune, size int) {
- for i := 0; i < len(candicate[0]); i++ {
- for j := 0; j < len(candicate)-1; j++ {
- if i >= len(candicate[j]) || i >= len(candicate[j+1]) {
- goto aggregate
- }
- if candicate[j][i] != candicate[j+1][i] {
- goto aggregate
- }
- }
- size = i + 1
- }
-aggregate:
- if size > 0 {
- same = runes.Copy(candicate[0][:size])
- for i := 0; i < len(candicate); i++ {
- n := runes.Copy(candicate[i])
- copy(n, n[size:])
- candicate[i] = n[:len(n)-size]
- }
- }
- return
-}
-
-func (Runes) TrimSpaceLeft(in []rune) []rune {
- firstIndex := len(in)
- for i, r := range in {
-		if !unicode.IsSpace(r) {
- firstIndex = i
- break
- }
- }
- return in[firstIndex:]
-}
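
The width table above is what the renderer uses to count terminal cells: tabs expand to `TabWidth` and CJK runes count double. For example:

```go
r := readline.Runes{}
w := r.WidthAll([]rune("go\t語")) // 1 + 1 + 4 (TabWidth) + 2 = 8 cells
_ = w
```
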
diff --git a/vendor/github.com/chzyer/readline/search.go b/vendor/github.com/chzyer/readline/search.go
deleted file mode 100644
index 52e8ff09..00000000
--- a/vendor/github.com/chzyer/readline/search.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package readline
-
-import (
- "bytes"
- "container/list"
- "fmt"
- "io"
-)
-
-const (
- S_STATE_FOUND = iota
- S_STATE_FAILING
-)
-
-const (
- S_DIR_BCK = iota
- S_DIR_FWD
-)
-
-type opSearch struct {
- inMode bool
- state int
- dir int
- source *list.Element
- w io.Writer
- buf *RuneBuffer
- data []rune
- history *opHistory
- cfg *Config
- markStart int
- markEnd int
- width int
-}
-
-func newOpSearch(w io.Writer, buf *RuneBuffer, history *opHistory, cfg *Config, width int) *opSearch {
- return &opSearch{
- w: w,
- buf: buf,
- cfg: cfg,
- history: history,
- width: width,
- }
-}
-
-func (o *opSearch) OnWidthChange(newWidth int) {
- o.width = newWidth
-}
-
-func (o *opSearch) IsSearchMode() bool {
- return o.inMode
-}
-
-func (o *opSearch) SearchBackspace() {
- if len(o.data) > 0 {
- o.data = o.data[:len(o.data)-1]
- o.search(true)
- }
-}
-
-func (o *opSearch) findHistoryBy(isNewSearch bool) (int, *list.Element) {
- if o.dir == S_DIR_BCK {
- return o.history.FindBck(isNewSearch, o.data, o.buf.idx)
- }
- return o.history.FindFwd(isNewSearch, o.data, o.buf.idx)
-}
-
-func (o *opSearch) search(isChange bool) bool {
- if len(o.data) == 0 {
- o.state = S_STATE_FOUND
- o.SearchRefresh(-1)
- return true
- }
- idx, elem := o.findHistoryBy(isChange)
- if elem == nil {
- o.SearchRefresh(-2)
- return false
- }
- o.history.current = elem
-
- item := o.history.showItem(o.history.current.Value)
- start, end := 0, 0
- if o.dir == S_DIR_BCK {
- start, end = idx, idx+len(o.data)
- } else {
- start, end = idx, idx+len(o.data)
- idx += len(o.data)
- }
- o.buf.SetWithIdx(idx, item)
- o.markStart, o.markEnd = start, end
- o.SearchRefresh(idx)
- return true
-}
-
-func (o *opSearch) SearchChar(r rune) {
- o.data = append(o.data, r)
- o.search(true)
-}
-
-func (o *opSearch) SearchMode(dir int) bool {
- if o.width == 0 {
- return false
- }
- alreadyInMode := o.inMode
- o.inMode = true
- o.dir = dir
- o.source = o.history.current
- if alreadyInMode {
- o.search(false)
- } else {
- o.SearchRefresh(-1)
- }
- return true
-}
-
-func (o *opSearch) ExitSearchMode(revert bool) {
- if revert {
- o.history.current = o.source
- o.buf.Set(o.history.showItem(o.history.current.Value))
- }
- o.markStart, o.markEnd = 0, 0
- o.state = S_STATE_FOUND
- o.inMode = false
- o.source = nil
- o.data = nil
-}
-
-func (o *opSearch) SearchRefresh(x int) {
- if x == -2 {
- o.state = S_STATE_FAILING
- } else if x >= 0 {
- o.state = S_STATE_FOUND
- }
- if x < 0 {
- x = o.buf.idx
- }
- x = o.buf.CurrentWidth(x)
- x += o.buf.PromptLen()
- x = x % o.width
-
- if o.markStart > 0 {
- o.buf.SetStyle(o.markStart, o.markEnd, "4")
- }
-
- lineCnt := o.buf.CursorLineCount()
- buf := bytes.NewBuffer(nil)
- buf.Write(bytes.Repeat([]byte("\n"), lineCnt))
- buf.WriteString("\033[J")
- if o.state == S_STATE_FAILING {
- buf.WriteString("failing ")
- }
- if o.dir == S_DIR_BCK {
- buf.WriteString("bck")
- } else if o.dir == S_DIR_FWD {
- buf.WriteString("fwd")
- }
- buf.WriteString("-i-search: ")
- buf.WriteString(string(o.data)) // keyword
- buf.WriteString("\033[4m \033[0m") // _
- fmt.Fprintf(buf, "\r\033[%dA", lineCnt) // move prev
- if x > 0 {
- fmt.Fprintf(buf, "\033[%dC", x) // move forward
- }
- o.w.Write(buf.Bytes())
-}
diff --git a/vendor/github.com/chzyer/readline/std.go b/vendor/github.com/chzyer/readline/std.go
deleted file mode 100644
index 61d44b75..00000000
--- a/vendor/github.com/chzyer/readline/std.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package readline
-
-import (
- "io"
- "os"
- "sync"
- "sync/atomic"
-)
-
-var (
- Stdin io.ReadCloser = os.Stdin
- Stdout io.WriteCloser = os.Stdout
- Stderr io.WriteCloser = os.Stderr
-)
-
-var (
- std *Instance
- stdOnce sync.Once
-)
-
-// the global instance will not save history automatically
-func getInstance() *Instance {
- stdOnce.Do(func() {
- std, _ = NewEx(&Config{
- DisableAutoSaveHistory: true,
- })
- })
- return std
-}
-
-// SetHistoryPath makes readline load history from filepath
-// and try to persist history to disk.
-// Set fp to "" to prevent readline from persisting history to disk,
-// in which case `AddHistory` will always return a nil error.
-func SetHistoryPath(fp string) {
- ins := getInstance()
- cfg := ins.Config.Clone()
- cfg.HistoryFile = fp
- ins.SetConfig(cfg)
-}
-
-// SetAutoComplete sets the auto-completer on the global instance.
-func SetAutoComplete(completer AutoCompleter) {
- ins := getInstance()
- cfg := ins.Config.Clone()
- cfg.AutoComplete = completer
- ins.SetConfig(cfg)
-}
-
-// AddHistory adds history to the global instance manually.
-// It returns an error only if `SetHistoryPath` was set with a non-empty path.
-func AddHistory(content string) error {
- ins := getInstance()
- return ins.SaveHistory(content)
-}
-
-func Password(prompt string) ([]byte, error) {
- ins := getInstance()
- return ins.ReadPassword(prompt)
-}
-
-// Line reads one line using the global configuration.
-func Line(prompt string) (string, error) {
- ins := getInstance()
- ins.SetPrompt(prompt)
- return ins.Readline()
-}
-
-type CancelableStdin struct {
- r io.Reader
- mutex sync.Mutex
- stop chan struct{}
- closed int32
- notify chan struct{}
- data []byte
- read int
- err error
-}
-
-func NewCancelableStdin(r io.Reader) *CancelableStdin {
- c := &CancelableStdin{
- r: r,
- notify: make(chan struct{}),
- stop: make(chan struct{}),
- }
- go c.ioloop()
- return c
-}
-
-func (c *CancelableStdin) ioloop() {
-loop:
- for {
- select {
- case <-c.notify:
- c.read, c.err = c.r.Read(c.data)
- select {
- case c.notify <- struct{}{}:
- case <-c.stop:
- break loop
- }
- case <-c.stop:
- break loop
- }
- }
-}
-
-func (c *CancelableStdin) Read(b []byte) (n int, err error) {
- c.mutex.Lock()
- defer c.mutex.Unlock()
- if atomic.LoadInt32(&c.closed) == 1 {
- return 0, io.EOF
- }
-
- c.data = b
- select {
- case c.notify <- struct{}{}:
- case <-c.stop:
- return 0, io.EOF
- }
- select {
- case <-c.notify:
- return c.read, c.err
- case <-c.stop:
- return 0, io.EOF
- }
-}
-
-func (c *CancelableStdin) Close() error {
- if atomic.CompareAndSwapInt32(&c.closed, 0, 1) {
- close(c.stop)
- }
- return nil
-}
-
-// FillableStdin is a stdin reader which can prepend some data before
-// reading from the real stdin.
-type FillableStdin struct {
- sync.Mutex
- stdin io.Reader
- stdinBuffer io.ReadCloser
- buf []byte
- bufErr error
-}
-
-// NewFillableStdin returns a FillableStdin plus the writer used to prefill it.
-func NewFillableStdin(stdin io.Reader) (io.ReadCloser, io.Writer) {
- r, w := io.Pipe()
- s := &FillableStdin{
- stdinBuffer: r,
- stdin: stdin,
- }
- s.ioloop()
- return s, w
-}
-
-func (s *FillableStdin) ioloop() {
- go func() {
- for {
- bufR := make([]byte, 100)
- var n int
- n, s.bufErr = s.stdinBuffer.Read(bufR)
- if s.bufErr != nil {
- if s.bufErr == io.ErrClosedPipe {
- break
- }
- }
- s.Lock()
- s.buf = append(s.buf, bufR[:n]...)
- s.Unlock()
- }
- }()
-}
-
-// Read reads from the local buffer first and, when it is empty, falls back to stdin.
-func (s *FillableStdin) Read(p []byte) (n int, err error) {
- s.Lock()
- i := len(s.buf)
- if len(p) < i {
- i = len(p)
- }
- if i > 0 {
- n := copy(p, s.buf)
- s.buf = s.buf[:0]
- cerr := s.bufErr
- s.bufErr = nil
- s.Unlock()
- return n, cerr
- }
- s.Unlock()
- n, err = s.stdin.Read(p)
- return n, err
-}
-
-func (s *FillableStdin) Close() error {
- s.stdinBuffer.Close()
- return nil
-}
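
The cancelable-read trick above hinges on one goroutine owning the blocking `Read` while `Close` unblocks waiters through a stop channel. A simplified sketch of the same pattern, not the vendored implementation: it spawns one goroutine per `Read` call instead of a shared ioloop, and the type name is illustrative.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

type cancelableReader struct {
	r    io.Reader
	stop chan struct{}
}

func (c *cancelableReader) Read(p []byte) (int, error) {
	type result struct {
		n   int
		err error
	}
	done := make(chan result, 1)
	go func() { // simplified: one goroutine per call, not a shared ioloop
		n, err := c.r.Read(p)
		done <- result{n, err}
	}()
	select {
	case res := <-done:
		return res.n, res.err
	case <-c.stop:
		return 0, io.EOF // canceled reads report EOF, as CancelableStdin does
	}
}

func (c *cancelableReader) Close() error {
	close(c.stop)
	return nil
}

func main() {
	r := &cancelableReader{r: strings.NewReader("hello"), stop: make(chan struct{})}
	buf := make([]byte, 8)
	n, _ := r.Read(buf)
	fmt.Printf("read %q\n", buf[:n])
	_ = r.Close() // after this, further Reads return io.EOF
}
```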
diff --git a/vendor/github.com/chzyer/readline/std_windows.go b/vendor/github.com/chzyer/readline/std_windows.go
deleted file mode 100644
index b10f91bc..00000000
--- a/vendor/github.com/chzyer/readline/std_windows.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build windows
-
-package readline
-
-func init() {
- Stdin = NewRawReader()
- Stdout = NewANSIWriter(Stdout)
- Stderr = NewANSIWriter(Stderr)
-}
diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go
deleted file mode 100644
index 133993ca..00000000
--- a/vendor/github.com/chzyer/readline/term.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
-
-// Package terminal provides support functions for dealing with terminals, as
-// commonly found on UNIX systems.
-//
-// Putting a terminal into raw mode is the most common requirement:
-//
-// oldState, err := terminal.MakeRaw(0)
-// if err != nil {
-// panic(err)
-// }
-// defer terminal.Restore(0, oldState)
-package readline
-
-import (
- "io"
- "syscall"
-)
-
-// State contains the state of a terminal.
-type State struct {
- termios Termios
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
- _, err := getTermios(fd)
- return err == nil
-}
-
-// MakeRaw puts the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd int) (*State, error) {
- var oldState State
-
- if termios, err := getTermios(fd); err != nil {
- return nil, err
- } else {
- oldState.termios = *termios
- }
-
- newState := oldState.termios
- // This attempts to replicate the behaviour documented for cfmakeraw in
- // the termios(3) manpage.
- newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
- // newState.Oflag &^= syscall.OPOST
- newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
- newState.Cflag &^= syscall.CSIZE | syscall.PARENB
- newState.Cflag |= syscall.CS8
-
- newState.Cc[syscall.VMIN] = 1
- newState.Cc[syscall.VTIME] = 0
-
- return &oldState, setTermios(fd, &newState)
-}
-
-// GetState returns the current state of a terminal which may be useful to
-// restore the terminal after a signal.
-func GetState(fd int) (*State, error) {
- termios, err := getTermios(fd)
- if err != nil {
- return nil, err
- }
-
- return &State{termios: *termios}, nil
-}
-
-// Restore restores the terminal connected to the given file descriptor to a
-// previous state.
-func restoreTerm(fd int, state *State) error {
- return setTermios(fd, &state.termios)
-}
-
-// ReadPassword reads a line of input from a terminal without local echo. This
-// is commonly used for inputting passwords and other sensitive data. The slice
-// returned does not include the \n.
-func ReadPassword(fd int) ([]byte, error) {
- oldState, err := getTermios(fd)
- if err != nil {
- return nil, err
- }
-
- newState := oldState
- newState.Lflag &^= syscall.ECHO
- newState.Lflag |= syscall.ICANON | syscall.ISIG
- newState.Iflag |= syscall.ICRNL
- if err := setTermios(fd, newState); err != nil {
- return nil, err
- }
-
- defer func() {
- setTermios(fd, oldState)
- }()
-
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(fd, buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/github.com/chzyer/readline/term_bsd.go b/vendor/github.com/chzyer/readline/term_bsd.go
deleted file mode 100644
index 68b56ea6..00000000
--- a/vendor/github.com/chzyer/readline/term_bsd.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd netbsd openbsd
-
-package readline
-
-import (
- "syscall"
- "unsafe"
-)
-
-func getTermios(fd int) (*Termios, error) {
- termios := new(Termios)
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
- if err != 0 {
- return nil, err
- }
- return termios, nil
-}
-
-func setTermios(fd int, termios *Termios) error {
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
- if err != 0 {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/chzyer/readline/term_linux.go b/vendor/github.com/chzyer/readline/term_linux.go
deleted file mode 100644
index e3392b4a..00000000
--- a/vendor/github.com/chzyer/readline/term_linux.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package readline
-
-import (
- "syscall"
- "unsafe"
-)
-
-// These constants are declared here, rather than imported
-// from the syscall package, because some syscall packages
-// (for example gccgo's), even on linux, do not declare them.
-const ioctlReadTermios = 0x5401 // syscall.TCGETS
-const ioctlWriteTermios = 0x5402 // syscall.TCSETS
-
-func getTermios(fd int) (*Termios, error) {
- termios := new(Termios)
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
- if err != 0 {
- return nil, err
- }
- return termios, nil
-}
-
-func setTermios(fd int, termios *Termios) error {
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
- if err != 0 {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go
deleted file mode 100644
index 4c27273c..00000000
--- a/vendor/github.com/chzyer/readline/term_solaris.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build solaris
-
-package readline
-
-import "golang.org/x/sys/unix"
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (int, int, error) {
- ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
- if err != nil {
- return 0, 0, err
- }
- return int(ws.Col), int(ws.Row), nil
-}
-
-type Termios unix.Termios
-
-func getTermios(fd int) (*Termios, error) {
- termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
- if err != nil {
- return nil, err
- }
- return (*Termios)(termios), nil
-}
-
-func setTermios(fd int, termios *Termios) error {
- return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios))
-}
diff --git a/vendor/github.com/chzyer/readline/term_unix.go b/vendor/github.com/chzyer/readline/term_unix.go
deleted file mode 100644
index d3ea2424..00000000
--- a/vendor/github.com/chzyer/readline/term_unix.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
-
-package readline
-
-import (
- "syscall"
- "unsafe"
-)
-
-type Termios syscall.Termios
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (int, int, error) {
- var dimensions [4]uint16
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0)
- if err != 0 {
- return 0, 0, err
- }
- return int(dimensions[1]), int(dimensions[0]), nil
-}
diff --git a/vendor/github.com/chzyer/readline/term_windows.go b/vendor/github.com/chzyer/readline/term_windows.go
deleted file mode 100644
index 1290e00b..00000000
--- a/vendor/github.com/chzyer/readline/term_windows.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-// Package terminal provides support functions for dealing with terminals, as
-// commonly found on UNIX systems.
-//
-// Putting a terminal into raw mode is the most common requirement:
-//
-// oldState, err := terminal.MakeRaw(0)
-// if err != nil {
-// panic(err)
-// }
-// defer terminal.Restore(0, oldState)
-package readline
-
-import (
- "io"
- "syscall"
- "unsafe"
-)
-
-const (
- enableLineInput = 2
- enableEchoInput = 4
- enableProcessedInput = 1
- enableWindowInput = 8
- enableMouseInput = 16
- enableInsertMode = 32
- enableQuickEditMode = 64
- enableExtendedFlags = 128
- enableAutoPosition = 256
- enableProcessedOutput = 1
- enableWrapAtEolOutput = 2
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
- procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
-)
-
-type (
- coord struct {
- x short
- y short
- }
- smallRect struct {
- left short
- top short
- right short
- bottom short
- }
- consoleScreenBufferInfo struct {
- size coord
- cursorPosition coord
- attributes word
- window smallRect
- maximumWindowSize coord
- }
-)
-
-type State struct {
- mode uint32
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
-
-// MakeRaw puts the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd int) (*State, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
- _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0)
- if e != 0 {
- return nil, error(e)
- }
- return &State{st}, nil
-}
-
-// GetState returns the current state of a terminal which may be useful to
-// restore the terminal after a signal.
-func GetState(fd int) (*State, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- return &State{st}, nil
-}
-
-// Restore restores the terminal connected to the given file descriptor to a
-// previous state.
-func restoreTerm(fd int, state *State) error {
- _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
- return err
-}
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (width, height int, err error) {
- var info consoleScreenBufferInfo
- _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
- if e != 0 {
- return 0, 0, error(e)
- }
- return int(info.size.x), int(info.size.y), nil
-}
-
-// ReadPassword reads a line of input from a terminal without local echo. This
-// is commonly used for inputting passwords and other sensitive data. The slice
-// returned does not include the \n.
-func ReadPassword(fd int) ([]byte, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- old := st
-
- st &^= (enableEchoInput)
- st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
- _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
- if e != 0 {
- return nil, error(e)
- }
-
- defer func() {
- syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
- }()
-
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(syscall.Handle(fd), buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- if n > 0 && buf[n-1] == '\r' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go
deleted file mode 100644
index 1078631c..00000000
--- a/vendor/github.com/chzyer/readline/terminal.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package readline
-
-import (
- "bufio"
- "fmt"
- "io"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-type Terminal struct {
- m sync.Mutex
- cfg *Config
- outchan chan rune
- closed int32
- stopChan chan struct{}
- kickChan chan struct{}
- wg sync.WaitGroup
- isReading int32
- sleeping int32
-
- sizeChan chan string
-}
-
-func NewTerminal(cfg *Config) (*Terminal, error) {
- if err := cfg.Init(); err != nil {
- return nil, err
- }
- t := &Terminal{
- cfg: cfg,
- kickChan: make(chan struct{}, 1),
- outchan: make(chan rune),
- stopChan: make(chan struct{}, 1),
- sizeChan: make(chan string, 1),
- }
-
- go t.ioloop()
- return t, nil
-}
-
-// SleepToResume suspends the current process and returns only once it has been resumed.
-func (t *Terminal) SleepToResume() {
- if !atomic.CompareAndSwapInt32(&t.sleeping, 0, 1) {
- return
- }
- defer atomic.StoreInt32(&t.sleeping, 0)
-
- t.ExitRawMode()
- ch := WaitForResume()
- SuspendMe()
- <-ch
- t.EnterRawMode()
-}
-
-func (t *Terminal) EnterRawMode() (err error) {
- return t.cfg.FuncMakeRaw()
-}
-
-func (t *Terminal) ExitRawMode() (err error) {
- return t.cfg.FuncExitRaw()
-}
-
-func (t *Terminal) Write(b []byte) (int, error) {
- return t.cfg.Stdout.Write(b)
-}
-
-// WriteStdin prefills the next Stdin fetch.
-// The next time you call ReadLine(), this value will be written before the user input.
-func (t *Terminal) WriteStdin(b []byte) (int, error) {
- return t.cfg.StdinWriter.Write(b)
-}
-
-type termSize struct {
- left int
- top int
-}
-
-func (t *Terminal) GetOffset(f func(offset string)) {
- go func() {
- f(<-t.sizeChan)
- }()
- t.Write([]byte("\033[6n"))
-}
-
-func (t *Terminal) Print(s string) {
- fmt.Fprintf(t.cfg.Stdout, "%s", s)
-}
-
-func (t *Terminal) PrintRune(r rune) {
- fmt.Fprintf(t.cfg.Stdout, "%c", r)
-}
-
-func (t *Terminal) Readline() *Operation {
- return NewOperation(t, t.cfg)
-}
-
-// ReadRune returns rune(0) on EOF.
-func (t *Terminal) ReadRune() rune {
- ch, ok := <-t.outchan
- if !ok {
- return rune(0)
- }
- return ch
-}
-
-func (t *Terminal) IsReading() bool {
- return atomic.LoadInt32(&t.isReading) == 1
-}
-
-func (t *Terminal) KickRead() {
- select {
- case t.kickChan <- struct{}{}:
- default:
- }
-}
-
-func (t *Terminal) ioloop() {
- t.wg.Add(1)
- defer func() {
- t.wg.Done()
- close(t.outchan)
- }()
-
- var (
- isEscape bool
- isEscapeEx bool
- expectNextChar bool
- )
-
- buf := bufio.NewReader(t.getStdin())
- for {
- if !expectNextChar {
- atomic.StoreInt32(&t.isReading, 0)
- select {
- case <-t.kickChan:
- atomic.StoreInt32(&t.isReading, 1)
- case <-t.stopChan:
- return
- }
- }
- expectNextChar = false
- r, _, err := buf.ReadRune()
- if err != nil {
- if strings.Contains(err.Error(), "interrupted system call") {
- expectNextChar = true
- continue
- }
- break
- }
-
- if isEscape {
- isEscape = false
- if r == CharEscapeEx {
- expectNextChar = true
- isEscapeEx = true
- continue
- }
- r = escapeKey(r, buf)
- } else if isEscapeEx {
- isEscapeEx = false
- if key := readEscKey(r, buf); key != nil {
- r = escapeExKey(key)
- // offset
- if key.typ == 'R' {
- if _, _, ok := key.Get2(); ok {
- select {
- case t.sizeChan <- key.attr:
- default:
- }
- }
- expectNextChar = true
- continue
- }
- }
- if r == 0 {
- expectNextChar = true
- continue
- }
- }
-
- expectNextChar = true
- switch r {
- case CharEsc:
- if t.cfg.VimMode {
- t.outchan <- r
- break
- }
- isEscape = true
- case CharInterrupt, CharEnter, CharCtrlJ, CharDelete:
- expectNextChar = false
- fallthrough
- default:
- t.outchan <- r
- }
- }
-
-}
-
-func (t *Terminal) Bell() {
- fmt.Fprintf(t, "%c", CharBell)
-}
-
-func (t *Terminal) Close() error {
- if atomic.SwapInt32(&t.closed, 1) != 0 {
- return nil
- }
- if closer, ok := t.cfg.Stdin.(io.Closer); ok {
- closer.Close()
- }
- close(t.stopChan)
- t.wg.Wait()
- return t.ExitRawMode()
-}
-
-func (t *Terminal) GetConfig() *Config {
- t.m.Lock()
- cfg := *t.cfg
- t.m.Unlock()
- return &cfg
-}
-
-func (t *Terminal) getStdin() io.Reader {
- t.m.Lock()
- r := t.cfg.Stdin
- t.m.Unlock()
- return r
-}
-
-func (t *Terminal) SetConfig(c *Config) error {
- if err := c.Init(); err != nil {
- return err
- }
- t.m.Lock()
- t.cfg = c
- t.m.Unlock()
- return nil
-}
diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go
deleted file mode 100644
index af4e0052..00000000
--- a/vendor/github.com/chzyer/readline/utils.go
+++ /dev/null
@@ -1,277 +0,0 @@
-package readline
-
-import (
- "bufio"
- "bytes"
- "container/list"
- "fmt"
- "os"
- "strconv"
- "strings"
- "sync"
- "time"
- "unicode"
-)
-
-var (
- isWindows = false
-)
-
-const (
- CharLineStart = 1
- CharBackward = 2
- CharInterrupt = 3
- CharDelete = 4
- CharLineEnd = 5
- CharForward = 6
- CharBell = 7
- CharCtrlH = 8
- CharTab = 9
- CharCtrlJ = 10
- CharKill = 11
- CharCtrlL = 12
- CharEnter = 13
- CharNext = 14
- CharPrev = 16
- CharBckSearch = 18
- CharFwdSearch = 19
- CharTranspose = 20
- CharCtrlU = 21
- CharCtrlW = 23
- CharCtrlY = 25
- CharCtrlZ = 26
- CharEsc = 27
- CharEscapeEx = 91
- CharBackspace = 127
-)
-
-const (
- MetaBackward rune = -iota - 1
- MetaForward
- MetaDelete
- MetaBackspace
- MetaTranspose
-)
-
-// WaitForResume must be called before the current process is suspended.
-// It runs a ticker and watches for an unusually long gap between ticks,
-// which indicates that the process was suspended and has now been resumed.
-func WaitForResume() chan struct{} {
- ch := make(chan struct{})
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- ticker := time.NewTicker(10 * time.Millisecond)
- t := time.Now()
- wg.Done()
- for {
- now := <-ticker.C
- if now.Sub(t) > 100*time.Millisecond {
- break
- }
- t = now
- }
- ticker.Stop()
- ch <- struct{}{}
- }()
- wg.Wait()
- return ch
-}
-
-func Restore(fd int, state *State) error {
- err := restoreTerm(fd, state)
- if err != nil {
- // errno 0 means everything is ok :)
- if err.Error() == "errno 0" {
- return nil
- } else {
- return err
- }
- }
- return nil
-}
-
-func IsPrintable(key rune) bool {
- isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
- return key >= 32 && !isInSurrogateArea
-}
-
-// translate Esc[X
-func escapeExKey(key *escapeKeyPair) rune {
- var r rune
- switch key.typ {
- case 'D':
- r = CharBackward
- case 'C':
- r = CharForward
- case 'A':
- r = CharPrev
- case 'B':
- r = CharNext
- case 'H':
- r = CharLineStart
- case 'F':
- r = CharLineEnd
- case '~':
- if key.attr == "3" {
- r = CharDelete
- }
- default:
- }
- return r
-}
-
-type escapeKeyPair struct {
- attr string
- typ rune
-}
-
-func (e *escapeKeyPair) Get2() (int, int, bool) {
- sp := strings.Split(e.attr, ";")
- if len(sp) < 2 {
- return -1, -1, false
- }
- s1, err := strconv.Atoi(sp[0])
- if err != nil {
- return -1, -1, false
- }
- s2, err := strconv.Atoi(sp[1])
- if err != nil {
- return -1, -1, false
- }
- return s1, s2, true
-}
-
-func readEscKey(r rune, reader *bufio.Reader) *escapeKeyPair {
- p := escapeKeyPair{}
- buf := bytes.NewBuffer(nil)
- for {
- if r == ';' {
- } else if unicode.IsNumber(r) {
- } else {
- p.typ = r
- break
- }
- buf.WriteRune(r)
- r, _, _ = reader.ReadRune()
- }
- p.attr = buf.String()
- return &p
-}
-
-// translate EscX to Meta+X
-func escapeKey(r rune, reader *bufio.Reader) rune {
- switch r {
- case 'b':
- r = MetaBackward
- case 'f':
- r = MetaForward
- case 'd':
- r = MetaDelete
- case CharTranspose:
- r = MetaTranspose
- case CharBackspace:
- r = MetaBackspace
- case 'O':
- d, _, _ := reader.ReadRune()
- switch d {
- case 'H':
- r = CharLineStart
- case 'F':
- r = CharLineEnd
- default:
- reader.UnreadRune()
- }
- case CharEsc:
-
- }
- return r
-}
-
-func SplitByLine(start, screenWidth int, rs []rune) []string {
- var ret []string
- buf := bytes.NewBuffer(nil)
- currentWidth := start
- for _, r := range rs {
- w := runes.Width(r)
- currentWidth += w
- buf.WriteRune(r)
- if currentWidth >= screenWidth {
- ret = append(ret, buf.String())
- buf.Reset()
- currentWidth = 0
- }
- }
- ret = append(ret, buf.String())
- return ret
-}
-
-// LineCount calculates how many screen lines w characters occupy.
-func LineCount(screenWidth, w int) int {
- r := w / screenWidth
- if w%screenWidth != 0 {
- r++
- }
- return r
-}
-
-func IsWordBreak(i rune) bool {
- switch {
- case i >= 'a' && i <= 'z':
- case i >= 'A' && i <= 'Z':
- case i >= '0' && i <= '9':
- default:
- return true
- }
- return false
-}
-
-func GetInt(s []string, def int) int {
- if len(s) == 0 {
- return def
- }
- c, err := strconv.Atoi(s[0])
- if err != nil {
- return def
- }
- return c
-}
-
-type RawMode struct {
- state *State
-}
-
-func (r *RawMode) Enter() (err error) {
- r.state, err = MakeRaw(GetStdin())
- return err
-}
-
-func (r *RawMode) Exit() error {
- if r.state == nil {
- return nil
- }
- return Restore(GetStdin(), r.state)
-}
-
-// -----------------------------------------------------------------------------
-
-func sleep(n int) {
- Debug(n)
- time.Sleep(2000 * time.Millisecond)
-}
-
-// print a linked list to Debug()
-func debugList(l *list.List) {
- idx := 0
- for e := l.Front(); e != nil; e = e.Next() {
- Debug(idx, fmt.Sprintf("%+v", e.Value))
- idx++
- }
-}
-
-// append log info to another file
-func Debug(o ...interface{}) {
- f, _ := os.OpenFile("debug.tmp", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
- fmt.Fprintln(f, o...)
- f.Close()
-}
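
`readEscKey` above is essentially a tiny CSI parser: digits and ';' accumulate into the attribute string, and the first other rune names the sequence type. A standalone sketch of the same loop (the function name is illustrative, not from the vendored package):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
	"unicode"
)

// parseCSI reads the body of an ESC [ sequence: digits and ';' accumulate
// into attr, and the first other rune terminates the sequence as typ.
func parseCSI(reader *bufio.Reader) (attr string, typ rune, err error) {
	var b strings.Builder
	for {
		r, _, e := reader.ReadRune()
		if e != nil {
			return b.String(), 0, e
		}
		if r == ';' || unicode.IsNumber(r) {
			b.WriteRune(r)
			continue
		}
		return b.String(), r, nil
	}
}

func main() {
	// "\x1b[6n" asks the terminal for the cursor position; it answers with
	// ESC [ row ; col R, which the ioloop routes through sizeChan when
	// typ == 'R'.
	reply := bufio.NewReader(strings.NewReader("24;80R"))
	attr, typ, _ := parseCSI(reply)
	fmt.Printf("attr=%q typ=%q\n", attr, typ) // attr="24;80" typ='R'
}
```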
diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go
deleted file mode 100644
index f88dac97..00000000
--- a/vendor/github.com/chzyer/readline/utils_unix.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
-
-package readline
-
-import (
- "io"
- "os"
- "os/signal"
- "sync"
- "syscall"
-)
-
-type winsize struct {
- Row uint16
- Col uint16
- Xpixel uint16
- Ypixel uint16
-}
-
-// SuspendMe sends a suspend signal to the current process while in raw mode.
-// On macOS the signal must be sent to the parent pid;
-// on Linux it must be sent to the process itself.
-func SuspendMe() {
- p, _ := os.FindProcess(os.Getppid())
- p.Signal(syscall.SIGTSTP)
- p, _ = os.FindProcess(os.Getpid())
- p.Signal(syscall.SIGTSTP)
-}
-
-// get width of the terminal
-func getWidth(stdoutFd int) int {
- cols, _, err := GetSize(stdoutFd)
- if err != nil {
- return -1
- }
- return cols
-}
-
-func GetScreenWidth() int {
- w := getWidth(syscall.Stdout)
- if w < 0 {
- w = getWidth(syscall.Stderr)
- }
- return w
-}
-
-// ClearScreen clears the console screen
-func ClearScreen(w io.Writer) (int, error) {
- return w.Write([]byte("\033[H"))
-}
-
-func DefaultIsTerminal() bool {
- return IsTerminal(syscall.Stdin) && (IsTerminal(syscall.Stdout) || IsTerminal(syscall.Stderr))
-}
-
-func GetStdin() int {
- return syscall.Stdin
-}
-
-// -----------------------------------------------------------------------------
-
-var (
- widthChange sync.Once
- widthChangeCallback func()
-)
-
-func DefaultOnWidthChanged(f func()) {
- widthChangeCallback = f
- widthChange.Do(func() {
- ch := make(chan os.Signal, 1)
- signal.Notify(ch, syscall.SIGWINCH)
-
- go func() {
- for {
- _, ok := <-ch
- if !ok {
- break
- }
- widthChangeCallback()
- }
- }()
- })
-}
diff --git a/vendor/github.com/chzyer/readline/utils_windows.go b/vendor/github.com/chzyer/readline/utils_windows.go
deleted file mode 100644
index 5bfa55dc..00000000
--- a/vendor/github.com/chzyer/readline/utils_windows.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// +build windows
-
-package readline
-
-import (
- "io"
- "syscall"
-)
-
-func SuspendMe() {
-}
-
-func GetStdin() int {
- return int(syscall.Stdin)
-}
-
-func init() {
- isWindows = true
-}
-
-// get width of the terminal
-func GetScreenWidth() int {
- info, _ := GetConsoleScreenBufferInfo()
- if info == nil {
- return -1
- }
- return int(info.dwSize.x)
-}
-
-// ClearScreen clears the console screen
-func ClearScreen(_ io.Writer) error {
- return SetConsoleCursorPosition(&_COORD{0, 0})
-}
-
-func DefaultIsTerminal() bool {
- return true
-}
-
-func DefaultOnWidthChanged(func()) {
-
-}
diff --git a/vendor/github.com/chzyer/readline/vim.go b/vendor/github.com/chzyer/readline/vim.go
deleted file mode 100644
index bedf2c1a..00000000
--- a/vendor/github.com/chzyer/readline/vim.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package readline
-
-const (
- VIM_NORMAL = iota
- VIM_INSERT
- VIM_VISUAL
-)
-
-type opVim struct {
- cfg *Config
- op *Operation
- vimMode int
-}
-
-func newVimMode(op *Operation) *opVim {
- ov := &opVim{
- cfg: op.cfg,
- op: op,
- }
- ov.SetVimMode(ov.cfg.VimMode)
- return ov
-}
-
-func (o *opVim) SetVimMode(on bool) {
- if o.cfg.VimMode && !on { // turn off
- o.ExitVimMode()
- }
- o.cfg.VimMode = on
- o.vimMode = VIM_INSERT
-}
-
-func (o *opVim) ExitVimMode() {
- o.vimMode = VIM_INSERT
-}
-
-func (o *opVim) IsEnableVimMode() bool {
- return o.cfg.VimMode
-}
-
-func (o *opVim) handleVimNormalMovement(r rune, readNext func() rune) (t rune, handled bool) {
- rb := o.op.buf
- handled = true
- switch r {
- case 'h':
- t = CharBackward
- case 'j':
- t = CharNext
- case 'k':
- t = CharPrev
- case 'l':
- t = CharForward
- case '0', '^':
- rb.MoveToLineStart()
- case '$':
- rb.MoveToLineEnd()
- case 'x':
- rb.Delete()
- if rb.IsCursorInEnd() {
- rb.MoveBackward()
- }
- case 'r':
- rb.Replace(readNext())
- case 'd':
- next := readNext()
- switch next {
- case 'd':
- rb.Erase()
- case 'w':
- rb.DeleteWord()
- case 'h':
- rb.Backspace()
- case 'l':
- rb.Delete()
- }
- case 'p':
- rb.Yank()
- case 'b', 'B':
- rb.MoveToPrevWord()
- case 'w', 'W':
- rb.MoveToNextWord()
- case 'e', 'E':
- rb.MoveToEndWord()
- case 'f', 'F', 't', 'T':
- next := readNext()
- prevChar := r == 't' || r == 'T'
- reverse := r == 'F' || r == 'T'
- switch next {
- case CharEsc:
- default:
- rb.MoveTo(next, prevChar, reverse)
- }
- default:
- return r, false
- }
- return t, true
-}
-
-func (o *opVim) handleVimNormalEnterInsert(r rune, readNext func() rune) (t rune, handled bool) {
- rb := o.op.buf
- handled = true
- switch r {
- case 'i':
- case 'I':
- rb.MoveToLineStart()
- case 'a':
- rb.MoveForward()
- case 'A':
- rb.MoveToLineEnd()
- case 's':
- rb.Delete()
- case 'S':
- rb.Erase()
- case 'c':
- next := readNext()
- switch next {
- case 'c':
- rb.Erase()
- case 'w':
- rb.DeleteWord()
- case 'h':
- rb.Backspace()
- case 'l':
- rb.Delete()
- }
- default:
- return r, false
- }
-
- o.EnterVimInsertMode()
- return
-}
-
-func (o *opVim) HandleVimNormal(r rune, readNext func() rune) (t rune) {
- switch r {
- case CharEnter, CharInterrupt:
- o.ExitVimMode()
- return r
- }
-
- if r, handled := o.handleVimNormalMovement(r, readNext); handled {
- return r
- }
-
- if r, handled := o.handleVimNormalEnterInsert(r, readNext); handled {
- return r
- }
-
- // invalid operation
- o.op.t.Bell()
- return 0
-}
-
-func (o *opVim) EnterVimInsertMode() {
- o.vimMode = VIM_INSERT
-}
-
-func (o *opVim) ExitVimInsertMode() {
- o.vimMode = VIM_NORMAL
-}
-
-func (o *opVim) HandleVim(r rune, readNext func() rune) rune {
- if o.vimMode == VIM_NORMAL {
- return o.HandleVimNormal(r, readNext)
- }
- if r == CharEsc {
- o.ExitVimInsertMode()
- return 0
- }
-
- switch o.vimMode {
- case VIM_INSERT:
- return r
- case VIM_VISUAL:
- }
- return r
-}
diff --git a/vendor/github.com/chzyer/readline/windows_api.go b/vendor/github.com/chzyer/readline/windows_api.go
deleted file mode 100644
index 63f4f7b7..00000000
--- a/vendor/github.com/chzyer/readline/windows_api.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// +build windows
-
-package readline
-
-import (
- "reflect"
- "syscall"
- "unsafe"
-)
-
-var (
- kernel = NewKernel()
- stdout = uintptr(syscall.Stdout)
- stdin = uintptr(syscall.Stdin)
-)
-
-type Kernel struct {
- SetConsoleCursorPosition,
- SetConsoleTextAttribute,
- FillConsoleOutputCharacterW,
- FillConsoleOutputAttribute,
- ReadConsoleInputW,
- GetConsoleScreenBufferInfo,
- GetConsoleCursorInfo,
- GetStdHandle CallFunc
-}
-
-type short int16
-type word uint16
-type dword uint32
-type wchar uint16
-
-type _COORD struct {
- x short
- y short
-}
-
-func (c *_COORD) ptr() uintptr {
- return uintptr(*(*int32)(unsafe.Pointer(c)))
-}
-
-const (
- EVENT_KEY = 0x0001
- EVENT_MOUSE = 0x0002
- EVENT_WINDOW_BUFFER_SIZE = 0x0004
- EVENT_MENU = 0x0008
- EVENT_FOCUS = 0x0010
-)
-
-type _KEY_EVENT_RECORD struct {
- bKeyDown int32
- wRepeatCount word
- wVirtualKeyCode word
- wVirtualScanCode word
- unicodeChar wchar
- dwControlKeyState dword
-}
-
-// KEY_EVENT_RECORD KeyEvent;
-// MOUSE_EVENT_RECORD MouseEvent;
-// WINDOW_BUFFER_SIZE_RECORD WindowBufferSizeEvent;
-// MENU_EVENT_RECORD MenuEvent;
-// FOCUS_EVENT_RECORD FocusEvent;
-type _INPUT_RECORD struct {
- EventType word
- Padding uint16
- Event [16]byte
-}
-
-type _CONSOLE_SCREEN_BUFFER_INFO struct {
- dwSize _COORD
- dwCursorPosition _COORD
- wAttributes word
- srWindow _SMALL_RECT
- dwMaximumWindowSize _COORD
-}
-
-type _SMALL_RECT struct {
- left short
- top short
- right short
- bottom short
-}
-
-type _CONSOLE_CURSOR_INFO struct {
- dwSize dword
- bVisible bool
-}
-
-type CallFunc func(u ...uintptr) error
-
-func NewKernel() *Kernel {
- k := &Kernel{}
- kernel32 := syscall.NewLazyDLL("kernel32.dll")
- v := reflect.ValueOf(k).Elem()
- t := v.Type()
- for i := 0; i < t.NumField(); i++ {
- name := t.Field(i).Name
- f := kernel32.NewProc(name)
- v.Field(i).Set(reflect.ValueOf(k.Wrap(f)))
- }
- return k
-}
-
-func (k *Kernel) Wrap(p *syscall.LazyProc) CallFunc {
- return func(args ...uintptr) error {
- var r0 uintptr
- var e1 syscall.Errno
- size := uintptr(len(args))
- if len(args) <= 3 {
- buf := make([]uintptr, 3)
- copy(buf, args)
- r0, _, e1 = syscall.Syscall(p.Addr(), size,
- buf[0], buf[1], buf[2])
- } else {
- buf := make([]uintptr, 6)
- copy(buf, args)
- r0, _, e1 = syscall.Syscall6(p.Addr(), size,
- buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
- )
- }
-
- if int(r0) == 0 {
- if e1 != 0 {
- return error(e1)
- } else {
- return syscall.EINVAL
- }
- }
- return nil
- }
-
-}
-
-func GetConsoleScreenBufferInfo() (*_CONSOLE_SCREEN_BUFFER_INFO, error) {
- t := new(_CONSOLE_SCREEN_BUFFER_INFO)
- err := kernel.GetConsoleScreenBufferInfo(
- stdout,
- uintptr(unsafe.Pointer(t)),
- )
- return t, err
-}
-
-func GetConsoleCursorInfo() (*_CONSOLE_CURSOR_INFO, error) {
- t := new(_CONSOLE_CURSOR_INFO)
- err := kernel.GetConsoleCursorInfo(stdout, uintptr(unsafe.Pointer(t)))
- return t, err
-}
-
-func SetConsoleCursorPosition(c *_COORD) error {
- return kernel.SetConsoleCursorPosition(stdout, c.ptr())
-}
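
The interesting move in `NewKernel` above is binding every function-typed struct field to a DLL procedure looked up by the field's own name. The same reflection technique in a platform-neutral miniature (types and names here are illustrative stand-ins, not Windows APIs):

```go
package main

import (
	"fmt"
	"reflect"
)

type CallFunc func(args ...uintptr) error

// API plays the role of Kernel: each exported field is a function slot
// whose name doubles as the lookup key.
type API struct {
	GetConsoleMode,
	SetConsoleMode CallFunc
}

// bind fills every field with whatever lookup returns for the field's name,
// mirroring how NewKernel wires fields to kernel32 procs.
func bind(lookup func(name string) CallFunc) *API {
	a := &API{}
	v := reflect.ValueOf(a).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		name := t.Field(i).Name
		v.Field(i).Set(reflect.ValueOf(lookup(name)))
	}
	return a
}

func main() {
	api := bind(func(name string) CallFunc {
		return func(args ...uintptr) error {
			fmt.Printf("call %s with %d args\n", name, len(args))
			return nil
		}
	})
	_ = api.GetConsoleMode(1, 2)
	_ = api.SetConsoleMode(3)
}
```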
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
deleted file mode 100644
index 1cade6ce..00000000
--- a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Brian Goff
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
deleted file mode 100644
index b4800567..00000000
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package md2man
-
-import (
- "github.com/russross/blackfriday/v2"
-)
-
-// Render converts a markdown document into a roff formatted document.
-func Render(doc []byte) []byte {
- renderer := NewRoffRenderer()
-
- return blackfriday.Run(doc,
- []blackfriday.Option{blackfriday.WithRenderer(renderer),
- blackfriday.WithExtensions(renderer.GetExtensions())}...)
-}
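
Callers need nothing beyond this one entry point. A minimal usage sketch (the markdown source shown is made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	src := []byte("# CANDY 1\n\n## NAME\ncandy - zero-config development domains\n")
	roff := md2man.Render(src)
	// Output starts with ".nh" (hyphenation off) followed by the ".TH"
	// title header, per the roff renderer that follows in this diff.
	fmt.Println(string(roff))
}
```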
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
deleted file mode 100644
index 0668a66c..00000000
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
+++ /dev/null
@@ -1,345 +0,0 @@
-package md2man
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
-
- "github.com/russross/blackfriday/v2"
-)
-
-// roffRenderer implements the blackfriday.Renderer interface for creating
-// roff format (manpages) from markdown text
-type roffRenderer struct {
- extensions blackfriday.Extensions
- listCounters []int
- firstHeader bool
- defineTerm bool
- listDepth int
-}
-
-const (
- titleHeader = ".TH "
- topLevelHeader = "\n\n.SH "
- secondLevelHdr = "\n.SH "
- otherHeader = "\n.SS "
- crTag = "\n"
- emphTag = "\\fI"
- emphCloseTag = "\\fP"
- strongTag = "\\fB"
- strongCloseTag = "\\fP"
- breakTag = "\n.br\n"
- paraTag = "\n.PP\n"
- hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
- linkTag = "\n\\[la]"
- linkCloseTag = "\\[ra]"
- codespanTag = "\\fB\\fC"
- codespanCloseTag = "\\fR"
- codeTag = "\n.PP\n.RS\n\n.nf\n"
- codeCloseTag = "\n.fi\n.RE\n"
- quoteTag = "\n.PP\n.RS\n"
- quoteCloseTag = "\n.RE\n"
- listTag = "\n.RS\n"
- listCloseTag = "\n.RE\n"
- arglistTag = "\n.TP\n"
- tableStart = "\n.TS\nallbox;\n"
- tableEnd = ".TE\n"
- tableCellStart = "T{\n"
- tableCellEnd = "\nT}\n"
-)
-
-// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
-// from markdown
-func NewRoffRenderer() *roffRenderer { // nolint: golint
- var extensions blackfriday.Extensions
-
- extensions |= blackfriday.NoIntraEmphasis
- extensions |= blackfriday.Tables
- extensions |= blackfriday.FencedCode
- extensions |= blackfriday.SpaceHeadings
- extensions |= blackfriday.Footnotes
- extensions |= blackfriday.Titleblock
- extensions |= blackfriday.DefinitionLists
- return &roffRenderer{
- extensions: extensions,
- }
-}
-
-// GetExtensions returns the list of extensions used by this renderer implementation
-func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
- return r.extensions
-}
-
-// RenderHeader handles outputting the header at document start
-func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
- // disable hyphenation
- out(w, ".nh\n")
-}
-
-// RenderFooter handles outputting the footer at the document end; the roff
-// renderer has no footer information
-func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
-}
-
-// RenderNode is called for each node in a markdown document; based on the node
-// type the equivalent roff output is sent to the writer
-func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
-
- var walkAction = blackfriday.GoToNext
-
- switch node.Type {
- case blackfriday.Text:
- r.handleText(w, node, entering)
- case blackfriday.Softbreak:
- out(w, crTag)
- case blackfriday.Hardbreak:
- out(w, breakTag)
- case blackfriday.Emph:
- if entering {
- out(w, emphTag)
- } else {
- out(w, emphCloseTag)
- }
- case blackfriday.Strong:
- if entering {
- out(w, strongTag)
- } else {
- out(w, strongCloseTag)
- }
- case blackfriday.Link:
- if !entering {
- out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
- }
- case blackfriday.Image:
- // ignore images
- walkAction = blackfriday.SkipChildren
- case blackfriday.Code:
- out(w, codespanTag)
- escapeSpecialChars(w, node.Literal)
- out(w, codespanCloseTag)
- case blackfriday.Document:
- break
- case blackfriday.Paragraph:
- // roff .PP markers break lists
- if r.listDepth > 0 {
- return blackfriday.GoToNext
- }
- if entering {
- out(w, paraTag)
- } else {
- out(w, crTag)
- }
- case blackfriday.BlockQuote:
- if entering {
- out(w, quoteTag)
- } else {
- out(w, quoteCloseTag)
- }
- case blackfriday.Heading:
- r.handleHeading(w, node, entering)
- case blackfriday.HorizontalRule:
- out(w, hruleTag)
- case blackfriday.List:
- r.handleList(w, node, entering)
- case blackfriday.Item:
- r.handleItem(w, node, entering)
- case blackfriday.CodeBlock:
- out(w, codeTag)
- escapeSpecialChars(w, node.Literal)
- out(w, codeCloseTag)
- case blackfriday.Table:
- r.handleTable(w, node, entering)
- case blackfriday.TableCell:
- r.handleTableCell(w, node, entering)
- case blackfriday.TableHead:
- case blackfriday.TableBody:
- case blackfriday.TableRow:
- // no action as cell entries do all the nroff formatting
- return blackfriday.GoToNext
- default:
- fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
- }
- return walkAction
-}
-
-func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) {
- var (
- start, end string
- )
- // handle special roff table cell text encapsulation
- if node.Parent.Type == blackfriday.TableCell {
- if len(node.Literal) > 30 {
- start = tableCellStart
- end = tableCellEnd
- } else {
- // end rows that aren't terminated by "tableCellEnd" with a cr, if at the end of the row
- if node.Parent.Next == nil && !node.Parent.IsHeader {
- end = crTag
- }
- }
- }
- out(w, start)
- escapeSpecialChars(w, node.Literal)
- out(w, end)
-}
-
-func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
- if entering {
- switch node.Level {
- case 1:
- if !r.firstHeader {
- out(w, titleHeader)
- r.firstHeader = true
- break
- }
- out(w, topLevelHeader)
- case 2:
- out(w, secondLevelHdr)
- default:
- out(w, otherHeader)
- }
- }
-}
-
-func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
- openTag := listTag
- closeTag := listCloseTag
- if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
- // tags for definition lists handled within Item node
- openTag = ""
- closeTag = ""
- }
- if entering {
- r.listDepth++
- if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
- r.listCounters = append(r.listCounters, 1)
- }
- out(w, openTag)
- } else {
- if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
- r.listCounters = r.listCounters[:len(r.listCounters)-1]
- }
- out(w, closeTag)
- r.listDepth--
- }
-}
-
-func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
- if entering {
- if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
- out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
- r.listCounters[len(r.listCounters)-1]++
- } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
- // state machine for handling terms and following definitions
- // since blackfriday does not distinguish them properly, nor
- // does it separate them into separate lists as it should
- if !r.defineTerm {
- out(w, arglistTag)
- r.defineTerm = true
- } else {
- r.defineTerm = false
- }
- } else {
- out(w, ".IP \\(bu 2\n")
- }
- } else {
- out(w, "\n")
- }
-}
-
-func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
- if entering {
- out(w, tableStart)
- // call walker to count cells (and rows?) so the format section can be produced
- columns := countColumns(node)
- out(w, strings.Repeat("l ", columns)+"\n")
- out(w, strings.Repeat("l ", columns)+".\n")
- } else {
- out(w, tableEnd)
- }
-}
-
-func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
- var (
- start, end string
- )
- if node.IsHeader {
- start = codespanTag
- end = codespanCloseTag
- }
- if entering {
- if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
- out(w, "\t"+start)
- } else {
- out(w, start)
- }
- } else {
- // need to carriage return if we are at the end of the header row
- if node.IsHeader && node.Next == nil {
- end = end + crTag
- }
- out(w, end)
- }
-}
-
-// because roff format requires knowing the column count before outputting any table
-// data we need to walk a table tree and count the columns
-func countColumns(node *blackfriday.Node) int {
- var columns int
-
- node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
- switch node.Type {
- case blackfriday.TableRow:
- if !entering {
- return blackfriday.Terminate
- }
- case blackfriday.TableCell:
- if entering {
- columns++
- }
- default:
- }
- return blackfriday.GoToNext
- })
- return columns
-}
-
-func out(w io.Writer, output string) {
- io.WriteString(w, output) // nolint: errcheck
-}
-
-func needsBackslash(c byte) bool {
- for _, r := range []byte("-_&\\~") {
- if c == r {
- return true
- }
- }
- return false
-}
-
-func escapeSpecialChars(w io.Writer, text []byte) {
- for i := 0; i < len(text); i++ {
- // escape initial apostrophe or period
- if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
- out(w, "\\&")
- }
-
- // directly copy normal characters
- org := i
-
- for i < len(text) && !needsBackslash(text[i]) {
- i++
- }
- if i > org {
- w.Write(text[org:i]) // nolint: errcheck
- }
-
- // escape a character
- if i >= len(text) {
- break
- }
-
- w.Write([]byte{'\\', text[i]}) // nolint: errcheck
- }
-}
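
A worked example of the escaping rules above, as a simplified standalone function (an assumption-level sketch: the original re-checks the leading byte inside its copy loop, while this version guards it once):

```go
package main

import (
	"bytes"
	"fmt"
)

// escape backslash-escapes the roff-special bytes - _ & \ ~ and guards a
// leading apostrophe or period, which roff would otherwise treat as a
// control line.
func escape(text []byte) []byte {
	var out bytes.Buffer
	if len(text) > 0 && (text[0] == '\'' || text[0] == '.') {
		out.WriteString(`\&`)
	}
	for _, c := range text {
		if bytes.IndexByte([]byte(`-_&\~`), c) >= 0 {
			out.WriteByte('\\')
		}
		out.WriteByte(c)
	}
	return out.Bytes()
}

func main() {
	fmt.Println(string(escape([]byte(".TH is roff-style_markup"))))
	// \&.TH is roff\-style\_markup
}
```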
diff --git a/vendor/github.com/dgraph-io/badger/.deepsource.toml b/vendor/github.com/dgraph-io/badger/.deepsource.toml
deleted file mode 100644
index 266045f0..00000000
--- a/vendor/github.com/dgraph-io/badger/.deepsource.toml
+++ /dev/null
@@ -1,18 +0,0 @@
-version = 1
-
-test_patterns = [
- 'integration/testgc/**',
- '**/*_test.go'
-]
-
-exclude_patterns = [
-
-]
-
-[[analyzers]]
-name = 'go'
-enabled = true
-
-
- [analyzers.meta]
- import_path = 'github.com/dgraph-io/badger'
diff --git a/vendor/github.com/dgraph-io/badger/.gitignore b/vendor/github.com/dgraph-io/badger/.gitignore
deleted file mode 100644
index e3efdf58..00000000
--- a/vendor/github.com/dgraph-io/badger/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-p/
-badger-test*/
diff --git a/vendor/github.com/dgraph-io/badger/.golangci.yml b/vendor/github.com/dgraph-io/badger/.golangci.yml
deleted file mode 100644
index fecb8644..00000000
--- a/vendor/github.com/dgraph-io/badger/.golangci.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-run:
- tests: false
-
-linters-settings:
- lll:
- line-length: 100
-
-linters:
- disable-all: true
- enable:
- - errcheck
- - ineffassign
- - gas
- - gofmt
- - golint
- - gosimple
- - govet
- - lll
- - varcheck
- - unused
-
-issues:
- exclude-rules:
- - linters:
- - gosec
- text: "G404: "
-
\ No newline at end of file
diff --git a/vendor/github.com/dgraph-io/badger/.travis.yml b/vendor/github.com/dgraph-io/badger/.travis.yml
deleted file mode 100644
index ea05101a..00000000
--- a/vendor/github.com/dgraph-io/badger/.travis.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-language: go
-
-go:
- - "1.12"
- - "1.13"
- - tip
-os:
- - osx
-env:
- jobs:
- - GOARCH=386
- - GOARCH=amd64
- global:
- - secure: CRkV2+/jlO0gXzzS50XGxfMS117FNwiVjxNY/LeWq06RKD+dDCPxTJl3JCNe3l0cYEPAglV2uMMYukDiTqJ7e+HI4nh4N4mv6lwx39N8dAvJe1x5ITS2T4qk4kTjuQb1Q1vw/ZOxoQqmvNKj2uRmBdJ/HHmysbRJ1OzCWML3OXdUwJf0AYlJzTjpMfkOKr7sTtE4rwyyQtd4tKH1fGdurgI9ZuFd9qvYxK2qcJhsQ6CNqMXt+7FkVkN1rIPmofjjBTNryzUr4COFXuWH95aDAif19DeBW4lbNgo1+FpDsrgmqtuhl6NAuptI8q/imow2KXBYJ8JPXsxW8DVFj0IIp0RCd3GjaEnwBEbxAyiIHLfW7AudyTS/dJOvZffPqXnuJ8xj3OPIdNe4xY0hWl8Ju2HhKfLOAHq7VadHZWd3IHLil70EiL4/JLD1rNbMImUZisFaA8pyrcIvYYebjOnk4TscwKFLedClRSX1XsMjWWd0oykQtrdkHM2IxknnBpaLu7mFnfE07f6dkG0nlpyu4SCLey7hr5FdcEmljA0nIxTSYDg6035fQkBEAbe7hlESOekkVNT9IZPwG+lmt3vU4ofi6NqNbJecOuSB+h36IiZ9s4YQtxYNnLgW14zjuFGGyT5smc3IjBT7qngDjKIgyrSVoRkY/8udy9qbUgvBeW8=
-
-
-jobs:
- allow_failures:
- - go: tip
- exclude:
- # Exclude builds for 386 architecture on 1.12 and tip
- # Since we don't want it to run for 32 bit
- - go: "1.12"
- env: GOARCH=386
- - go: tip
- env: GOARCH=386
-
-notifications:
- email: false
- slack:
- secure: X7uBLWYbuUhf8QFE16CoS5z7WvFR8EN9j6cEectMW6mKZ3vwXGwVXRIPsgUq/606DsQdCCx34MR8MRWYGlu6TBolbSe9y0EP0i46yipPz22YtuT7umcVUbGEyx8MZKgG0v1u/zA0O4aCsOBpGAA3gxz8h3JlEHDt+hv6U8xRsSllVLzLSNb5lwxDtcfEDxVVqP47GMEgjLPM28Pyt5qwjk7o5a4YSVzkfdxBXxd3gWzFUWzJ5E3cTacli50dK4GVfiLcQY2aQYoYO7AAvDnvP+TPfjDkBlUEE4MUz5CDIN51Xb+WW33sX7g+r3Bj7V5IRcF973RiYkpEh+3eoiPnyWyxhDZBYilty3b+Hysp6d4Ov/3I3ll7Bcny5+cYjakjkMH3l9w3gs6Y82GlpSLSJshKWS8vPRsxFe0Pstj6QSJXTd9EBaFr+l1ScXjJv/Sya9j8N9FfTuOTESWuaL1auX4Y7zEEVHlA8SCNOO8K0eTfxGZnC/YcIHsR8rePEAcFxfOYQppkyLF/XvAtnb/LMUuu0g4y2qNdme6Oelvyar1tFEMRtbl4mRCdu/krXBFtkrsfUaVY6WTPdvXAGotsFJ0wuA53zGVhlcd3+xAlSlR3c1QX95HIMeivJKb5L4nTjP+xnrmQNtnVk+tG4LSH2ltuwcZSSczModtcBmRefrk=
-
-script: >-
- if [ $TRAVIS_OS_NAME = "linux" ] && [ $go_32 ]; then
- uname -a
- GOOS=linux GOARCH=arm go test -v ./...
- # Another round of tests after turning off mmap.
- GOOS=linux GOARCH=arm go test -v -vlog_mmap=false github.com/dgraph-io/badger
- else
- go test -v ./...
- # Another round of tests after turning off mmap.
- go test -v -vlog_mmap=false github.com/dgraph-io/badger
- # Cross-compile for Plan 9
- GOOS=plan9 go build ./...
- fi
diff --git a/vendor/github.com/dgraph-io/badger/CHANGELOG.md b/vendor/github.com/dgraph-io/badger/CHANGELOG.md
deleted file mode 100644
index fce00ab5..00000000
--- a/vendor/github.com/dgraph-io/badger/CHANGELOG.md
+++ /dev/null
@@ -1,270 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
-and this project adheres to [Serialization Versioning](VERSIONING.md).
-
-## [Unreleased]
-
-## [1.6.2] - 2020-09-10
-
-### Fixed
- - Fix Sequence generates duplicate values (#1281)
- - Ensure `bitValuePointer` flag is cleared for LSM entry values written to LSM (#1313)
- - Confirm `badgerMove` entry required before rewrite (#1302)
- - Drop move keys when its key prefix is dropped (#1331)
- - Compaction: Expired keys and delete markers are never purged (#1354)
- - Restore: Account for value size as well (#1358)
- - GC: Consider size of value while rewriting (#1357)
- - Rework DB.DropPrefix (#1381)
- - Update head while replaying value log (#1372)
- - Remove vlog file if bootstrap, syncDir or mmap fails (#1434)
- - Levels: Compaction incorrectly drops some delete markers (#1422)
- - Fix(replay) - Update head for LSM entries also (#1456)
- - Fix(Backup/Restore): Keep all versions (#1462)
- - Fix build on Plan 9 (#1451)
-
-## [1.6.1] - 2020-03-26
-
-### New APIs
- - Badger.DB
- - NewWriteBatchAt (#948)
- - Badger.Options
- - WithEventLogging (#1035)
- - WithVerifyValueChecksum (#1052)
- - WithBypassLockGuard (#1243)
-
-### Features
- - Support checksum verification for values read from vlog (#1052)
- - Add EventLogging option (#1035)
- - Support WriteBatch API in managed mode (#948)
- - Add support for watching nil prefix in Subscribe API (#1246)
-
-### Fixed
- - Initialize vlog before starting compactions in db.Open (#1226)
- - Fix int overflow for 32bit (#1216)
- - Remove the 'this entry should've caught' log from value.go (#1170)
- - Fix merge iterator duplicates issue (#1157)
- - Fix segmentation fault in vlog.Read (header.Decode) (#1150)
- - Fix VerifyValueChecksum checks (#1138)
- - Fix windows dataloss issue (#1134)
- - Fix request increment ref bug (#1121)
- - Limit manifest's change set size (#1119)
- - Fix deadlock in discard stats (#1070)
- - Acquire lock before unmapping vlog files (#1050)
- - Set move key's expiresAt for keys with TTL (#1006)
- - Fix deadlock when flushing discard stats. (#976)
- - Fix table.Smallest/Biggest and iterator Prefix bug (#997)
- - Fix boundaries on GC batch size (#987)
- - Lock log file before munmap (#949)
- - VlogSize to store correct directory name to expvar.Map (#956)
- - Fix transaction too big issue in restore (#957)
- - Fix race condition in updateDiscardStats (#973)
- - Cast results of len to uint32 to fix compilation in i386 arch. (#961)
- - Drop discard stats if we can't unmarshal it (#936)
- - Open all vlog files in RDWR mode (#923)
- - Fix race condition in flushDiscardStats function (#921)
- - Ensure rewrite in vlog is within transactional limits (#911)
- - Fix prefix bug in key iterator and allow all versions (#950)
- - Fix discard stats moved by GC bug (#929)
-
-### Performance
- - Use fastRand instead of locked-rand in skiplist (#1173)
- - Fix checkOverlap in compaction (#1166)
- - Optimize createTable in stream_writer.go (#1132)
- - Add capacity to slice creation when capacity is known (#1103)
- - Introduce fast merge iterator (#1080)
- - Introduce StreamDone in Stream Writer (#1061)
- - Flush vlog buffer if it grows beyond threshold (#1067)
- - Binary search based table picker (#983)
- - Making the stream writer APIs goroutine-safe (#959)
- - Replace FarmHash with AESHash for Oracle conflicts (#952)
- - Change file picking strategy in compaction (#894)
- - Use trie for prefix matching (#851)
- - Fix busy-wait loop in Watermark (#920)
-
-
-## [1.6.0] - 2019-07-01
-
-This is a release including almost 200 commits, so expect many changes - some of them
-not backward compatible.
-
-Regarding backward compatibility in Badger versions, you might be interested in reading
-[VERSIONING.md](VERSIONING.md).
-
-_Note_: The hashes in parentheses correspond to the commits that impacted the given feature.
-
-### New APIs
-
-- badger.DB
- - DropPrefix (291295e)
- - Flatten (7e41bba)
- - KeySplits (4751ef1)
- - MaxBatchCount (b65e2a3)
- - MaxBatchSize (b65e2a3)
- - PrintKeyValueHistogram (fd59907)
- - Subscribe (26128a7)
- - Sync (851e462)
-
-- badger.DefaultOptions() and badger.LSMOnlyOptions() (91ce687)
- - badger.Options.WithX methods
-
-- badger.Entry (e9447c9)
- - NewEntry
- - WithMeta
- - WithDiscard
- - WithTTL
-
-- badger.Item
- - KeySize (fd59907)
- - ValueSize (5242a99)
-
-- badger.IteratorOptions
- - PickTable (7d46029, 49a49e3)
- - Prefix (7d46029)
-
-- badger.Logger (fbb2778)
-
-- badger.Options
- - CompactL0OnClose (7e41bba)
- - Logger (3f66663)
- - LogRotatesToFlush (2237832)
-
-- badger.Stream (14cbd89, 3258067)
-- badger.StreamWriter (7116e16)
-- badger.TableInfo.KeyCount (fd59907)
-- badger.TableManifest (2017987)
-- badger.Tx.NewKeyIterator (49a49e3)
-- badger.WriteBatch (6daccf9, 7e78e80)
-
-### Modified APIs
-
-#### Breaking changes:
-
-- badger.DefaultOptions and badger.LSMOnlyOptions are now functions rather than variables (91ce687)
-- badger.Item.Value now receives a function that returns an error (439fd46)
-- badger.Txn.Commit doesn't receive any params now (6daccf9)
-- badger.DB.Tables now receives a boolean (76b5341)
-
-#### Not breaking changes:
-
-- badger.LSMOptions changed values (799c33f)
-- badger.DB.NewIterator now allows multiple iterators per RO txn (41d9656)
-- badger.Options.TableLoadingMode's new default is options.MemoryMap (6b97bac)
-
-### Removed APIs
-
-- badger.ManagedDB (d22c0e8)
-- badger.Options.DoNotCompact (7e41bba)
-- badger.Txn.SetWithX (e9447c9)
-
-### Tools:
-
-- badger bank disect (13db058)
-- badger bank test (13db058) --mmap (03870e3)
-- badger fill (7e41bba)
-- badger flatten (7e41bba)
-- badger info --histogram (fd59907) --history --lookup --show-keys --show-meta --with-prefix (09e9b63) --show-internal (fb2eed9)
-- badger benchmark read (239041e)
-- badger benchmark write (6d3b67d)
-
-## [1.5.5] - 2019-06-20
-
-* Introduce support for Go Modules
-
-## [1.5.3] - 2018-07-11
-Bug Fixes:
-* Fix a panic caused by item.vptr not copying over vs.Value when looking for
- a move key.
-
-## [1.5.2] - 2018-06-19
-Bug Fixes:
-* Fix the way move key gets generated.
-* If a transaction has an unclosed iterator, or multiple iterators running
- simultaneously, throw a panic. Every iterator must be properly closed. At any
- point in time, only one iterator per transaction can be running. This is to
- avoid bugs in a transaction data structure which is thread-unsafe.
-
-* *Warning: This change might cause panics in user code. Fix is to properly
- close your iterators, and only have one running at a time per transaction.*
-
-## [1.5.1] - 2018-06-04
-Bug Fixes:
-* Fix for infinite yieldItemValue recursion. #503
-* Fix recursive addition of `badgerMove` prefix. https://github.com/dgraph-io/badger/commit/2e3a32f0ccac3066fb4206b28deb39c210c5266f
-* Use file size based window size for sampling, instead of fixing it to 10MB. #501
-
-Cleanup:
-* Clarify comments and documentation.
-* Move badger tool one directory level up.
-
-## [1.5.0] - 2018-05-08
-* Introduce `NumVersionsToKeep` option. This option is used to discard many
- versions of the same key, which saves space.
-* Add a new `SetWithDiscard` method, which would indicate that all the older
- versions of the key are now invalid. Those versions would be discarded during
- compactions.
-* Value log GC moves are now bound to another keyspace to ensure the latest
- versions of data are always at the top of the LSM tree.
-* Introduce `ValueLogMaxEntries` to restrict the number of key-value pairs per
- value log file. This helps bound the time it takes to garbage collect one
- file.
-
-## [1.4.0] - 2018-05-04
-* Make mmap-ing of value log optional.
-* Run GC multiple times, based on recorded discard statistics.
-* Add MergeOperator.
-* Force compact L0 on close (#439).
-* Add truncate option to warn about data loss (#452).
-* Discard key versions during compaction (#464).
-* Introduce new `LSMOnlyOptions`, to make Badger act like a typical LSM based DB.
-
-Bug fix:
-* (Temporary) Check max version across all tables in Get (removed in next
- release).
-* Update commit and read ts while loading from backup.
-* Ensure all transaction entries are part of the same value log file.
-* On commit, run unlock callbacks before doing writes (#413).
-* Wait for goroutines to finish before closing iterators (#421).
-
-## [1.3.0] - 2017-12-12
-* Add `DB.NextSequence()` method to generate monotonically increasing integer
- sequences.
-* Add `DB.Size()` method to return the size of LSM and value log files.
-* Tweaked mmap code to make Windows 32-bit builds work.
-* Tweaked build tags on some files to make iOS builds work.
-* Fix `DB.PurgeOlderVersions()` to not violate some constraints.
-
-## [1.2.0] - 2017-11-30
-* Expose a `Txn.SetEntry()` method to allow setting the key-value pair
- and all the metadata at the same time.
-
-## [1.1.1] - 2017-11-28
-* Fix bug where txn.Get was returning a key deleted in the same transaction.
-* Fix race condition while decrementing reference in oracle.
-* Update doneCommit in the callback for CommitAsync.
-* Iterators now see writes of the current txn.
-
-## [1.1.0] - 2017-11-13
-* Create Badger directory if it does not exist when `badger.Open` is called.
-* Added `Item.ValueCopy()` to avoid deadlocks in long-running iterations.
-* Fixed 64-bit alignment issues to make Badger run on Arm v7.
-
-## [1.0.1] - 2017-11-06
-* Fix a uint16 overflow when resizing the key slice.
-
-[Unreleased]: https://github.com/dgraph-io/badger/compare/v1.6.2...HEAD
-[1.6.2]: https://github.com/dgraph-io/badger/compare/v1.6.1...v1.6.2
-[1.6.1]: https://github.com/dgraph-io/badger/compare/v1.6.0...v1.6.1
-[1.6.0]: https://github.com/dgraph-io/badger/compare/v1.5.5...v1.6.0
-[1.5.5]: https://github.com/dgraph-io/badger/compare/v1.5.3...v1.5.5
-[1.5.3]: https://github.com/dgraph-io/badger/compare/v1.5.2...v1.5.3
-[1.5.2]: https://github.com/dgraph-io/badger/compare/v1.5.1...v1.5.2
-[1.5.1]: https://github.com/dgraph-io/badger/compare/v1.5.0...v1.5.1
-[1.5.0]: https://github.com/dgraph-io/badger/compare/v1.4.0...v1.5.0
-[1.4.0]: https://github.com/dgraph-io/badger/compare/v1.3.0...v1.4.0
-[1.3.0]: https://github.com/dgraph-io/badger/compare/v1.2.0...v1.3.0
-[1.2.0]: https://github.com/dgraph-io/badger/compare/v1.1.1...v1.2.0
-[1.1.1]: https://github.com/dgraph-io/badger/compare/v1.1.0...v1.1.1
-[1.1.0]: https://github.com/dgraph-io/badger/compare/v1.0.1...v1.1.0
-[1.0.1]: https://github.com/dgraph-io/badger/compare/v1.0.0...v1.0.1
diff --git a/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md b/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md
deleted file mode 100644
index bf7bbc29..00000000
--- a/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Code of Conduct
-
-Our Code of Conduct can be found here:
-
-https://dgraph.io/conduct
diff --git a/vendor/github.com/dgraph-io/badger/LICENSE b/vendor/github.com/dgraph-io/badger/LICENSE
deleted file mode 100644
index d9a10c0d..00000000
--- a/vendor/github.com/dgraph-io/badger/LICENSE
+++ /dev/null
@@ -1,176 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/dgraph-io/badger/README.md b/vendor/github.com/dgraph-io/badger/README.md
deleted file mode 100644
index 535f2a0d..00000000
--- a/vendor/github.com/dgraph-io/badger/README.md
+++ /dev/null
@@ -1,898 +0,0 @@
-# BadgerDB [![GoDoc](https://godoc.org/github.com/dgraph-io/badger?status.svg)](https://godoc.org/github.com/dgraph-io/badger) [![Go Report Card](https://goreportcard.com/badge/github.com/dgraph-io/badger)](https://goreportcard.com/report/github.com/dgraph-io/badger) [![Sourcegraph](https://sourcegraph.com/github.com/dgraph-io/badger/-/badge.svg)](https://sourcegraph.com/github.com/dgraph-io/badger?badge) [![Build Status](https://teamcity.dgraph.io/guestAuth/app/rest/builds/buildType:(id:Badger_UnitTests)/statusIcon.svg)](https://teamcity.dgraph.io/viewLog.html?buildTypeId=Badger_UnitTests&buildId=lastFinished&guest=1) ![Appveyor](https://ci.appveyor.com/api/projects/status/github/dgraph-io/badger?branch=master&svg=true) [![Coverage Status](https://coveralls.io/repos/github/dgraph-io/badger/badge.svg?branch=master)](https://coveralls.io/github/dgraph-io/badger?branch=master)
-
-![Badger mascot](images/diggy-shadow.png)
-
-BadgerDB is an embeddable, persistent and fast key-value (KV) database written
-in pure Go. It is the underlying database for [Dgraph](https://dgraph.io), a
-fast, distributed graph database. It's meant to be a performant alternative to
-non-Go-based key-value stores like RocksDB.
-
-## Project Status [Jun 26, 2019]
-
-Badger is stable and is being used to serve data sets worth hundreds of
-terabytes. Badger supports concurrent ACID transactions with serializable
-snapshot isolation (SSI) guarantees. A Jepsen-style bank test runs nightly for
-8h with the `--race` flag to verify that transactional guarantees hold. Badger
-has also been tested against filesystem-level anomalies to ensure persistence
-and consistency.
-
-Badger v1.0 was released in Nov 2017, and the latest version that is data-compatible
-with v1.0 is v1.6.0.
-
-Badger v2.0, a new release coming up very soon, will use a new storage format which won't
-be compatible with any of the v1.x releases. The [Changelog] is kept fairly up-to-date.
-
-For more details on our version naming schema, please read [Choosing a version](#choosing-a-version).
-
-[Changelog]:https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md
-
-## Table of Contents
- * [Getting Started](#getting-started)
- + [Installing](#installing)
- - [Choosing a version](#choosing-a-version)
- + [Opening a database](#opening-a-database)
- + [Transactions](#transactions)
- - [Read-only transactions](#read-only-transactions)
- - [Read-write transactions](#read-write-transactions)
- - [Managing transactions manually](#managing-transactions-manually)
- + [Using key/value pairs](#using-keyvalue-pairs)
- + [Monotonically increasing integers](#monotonically-increasing-integers)
- * [Merge Operations](#merge-operations)
- + [Setting Time To Live (TTL) and User Metadata on Keys](#setting-time-to-live-ttl-and-user-metadata-on-keys)
- + [Iterating over keys](#iterating-over-keys)
- - [Prefix scans](#prefix-scans)
- - [Key-only iteration](#key-only-iteration)
- + [Stream](#stream)
- + [Garbage Collection](#garbage-collection)
- + [Database backup](#database-backup)
- + [Memory usage](#memory-usage)
- + [Statistics](#statistics)
- * [Resources](#resources)
- + [Blog Posts](#blog-posts)
- * [Contact](#contact)
- * [Design](#design)
- + [Comparisons](#comparisons)
- + [Benchmarks](#benchmarks)
- * [Other Projects Using Badger](#other-projects-using-badger)
- * [Frequently Asked Questions](#frequently-asked-questions)
-
-## Getting Started
-
-### Installing
-To start using Badger, install Go 1.11 or above and run `go get`:
-
-```sh
-$ go get github.com/dgraph-io/badger/...
-```
-
-This will retrieve the library and install the `badger` command line
-utility into your `$GOBIN` path.
-
-#### Choosing a version
-
-BadgerDB is unusual in that the most important changes we can make to it are
-not to its API but to how data is stored on disk.
-
-This is why we follow a version naming schema that differs from Semantic Versioning.
-
-- New major versions are released when the data format on disk changes in an incompatible way.
-- New minor versions are released whenever the API changes but data compatibility is maintained.
- Note that changes to the API could be backward-incompatible - unlike Semantic Versioning.
-- New patch versions are released when there are no changes to either the data format or the API.
-
-Following these rules:
-
-- v1.5.0 and v1.6.0 can be used on top of the same files without any concerns, as their major
- version is the same and therefore the data format on disk is compatible.
-- v1.6.0 and v2.0.0 are data incompatible as their major version implies, so files created with
- v1.6.0 will need to be converted into the new format before they can be used by v2.0.0.
-
-For a longer explanation on the reasons behind using a new versioning naming schema, you can read
-[VERSIONING.md](VERSIONING.md).
-
-### Opening a database
-The top-level object in Badger is a `DB`. It represents multiple files on disk
-in specific directories, which contain the data for a single database.
-
-To open your database, use the `badger.Open()` function, with the appropriate
-options. The `Dir` and `ValueDir` options are mandatory and must be
-specified by the client. They can be set to the same value to simplify things.
-
-```go
-package main
-
-import (
- "log"
-
- badger "github.com/dgraph-io/badger"
-)
-
-func main() {
- // Open the Badger database located in the /tmp/badger directory.
- // It will be created if it doesn't exist.
- db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
- if err != nil {
- log.Fatal(err)
- }
- defer db.Close()
- // Your code here…
-}
-```
-
-Please note that Badger obtains a lock on the directories so multiple processes
-cannot open the same database at the same time.
-
-### Transactions
-
-#### Read-only transactions
-To start a read-only transaction, you can use the `DB.View()` method:
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- // Your code here…
- return nil
-})
-```
-
-You cannot perform any writes or deletes within this transaction. Badger
-ensures that you get a consistent view of the database within this closure. Any
-writes that happen elsewhere after the transaction has started will not be
-seen by calls made within the closure.
-
-#### Read-write transactions
-To start a read-write transaction, you can use the `DB.Update()` method:
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- // Your code here…
- return nil
-})
-```
-
-All database operations are allowed inside a read-write transaction.
-
-Always check the returned error value. If you return an error within your
-closure, it will be passed through.
-
-An `ErrConflict` error will be reported in case of a conflict. Depending on the state
-of your application, you have the option to retry the operation if you receive
-this error.
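-
-For example, a minimal retry loop might look like the following sketch; it
-assumes the `Update` closure is idempotent and safe to re-run:
-
-```go
-for {
-    err := db.Update(func(txn *badger.Txn) error {
-        // Read-modify-write logic goes here.
-        return nil
-    })
-    if err != badger.ErrConflict {
-        break // nil on success, or a non-retryable error
-    }
-}
-```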
-
-An `ErrTxnTooBig` error will be reported when the number of pending writes/deletes in
-the transaction exceeds a certain limit. In that case, it is best to commit the
-transaction and start a new transaction immediately. Here is an example (we are
-not checking for errors in some places for simplicity):
-
-```go
-updates := make(map[string]string)
-txn := db.NewTransaction(true)
-for k, v := range updates {
- if err := txn.Set([]byte(k), []byte(v)); err == badger.ErrTxnTooBig {
- _ = txn.Commit()
- txn = db.NewTransaction(true)
- _ = txn.Set([]byte(k), []byte(v))
- }
-}
-_ = txn.Commit()
-```
-
-#### Managing transactions manually
-The `DB.View()` and `DB.Update()` methods are wrappers around the
-`DB.NewTransaction()` and `Txn.Commit()` methods (or `Txn.Discard()` in case of
-read-only transactions). These helper methods will start the transaction,
-execute a function, and then safely discard your transaction if an error is
-returned. This is the recommended way to use Badger transactions.
-
-However, sometimes you may want to manually create and commit your
-transactions. You can use the `DB.NewTransaction()` function directly, which
-takes in a boolean argument to specify whether a read-write transaction is
-required. For read-write transactions, it is necessary to call `Txn.Commit()`
-to ensure the transaction is committed. For read-only transactions, calling
-`Txn.Discard()` is sufficient. `Txn.Commit()` also calls `Txn.Discard()`
-internally to clean up the transaction, so just calling `Txn.Commit()` is
-sufficient for read-write transactions. However, if your code doesn't call
-`Txn.Commit()` for some reason (e.g., it returns prematurely with an error),
-then please make sure you call `Txn.Discard()` in a `defer` block. Refer to the
-code below.
-
-```go
-// Start a writable transaction.
-txn := db.NewTransaction(true)
-defer txn.Discard()
-
-// Use the transaction...
-err := txn.Set([]byte("answer"), []byte("42"))
-if err != nil {
- return err
-}
-
-// Commit the transaction and check for error.
-if err := txn.Commit(); err != nil {
- return err
-}
-```
-
-The first argument to `DB.NewTransaction()` is a boolean stating if the transaction
-should be writable.
-
-Badger allows an optional callback to the `Txn.Commit()` method. Normally, the
-callback can be set to `nil`, and the method will return after all the writes
-have succeeded. However, if this callback is provided, the `Txn.Commit()`
-method returns as soon as it has checked for any conflicts. The actual writing
-to the disk happens asynchronously, and the callback is invoked once the
-writing has finished, or an error has occurred. This can improve the throughput
-of the application in some cases. But it also means that a transaction is not
-durable until the callback has been invoked with a `nil` error value.
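-
-Here is a sketch of an asynchronous commit; it assumes the callback-taking
-variant is exposed as `Txn.CommitWith`, as in later v1.x releases:
-
-```go
-txn := db.NewTransaction(true)
-handle(txn.Set([]byte("answer"), []byte("42")))
-txn.CommitWith(func(err error) {
-    // Invoked once the write has finished (or failed). The transaction is
-    // not durable until this callback runs with err == nil.
-    handle(err)
-})
-```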
-
-### Using key/value pairs
-To save a key/value pair, use the `Txn.Set()` method:
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- err := txn.Set([]byte("answer"), []byte("42"))
- return err
-})
-```
-
-A key/value pair can also be saved by first creating an `Entry`, then setting
-this `Entry` using `Txn.SetEntry()`. `Entry` also exposes methods to set
-properties on it.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := badger.NewEntry([]byte("answer"), []byte("42"))
- err := txn.SetEntry(e)
- return err
-})
-```
-
-This will set the value of the `"answer"` key to `"42"`. To retrieve this
-value, we can use the `Txn.Get()` method:
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- item, err := txn.Get([]byte("answer"))
- handle(err)
-
- var valNot, valCopy []byte
- err = item.Value(func(val []byte) error {
- // This func with val would only be called if item.Value encounters no error.
-
- // Accessing val here is valid.
- fmt.Printf("The answer is: %s\n", val)
-
- // Copying or parsing val is valid.
- valCopy = append([]byte{}, val...)
-
- // Assigning val slice to another variable is NOT OK.
- valNot = val // Do not do this.
- return nil
- })
- handle(err)
-
- // DO NOT access val here. It is the most common cause of bugs.
- fmt.Printf("NEVER do this. %s\n", valNot)
-
- // You must copy it to use it outside item.Value(...).
- fmt.Printf("The answer is: %s\n", valCopy)
-
- // Alternatively, you could also use item.ValueCopy().
- valCopy, err = item.ValueCopy(nil)
- handle(err)
- fmt.Printf("The answer is: %s\n", valCopy)
-
- return nil
-})
-```
-
-`Txn.Get()` returns `ErrKeyNotFound` if the value is not found.
-
-Please note that values returned from `Get()` are only valid while the
-transaction is open. If you need to use a value outside of the transaction
-then you must use `copy()` to copy it to another byte slice.
-
-Use the `Txn.Delete()` method to delete a key.
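-
-For example, here is a small sketch that treats a missing key as a no-op and
-otherwise deletes it:
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
-    _, err := txn.Get([]byte("answer"))
-    if err == badger.ErrKeyNotFound {
-        return nil // nothing to delete
-    }
-    if err != nil {
-        return err
-    }
-    return txn.Delete([]byte("answer"))
-})
-```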
-
-### Monotonically increasing integers
-
-To get unique monotonically increasing integers with strong durability, you can
-use the `DB.GetSequence` method. This method returns a `Sequence` object, which
-is thread-safe and can be used concurrently via various goroutines.
-
-Badger leases a range of integers to hand out from memory, sized by the
-bandwidth provided to `DB.GetSequence`. The frequency at which disk writes are
-done is determined by this lease bandwidth and the frequency of `Next`
-invocations. Setting the bandwidth too low causes more disk writes; setting it
-too high results in wasted integers if Badger is closed or crashes. To avoid
-wasted integers, call `Release` before closing Badger.
-
-```go
-seq, err := db.GetSequence(key, 1000)
-handle(err)
-defer seq.Release()
-for {
- num, err := seq.Next()
- handle(err)
- fmt.Printf("got sequence number %d\n", num)
-}
-```
-
-### Merge Operations
-Badger provides support for ordered merge operations. You can define a func
-of type `MergeFunc` which takes in an existing value, and a value to be
-_merged_ with it. It returns a new value which is the result of the _merge_
-operation. All values are specified as byte slices. For example, here is a merge
-function (`add`) which appends a `[]byte` value to an existing `[]byte` value.
-
-```Go
-// Merge function to append one byte slice to another
-func add(originalValue, newValue []byte) []byte {
- return append(originalValue, newValue...)
-}
-```
-
-This function can then be passed to the `DB.GetMergeOperator()` method, along
-with a key, and a duration value. The duration specifies how often the merge
-function is run on values that have been added using the `MergeOperator.Add()`
-method.
-
-The `MergeOperator.Get()` method can be used to retrieve the cumulative value of the key
-associated with the merge operation.
-
-```Go
-key := []byte("merge")
-
-m := db.GetMergeOperator(key, add, 200*time.Millisecond)
-defer m.Stop()
-
-m.Add([]byte("A"))
-m.Add([]byte("B"))
-m.Add([]byte("C"))
-
-res, _ := m.Get() // res should have value ABC encoded
-```
-
-Example: Merge operator which increments a counter
-
-```Go
-func uint64ToBytes(i uint64) []byte {
- var buf [8]byte
- binary.BigEndian.PutUint64(buf[:], i)
- return buf[:]
-}
-
-func bytesToUint64(b []byte) uint64 {
- return binary.BigEndian.Uint64(b)
-}
-
-// Merge function to add two uint64 numbers
-func add(existing, new []byte) []byte {
- return uint64ToBytes(bytesToUint64(existing) + bytesToUint64(new))
-}
-```
-It can be used as follows:
-```Go
-key := []byte("merge")
-
-m := db.GetMergeOperator(key, add, 200*time.Millisecond)
-defer m.Stop()
-
-m.Add(uint64ToBytes(1))
-m.Add(uint64ToBytes(2))
-m.Add(uint64ToBytes(3))
-
-res, _ := m.Get() // res should have value 6 encoded
-```
-
-### Setting Time To Live (TTL) and User Metadata on Keys
-Badger allows setting an optional Time to Live (TTL) value on keys. Once the TTL has
-elapsed, the key will no longer be retrievable and will be eligible for garbage
-collection. A TTL can be set as a `time.Duration` value using the `Entry.WithTTL()`
-and `Txn.SetEntry()` API methods.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := badger.NewEntry([]byte("answer"), []byte("42")).WithTTL(time.Hour)
- err := txn.SetEntry(e)
- return err
-})
-```
-
-An optional user metadata value can be set on each key. A user metadata value
-is represented by a single byte. It can be used to set certain bits along
-with the key to aid in interpreting or decoding the key-value pair. User
-metadata can be set using `Entry.WithMeta()` and `Txn.SetEntry()` API methods.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := badger.NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1))
- err := txn.SetEntry(e)
- return err
-})
-```
-
-The `Entry` APIs can be used to set both user metadata and a TTL on the same
-key. This `Entry` can then be set using `Txn.SetEntry()`.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := badger.NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1)).WithTTL(time.Hour)
- err := txn.SetEntry(e)
- return err
-})
-```
-
-### Iterating over keys
-To iterate over keys, we can use an `Iterator`, which can be obtained using the
-`Txn.NewIterator()` method. Iteration happens in byte-wise lexicographical sorting
-order.
-
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- opts := badger.DefaultIteratorOptions
- opts.PrefetchSize = 10
- it := txn.NewIterator(opts)
- defer it.Close()
- for it.Rewind(); it.Valid(); it.Next() {
- item := it.Item()
- k := item.Key()
- err := item.Value(func(v []byte) error {
- fmt.Printf("key=%s, value=%s\n", k, v)
- return nil
- })
- if err != nil {
- return err
- }
- }
- return nil
-})
-```
-
-The iterator allows you to move to a specific point in the list of keys and move
-forward or backward through the keys one at a time.
-
-By default, Badger prefetches the values of the next 100 items. You can adjust
-that with the `IteratorOptions.PrefetchSize` field. However, setting it to
-a value higher than `GOMAXPROCS` (which we recommend setting to 128 or higher)
-shouldn't give any additional benefit. You can also turn off the fetching of
-values altogether; see the section below on key-only iteration.
-
-#### Prefix scans
-To iterate over a key prefix, you can combine `Seek()` and `ValidForPrefix()`:
-
-```go
-db.View(func(txn *badger.Txn) error {
- it := txn.NewIterator(badger.DefaultIteratorOptions)
- defer it.Close()
- prefix := []byte("1234")
- for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
- item := it.Item()
- k := item.Key()
- err := item.Value(func(v []byte) error {
- fmt.Printf("key=%s, value=%s\n", k, v)
- return nil
- })
- if err != nil {
- return err
- }
- }
- return nil
-})
-```
-
-#### Key-only iteration
-Badger supports a unique mode of iteration called _key-only_ iteration. It is
-several orders of magnitude faster than regular iteration, because it involves
-access to the LSM-tree only, which is usually resident entirely in RAM. To
-enable key-only iteration, you need to set the `IteratorOptions.PrefetchValues`
-field to `false`. This can also be used to do sparse reads for selected keys
-during an iteration, by calling `item.Value()` only when required.
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- opts := badger.DefaultIteratorOptions
- opts.PrefetchValues = false
- it := txn.NewIterator(opts)
- defer it.Close()
- for it.Rewind(); it.Valid(); it.Next() {
- item := it.Item()
- k := item.Key()
- fmt.Printf("key=%s\n", k)
- }
- return nil
-})
-```
-
-### Stream
-Badger provides a Stream framework, which concurrently iterates over all or a
-portion of the DB, converting data into custom key-values, and streams it out
-serially to be sent over the network, written to disk, or even written back to
-Badger. This is a much faster way to iterate over Badger than using a single
-Iterator. Stream supports Badger in both managed and normal modes.
-
-Stream uses the natural boundaries created by SSTables within the LSM tree, to
-quickly generate key ranges. Each goroutine then picks a range and runs an
-iterator to iterate over it. Each iterator iterates over all versions of values
-and is created from the same transaction, thus working over a snapshot of the
-DB. Every time a new key is encountered, it calls `ChooseKey(item)`, followed
-by `KeyToList(key, itr)`. This allows a user to select or reject that key, and
-if selected, convert the value versions into custom key-values. The goroutine
-batches up 4MB worth of key-values before sending them over a channel.
-Another goroutine further batches up data from this channel using a *smart
-batching* algorithm and calls `Send` serially.
-
-This framework is designed for high throughput key-value iteration, spreading
-the work of iteration across many goroutines. `DB.Backup` uses this framework to
-provide full and incremental backups quickly. Dgraph is a heavy user of this
-framework. In fact, this framework was developed and used within Dgraph, before
-getting ported over to Badger.
-
-```go
-stream := db.NewStream()
-// db.NewStreamAt(readTs) for managed mode.
-
-// -- Optional settings
-stream.NumGo = 16 // Set number of goroutines to use for iteration.
-stream.Prefix = []byte("some-prefix") // Leave nil for iteration over the whole DB.
-stream.LogPrefix = "Badger.Streaming" // For identifying stream logs. Outputs to Logger.
-
-// ChooseKey is called concurrently for every key. If left nil, assumes true by default.
-stream.ChooseKey = func(item *badger.Item) bool {
- return bytes.HasSuffix(item.Key(), []byte("er"))
-}
-
-// KeyToList is called concurrently for chosen keys. This can be used to convert
-// Badger data into custom key-values. If nil, uses stream.ToList, a default
-// implementation, which picks all valid key-values.
-stream.KeyToList = nil
-
-// -- End of optional settings.
-
-// Send is called serially, while Stream.Orchestrate is running.
-stream.Send = func(list *pb.KVList) error {
- return proto.MarshalText(w, list) // Write to w.
-}
-
-// Run the stream
-if err := stream.Orchestrate(context.Background()); err != nil {
- return err
-}
-// Done.
-```
-
-### Garbage Collection
-Badger values need to be garbage collected for two reasons:
-
-* Badger keeps values separately from the LSM tree. This means that the compaction operations
-that clean up the LSM tree do not touch the values at all. Values need to be cleaned up
-separately.
-
-* Concurrent read/write transactions could leave behind multiple values for a single key, because they
-are stored with different versions. These can accumulate, taking up unneeded space after the
-older versions are no longer needed.
-
-Badger relies on the client to perform garbage collection at a time of their choosing. It provides
-the following method, which can be invoked at an appropriate time:
-
-* `DB.RunValueLogGC()`: This method is designed to do garbage collection while
- Badger is online. Along with randomly picking a file, it uses statistics generated by the
- LSM-tree compactions to pick files that are likely to lead to maximum space
- reclamation. It is recommended to be called during periods of low activity in
- your system, or periodically. One call results in the removal of at most one
- log file. As an optimization, you can immediately re-run it whenever it
- returns a nil error (indicating a successful value log GC), as shown below.
-
- ```go
- ticker := time.NewTicker(5 * time.Minute)
- defer ticker.Stop()
- for range ticker.C {
- again:
- err := db.RunValueLogGC(0.7)
- if err == nil {
- goto again
- }
- }
- ```
-
-* `DB.PurgeOlderVersions()`: This method is **DEPRECATED** since v1.5.0. Now, Badger's LSM tree automatically discards older/invalid versions of keys.
-
-**Note: The RunValueLogGC method would not garbage collect the latest value log.**
-
-### Database backup
-There are two public API methods `DB.Backup()` and `DB.Load()` which can be
-used to do online backups and restores. Badger v0.9 provides a CLI tool
-`badger`, which can do offline backup/restore. Make sure you have `$GOPATH/bin`
-in your PATH to use this tool.
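-
-Here is a sketch of an online full backup using `DB.Backup()`; a `since` value
-of 0 requests a full dump, and the returned version can be passed to a later
-call for an incremental one:
-
-```go
-f, err := os.Create("badger.bak")
-handle(err)
-defer f.Close()
-
-since, err := db.Backup(f, 0) // 0 requests a full backup
-handle(err)
-// Persist `since`; pass it to the next Backup call for an incremental dump.
-fmt.Printf("backup covers versions up to %d\n", since)
-```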
-
-The command below will create a version-agnostic backup of the database in a
-file `badger.bak` in the current working directory:
-
-```
-badger backup --dir
-```
-
-To restore `badger.bak` in the current working directory to a new database:
-
-```
-badger restore --dir
-```
-
-See `badger --help` for more details.
-
-If you have a Badger database that was created using v0.8 (or below), you can
-use the `badger_backup` tool provided in v0.8.1, and then restore it using the
-command above to upgrade your database to work with the latest version.
-
-```
-badger_backup --dir --backup-file badger.bak
-```
-
-We recommend that all users use the `Backup` and `Restore` APIs and tools. However,
-Badger is also rsync-friendly because all files are immutable, barring the
-latest value log which is append-only. So, rsync can be used as a rudimentary way
-to perform a backup. In the following script, we repeat rsync to ensure that the
-LSM tree remains consistent with the MANIFEST file while doing a full backup.
-
-```
-#!/bin/bash
-set -o history
-set -o histexpand
-# Makes a complete copy of a Badger database directory.
-# Repeat rsync if the MANIFEST and SSTables are updated.
-rsync -avz --delete db/ dst
-while !! | grep -q "(MANIFEST\|\.sst)$"; do :; done
-```
-
-### Memory usage
-Badger's memory usage can be managed by tweaking several options available in
-the `Options` struct that is passed in when opening the database using
-`DB.Open`.
-
-- `Options.ValueLogLoadingMode` can be set to `options.FileIO` (instead of the
- default `options.MemoryMap`) to avoid memory-mapping log files. This can be
- useful in environments with low RAM.
-- Number of memtables (`Options.NumMemtables`)
- - If you modify `Options.NumMemtables`, also adjust `Options.NumLevelZeroTables` and
- `Options.NumLevelZeroTablesStall` accordingly.
-- Number of concurrent compactions (`Options.NumCompactors`)
-- Mode in which LSM tree is loaded (`Options.TableLoadingMode`)
-- Size of table (`Options.MaxTableSize`)
-- Size of value log file (`Options.ValueLogFileSize`)
-
-If you want to decrease the memory usage of a Badger instance, tweak these
-options (ideally one at a time) until you achieve the desired
-memory usage.
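-
-As a sketch (not a tuning recommendation), several of these options can be
-combined via the `WithX` builders; the exact builder names below assume the
-v1.6-era API:
-
-```go
-// Assumes: import (
-//     badger "github.com/dgraph-io/badger"
-//     "github.com/dgraph-io/badger/options"
-// )
-opts := badger.DefaultOptions("/tmp/badger").
-    WithValueLogLoadingMode(options.FileIO). // avoid memory-mapping log files
-    WithNumMemtables(2).                     // fewer memtables, less RAM
-    WithNumLevelZeroTables(2).
-    WithNumLevelZeroTablesStall(4).
-    WithNumCompactors(1) // fewer concurrent compactions
-db, err := badger.Open(opts)
-handle(err)
-defer db.Close()
-```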
-
-### Statistics
-Badger records metrics using the [expvar] package, which is included in the Go
-standard library. All the metrics are documented in [y/metrics.go][metrics]
-file.
-
-The `expvar` package adds a handler to the default HTTP server (which has to be
-started explicitly), and serves up the metrics at the `/debug/vars` endpoint.
-These metrics can then be collected by a system like [Prometheus], to get
-better visibility into what Badger is doing.
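-
-As a sketch, the metrics can be exposed by starting the default HTTP server
-yourself; importing `expvar` registers the `/debug/vars` handler:
-
-```go
-package main
-
-import (
-    _ "expvar" // registers the /debug/vars handler on the default mux
-    "log"
-    "net/http"
-)
-
-func main() {
-    // Badger's metrics are registered with expvar when its package is imported.
-    log.Fatal(http.ListenAndServe(":8080", nil))
-}
-```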
-
-[expvar]: https://golang.org/pkg/expvar/
-[metrics]: https://github.com/dgraph-io/badger/blob/master/y/metrics.go
-[Prometheus]: https://prometheus.io/
-
-## Resources
-
-### Blog Posts
-1. [Introducing Badger: A fast key-value store written natively in
-Go](https://open.dgraph.io/post/badger/)
-2. [Make Badger crash resilient with ALICE](https://blog.dgraph.io/post/alice/)
-3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://blog.dgraph.io/post/badger-lmdb-boltdb/)
-4. [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/)
-
-## Design
-Badger was written with these design goals in mind:
-
-- Write a key-value database in pure Go.
-- Use latest research to build the fastest KV database for data sets spanning terabytes.
-- Optimize for SSDs.
-
-Badger’s design is based on a paper titled _[WiscKey: Separating Keys from
-Values in SSD-conscious Storage][wisckey]_.
-
-[wisckey]: https://www.usenix.org/system/files/conference/fast16/fast16-papers-lu.pdf
-
-### Comparisons
-| Feature | Badger | RocksDB | BoltDB |
-| ------- | ------ | ------- | ------ |
-| Design | LSM tree with value log | LSM tree only | B+ tree |
-| High Read throughput | Yes | No | Yes |
-| High Write throughput | Yes | Yes | No |
-| Designed for SSDs | Yes (with latest research [1]) | Not specifically [2] | No |
-| Embeddable | Yes | Yes | Yes |
-| Sorted KV access | Yes | Yes | Yes |
-| Pure Go (no Cgo) | Yes | No | Yes |
-| Transactions | Yes, ACID, concurrent with SSI [3] | Yes (but non-ACID) | Yes, ACID |
-| Snapshots | Yes | Yes | Yes |
-| TTL support | Yes | Yes | No |
-| 3D access (key-value-version) | Yes [4] | No | No |
-
-[1] The [WISCKEY paper][wisckey] (on which Badger is based) saw big
-wins with separating values from keys, significantly reducing the write
-amplification compared to a typical LSM tree.
-
-[2] RocksDB is an SSD-optimized version of LevelDB, which was designed specifically for rotating disks.
-As such RocksDB's design isn't aimed at SSDs.
-
-[3] SSI: Serializable Snapshot Isolation. For more details, see the blog post [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/)
-
-[4] Badger provides direct access to value versions via its Iterator API.
-Users can also specify how many versions to keep per key via Options.
-
-### Benchmarks
-We have run comprehensive benchmarks against RocksDB, Bolt and LMDB. The
-benchmarking code, and the detailed logs for the benchmarks can be found in the
-[badger-bench] repo. More explanation, including graphs, can be found in the
-blog posts (linked above).
-
-[badger-bench]: https://github.com/dgraph-io/badger-bench
-
-## Other Projects Using Badger
-Below is a list of known projects that use Badger:
-
-* [0-stor](https://github.com/zero-os/0-stor) - Single device object store.
-* [Dgraph](https://github.com/dgraph-io/dgraph) - Distributed graph database.
-* [Jaeger](https://github.com/jaegertracing/jaeger) - Distributed tracing platform.
-* [TalariaDB](https://github.com/grab/talaria) - Distributed, low latency time-series database.
-* [Dispatch Protocol](https://github.com/dispatchlabs/disgo) - Blockchain protocol for distributed application data analytics.
-* [Sandglass](https://github.com/celrenheit/sandglass) - Distributed, horizontally scalable, persistent, time-sorted message queue.
-* [Usenet Express](https://usenetexpress.com/) - Serving over 300TB of data with Badger.
-* [go-ipfs](https://github.com/ipfs/go-ipfs) - Go client for the InterPlanetary File System (IPFS), a new hypermedia distribution protocol.
-* [gorush](https://github.com/appleboy/gorush) - A push notification server written in Go.
-* [emitter](https://github.com/emitter-io/emitter) - Scalable, low latency, distributed pub/sub broker with message storage, uses MQTT, gossip and badger.
-* [GarageMQ](https://github.com/valinurovam/garagemq) - AMQP server written in Go.
-* [RedixDB](https://alash3al.github.io/redix/) - A real-time persistent key-value store with the same redis protocol.
-* [BBVA](https://github.com/BBVA/raft-badger) - Raft backend implementation using BadgerDB for Hashicorp raft.
-* [Riot](https://github.com/go-ego/riot) - An open-source, distributed search engine.
-* [Fantom](https://github.com/Fantom-foundation/go-lachesis) - aBFT Consensus platform for distributed applications.
-* [decred](https://github.com/decred/dcrdata) - An open, progressive, and self-funding cryptocurrency with a system of community-based governance integrated into its blockchain.
-* [OpenNetSys](https://github.com/opennetsys/c3-go) - Create useful dApps in any software language.
-* [HoneyTrap](https://github.com/honeytrap/honeytrap) - An extensible and opensource system for running, monitoring and managing honeypots.
-* [Insolar](https://github.com/insolar/insolar) - Enterprise-ready blockchain platform.
-* [IoTeX](https://github.com/iotexproject/iotex-core) - The next generation of the decentralized network for IoT powered by scalability- and privacy-centric blockchains.
-* [go-sessions](https://github.com/kataras/go-sessions) - The sessions manager for Go net/http and fasthttp.
-* [Babble](https://github.com/mosaicnetworks/babble) - BFT Consensus platform for distributed applications.
-* [Tormenta](https://github.com/jpincas/tormenta) - Embedded object-persistence layer / simple JSON database for Go projects.
-* [BadgerHold](https://github.com/timshannon/badgerhold) - An embeddable NoSQL store for querying Go types built on Badger
-* [Goblero](https://github.com/didil/goblero) - Pure Go embedded persistent job queue backed by BadgerDB
-* [Surfline](https://www.surfline.com) - Serving global wave and weather forecast data with Badger.
-* [Cete](https://github.com/mosuka/cete) - Simple and highly available distributed key-value store built on Badger. Makes it easy to bring up a cluster of Badger nodes with the Raft consensus algorithm via hashicorp/raft.
-* [Volument](https://volument.com/) - A new take on website analytics backed by Badger.
-* [Sloop](https://github.com/salesforce/sloop) - Kubernetes History Visualization.
-* [KVdb](https://kvdb.io/) - Hosted key-value store and serverless platform built on top of Badger.
-* [Dkron](https://dkron.io/) - Distributed, fault tolerant job scheduling system.
-
-If you are using Badger in a project please send a pull request to add it to the list.
-
-## Frequently Asked Questions
-### My writes are getting stuck. Why?
-
-**Update: With the new `Value(func(v []byte))` API, this deadlock can no longer
-happen.**
-
-The following is true for users on Badger v1.x.
-
-This can happen if a long-running iteration is run with `Prefetch` set to false,
-but an `Item::Value` call is made internally in the loop. That causes Badger to
-acquire read locks over the value log files, to prevent value log GC from
-removing the file from underneath. As a side effect, this also blocks a new
-value log GC file from being created when the value log file boundary is hit.
-
-Please see Github issues [#293](https://github.com/dgraph-io/badger/issues/293)
-and [#315](https://github.com/dgraph-io/badger/issues/315).
-
-There are multiple workarounds during iteration:
-
-1. Use `Item::ValueCopy` instead of `Item::Value` when retrieving a value.
-1. Set `Prefetch` to true. Badger would then copy over the value and release the
- file lock immediately.
-1. When `Prefetch` is false, don't call `Item::Value` and do a pure key-only
- iteration. This might be useful if you just want to delete a lot of keys (see
- the sketch after this list).
-1. Do the writes in a separate transaction after the reads.
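-
-Here is a sketch of the key-only deletion workaround referenced above: collect
-keys under a read-only transaction, then delete them in a separate read-write
-transaction:
-
-```go
-var keys [][]byte
-err := db.View(func(txn *badger.Txn) error {
-    opts := badger.DefaultIteratorOptions
-    opts.PrefetchValues = false // pure key-only iteration
-    it := txn.NewIterator(opts)
-    defer it.Close()
-    for it.Rewind(); it.Valid(); it.Next() {
-        keys = append(keys, it.Item().KeyCopy(nil))
-    }
-    return nil
-})
-handle(err)
-
-// Note: very large batches can hit ErrTxnTooBig; split them up as shown earlier.
-handle(db.Update(func(txn *badger.Txn) error {
-    for _, k := range keys {
-        if err := txn.Delete(k); err != nil {
-            return err
-        }
-    }
-    return nil
-}))
-```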
-
-### My writes are really slow. Why?
-
-Are you creating a new transaction for every single key update, and waiting for
-it to `Commit` fully before creating a new one? This will lead to very low
-throughput.
-
-We have created the `WriteBatch` API, which provides a way to batch up many
-updates into a single transaction and `Commit` that transaction using callbacks
-to avoid blocking. This amortizes the cost of a transaction really well and
-provides the most efficient way to do bulk writes.
-
-```go
-wb := db.NewWriteBatch()
-defer wb.Cancel()
-
-for i := 0; i < N; i++ {
- err := wb.Set(key(i), value(i), 0) // Will create txns as needed.
- handle(err)
-}
-handle(wb.Flush()) // Wait for all txns to finish.
-```
-
-Note that the `WriteBatch` API does not allow any reads. For read-modify-write
-workloads, you should use the `Transaction` API, as in the sketch below.
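-
-Here is a sketch of a read-modify-write counter using the `Transaction` API;
-the key name and fixed-width encoding are illustrative:
-
-```go
-// Assumes: import "encoding/binary"
-err := db.Update(func(txn *badger.Txn) error {
-    var cur uint64
-    item, err := txn.Get([]byte("counter"))
-    if err == nil {
-        if verr := item.Value(func(v []byte) error {
-            cur = binary.BigEndian.Uint64(v)
-            return nil
-        }); verr != nil {
-            return verr
-        }
-    } else if err != badger.ErrKeyNotFound {
-        return err
-    }
-    var buf [8]byte
-    binary.BigEndian.PutUint64(buf[:], cur+1)
-    return txn.Set([]byte("counter"), buf[:])
-})
-```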
-
-### I don't see any disk writes. Why?
-
-If you're using Badger with `SyncWrites=false`, your writes might not be written to the value log
-and won't get synced to disk immediately. Writes to the LSM tree are done in memory first, before
-they get compacted to disk. Compaction only happens once `MaxTableSize` has been reached. So, if
-you're doing a few writes and then checking, you might not see anything on disk. Once you `Close`
-the database, you'll see these writes on disk.
-
-### Reverse iteration doesn't give me the right results.
-
-Just like forward iteration goes to the first key which is equal or greater than the SEEK key, reverse iteration goes to the first key which is equal or lesser than the SEEK key. Therefore, SEEK key would not be part of the results. You can typically add a `0xff` byte as a suffix to the SEEK key to include it in the results. See the following issues: [#436](https://github.com/dgraph-io/badger/issues/436) and [#347](https://github.com/dgraph-io/badger/issues/347).
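-
-Here is a sketch of a reverse prefix scan that includes the SEEK key by
-appending a `0xff` byte to the seek position:
-
-```go
-err := db.View(func(txn *badger.Txn) error {
-    opts := badger.DefaultIteratorOptions
-    opts.Reverse = true
-    it := txn.NewIterator(opts)
-    defer it.Close()
-
-    prefix := []byte("1234")
-    seek := append(append([]byte{}, prefix...), 0xff) // just past the prefix
-    for it.Seek(seek); it.ValidForPrefix(prefix); it.Next() {
-        fmt.Printf("key=%s\n", it.Item().Key())
-    }
-    return nil
-})
-```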
-
-### Which instances should I use for Badger?
-
-We recommend using instances which provide local SSD storage, without any limit
-on the maximum IOPS. In AWS, these are storage-optimized instances like i3. They
-provide local SSDs which clock 100K IOPS over 4KB blocks easily.
-
-### I'm getting a closed channel error. Why?
-
-```
-panic: close of closed channel
-panic: send on closed channel
-```
-
-If you're seeing panics like the above, it's because you're operating on a closed DB. This can happen if you call `Close()` before sending a write, or if you call it multiple times. You should ensure that you call `Close()` only once, and that all your read/write operations finish before closing.
-
-### Are there any Go specific settings that I should use?
-
-We *highly* recommend setting a high number for `GOMAXPROCS`, which allows Go to
-observe the full IOPS throughput provided by modern SSDs. In Dgraph, we have set
-it to 128. For more details, [see this
-thread](https://groups.google.com/d/topic/golang-nuts/jPb_h3TvlKE/discussion).
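-
-As a sketch, this can be set at startup; 128 mirrors the value Dgraph uses, and
-the right number depends on your workload:
-
-```go
-import "runtime"
-
-func init() {
-    runtime.GOMAXPROCS(128)
-}
-```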
-
-### Are there any Linux specific settings that I should use?
-
-We recommend setting `max file descriptors` to a high number depending upon the expected size of
-your data. On Linux and Mac, you can check the file descriptor limit with `ulimit -n -H` for the
-hard limit and `ulimit -n -S` for the soft limit. A soft limit of `65535` is a good lower bound.
-You can adjust the limit as needed.
-
-### I see "manifest has unsupported version: X (we support Y)" error.
-
-This error means you have a badger directory which was created by an older version of badger and
-you're trying to open it with a newer version of badger. The underlying data format can change
-across badger versions, so users have to migrate their data directory.
-Badger data can be migrated from version X of badger to version Y of badger by following the steps
-listed below.
-Assume you are on badger v1.6.0 and wish to migrate to v2.0.0.
-1. Install badger version v1.6.0
- - `cd $GOPATH/src/github.com/dgraph-io/badger`
- - `git checkout v1.6.0`
- - `cd badger && go install`
-
- This should install the old badger binary in your $GOBIN.
-2. Create Backup
- - `badger backup --dir path/to/badger/directory -f badger.backup`
-3. Install badger version v2.0.0
- - `cd $GOPATH/src/github.com/dgraph-io/badger`
- - `git checkout v2.0.0`
- - `cd badger && go install`
-
- This should install the new badger binary in your $GOBIN.
-4. Restore from the backup
- - `badger restore --dir path/to/new/badger/directory -f badger.backup`
-
- This will create a new directory on `path/to/new/badger/directory` and add badger data in
- newer format to it.
-
-NOTE - The above steps shouldn't cause any data loss, but please ensure the new data is valid before
-deleting the old badger directory.
-
-## Contact
-- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions.
-- Please use [Github issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests.
-- Join [![Slack Status](http://slack.dgraph.io/badge.svg)](http://slack.dgraph.io).
-- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs).
-
diff --git a/vendor/github.com/dgraph-io/badger/VERSIONING.md b/vendor/github.com/dgraph-io/badger/VERSIONING.md
deleted file mode 100644
index a890a36f..00000000
--- a/vendor/github.com/dgraph-io/badger/VERSIONING.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Serialization Versioning: Semantic Versioning for databases
-
-Semantic Versioning, commonly known as SemVer, is a great idea that has been very widely adopted as
-a way to decide how to name software versions. The whole concept is very well summarized on
-semver.org with the following lines:
-
-> Given a version number MAJOR.MINOR.PATCH, increment the:
->
-> 1. MAJOR version when you make incompatible API changes,
-> 2. MINOR version when you add functionality in a backwards-compatible manner, and
-> 3. PATCH version when you make backwards-compatible bug fixes.
->
-> Additional labels for pre-release and build metadata are available as extensions to the
-> MAJOR.MINOR.PATCH format.
-
-Unfortunately, API changes are not the most important changes for libraries that serialize data for
-later consumption. For these libraries, such as BadgerDB, changes to the API are much easier to
-handle than changes to the data format used to store data on disk.
-
-## Serialization Version specification
-
-Serialization Versioning, like Semantic Versioning, uses 3 numbers and also calls them
-MAJOR.MINOR.PATCH, but the semantics of the numbers are slightly modified:
-
-Given a version number MAJOR.MINOR.PATCH, increment the:
-
-- MAJOR version when you make changes that require a transformation of the dataset before it can be
-used again.
-- MINOR version when old datasets are still readable but the API might have changed in
-backwards-compatible or incompatible ways.
-- PATCH version when you make backwards-compatible bug fixes.
-
-Additional labels for pre-release and build metadata are available as extensions to the
-MAJOR.MINOR.PATCH format.
-
-Following this naming strategy, migration from v1.x to v2.x requires a migration strategy for your
-existing dataset, and as such has to be carefully planned. Migrations in between different minor
-versions (e.g. v1.5.x and v1.6.x) might break your build, as the API *might* have changed, but once
-your code compiles there's no need for any data migration. Lastly, changes in between two different
-patch versions should never break your build or dataset.
-
-For more background on our decision to adopt Serialization Versioning, read the blog post
-[Semantic Versioning, Go Modules, and Databases][blog] and the original proposal on
-[this comment on Dgraph's Discuss forum][discuss].
-
-[blog]: https://blog.dgraph.io/post/serialization-versioning/
-[discuss]: https://discuss.dgraph.io/t/go-modules-on-badger-and-dgraph/4662/7
\ No newline at end of file
diff --git a/vendor/github.com/dgraph-io/badger/appveyor.yml b/vendor/github.com/dgraph-io/badger/appveyor.yml
deleted file mode 100644
index 36853e9d..00000000
--- a/vendor/github.com/dgraph-io/badger/appveyor.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-# version format
-version: "{build}"
-
-# Operating system (build VM template)
-os: Windows Server 2012 R2
-
-# Platform.
-platform: x64
-
-clone_folder: c:\gopath\src\github.com\dgraph-io\badger
-
-# Environment variables
-environment:
- GOVERSION: 1.12
- GOPATH: c:\gopath
- GO111MODULE: on
-
-# scripts that run after cloning repository
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
- - go env
- - python --version
-
-# To run your custom scripts instead of automatic MSBuild
-build_script:
- # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
- - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
- - cd c:\gopath\src\github.com\dgraph-io\badger
- - git branch
- - go get -t ./...
-
-# To run your custom scripts instead of automatic tests
-test_script:
- # Unit tests
- - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
- - go test -v github.com/dgraph-io/badger/...
- - go test -v -vlog_mmap=false github.com/dgraph-io/badger/...
- - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
-
-notifications:
- - provider: Email
- to:
- - pawan@dgraph.io
- on_build_failure: true
- on_build_status_changed: true
-# to disable deployment
-deploy: off
-
diff --git a/vendor/github.com/dgraph-io/badger/backup.go b/vendor/github.com/dgraph-io/badger/backup.go
deleted file mode 100644
index 365668d0..00000000
--- a/vendor/github.com/dgraph-io/badger/backup.go
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bufio"
- "bytes"
- "context"
- "encoding/binary"
- "io"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/y"
- "github.com/golang/protobuf/proto"
-)
-
-// flushThreshold determines when a buffer will be flushed. When performing a
-// backup/restore, the entries will be batched up until the total size of batch
-// is more than flushThreshold or entry size (without the value size) is more
-// than the maxBatchSize.
-const flushThreshold = 100 << 20
-
-// Backup is a wrapper function over Stream.Backup to generate full and incremental backups of the
-// DB. For more control over how many goroutines are used to generate the backup, or if you wish to
-// backup only a certain range of keys, use Stream.Backup directly.
-func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) {
- stream := db.NewStream()
- stream.LogPrefix = "DB.Backup"
- return stream.Backup(w, since)
-}
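-
-// A minimal backup sketch (an os.File target "f" and error handling are
-// assumed; passing 0 as "since" produces a full backup):
-//
-//	ts, err := db.Backup(f, 0)
-//	// Persist ts; passing it as "since" next time yields an incremental backup.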
-
-// Backup dumps a protobuf-encoded list of all entries in the database that
-// are newer than the specified version into the given writer. It returns a
-// timestamp indicating when the entries were dumped, which can be passed into
-// a later invocation to generate an incremental dump of entries that have
-// been added/modified since the last invocation of Stream.Backup().
-//
-// This can be used to backup the data in a database at a given point in time.
-func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) {
- stream.KeyToList = func(key []byte, itr *Iterator) (*pb.KVList, error) {
- list := &pb.KVList{}
- for ; itr.Valid(); itr.Next() {
- item := itr.Item()
- if !bytes.Equal(item.Key(), key) {
- return list, nil
- }
- if item.Version() < since {
- // Ignore versions less than given timestamp, or skip older
- // versions of the given key.
- return list, nil
- }
-
- var valCopy []byte
- if !item.IsDeletedOrExpired() {
- // No need to copy value, if item is deleted or expired.
- var err error
- valCopy, err = item.ValueCopy(nil)
- if err != nil {
- stream.db.opt.Errorf("Key [%x, %d]. Error while fetching value [%v]\n",
- item.Key(), item.Version(), err)
- return nil, err
- }
- }
-
- // clear txn bits
- meta := item.meta &^ (bitTxn | bitFinTxn)
- kv := &pb.KV{
- Key: item.KeyCopy(nil),
- Value: valCopy,
- UserMeta: []byte{item.UserMeta()},
- Version: item.Version(),
- ExpiresAt: item.ExpiresAt(),
- Meta: []byte{meta},
- }
- list.Kv = append(list.Kv, kv)
-
- switch {
- case item.DiscardEarlierVersions():
- // If we need to discard earlier versions of this item, add a delete
- // marker just below the current version.
- list.Kv = append(list.Kv, &pb.KV{
- Key: item.KeyCopy(nil),
- Version: item.Version() - 1,
- Meta: []byte{bitDelete},
- })
- return list, nil
-
- case item.IsDeletedOrExpired():
- return list, nil
- }
- }
- return list, nil
- }
-
- var maxVersion uint64
- stream.Send = func(list *pb.KVList) error {
- for _, kv := range list.Kv {
- if maxVersion < kv.Version {
- maxVersion = kv.Version
- }
- }
- return writeTo(list, w)
- }
-
- if err := stream.Orchestrate(context.Background()); err != nil {
- return 0, err
- }
- return maxVersion, nil
-}
-
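-// writeTo writes one length-prefixed KVList to w: a little-endian uint64
-// holding the marshaled size of the list, followed by the protobuf-encoded
-// list itself. DB.Load reads back exactly this framing.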
-func writeTo(list *pb.KVList, w io.Writer) error {
- if err := binary.Write(w, binary.LittleEndian, uint64(proto.Size(list))); err != nil {
- return err
- }
- buf, err := proto.Marshal(list)
- if err != nil {
- return err
- }
- _, err = w.Write(buf)
- return err
-}
-
-// KVLoader is used to write KVList objects into badger. It can be used to restore a backup.
-type KVLoader struct {
- db *DB
- throttle *y.Throttle
- entries []*Entry
- entriesSize int64
- totalSize int64
-}
-
-// NewKVLoader returns a new instance of KVLoader.
-func (db *DB) NewKVLoader(maxPendingWrites int) *KVLoader {
- return &KVLoader{
- db: db,
- throttle: y.NewThrottle(maxPendingWrites),
- entries: make([]*Entry, 0, db.opt.maxBatchCount),
- }
-}
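-
-// A rough restore-by-hand sketch (kvs is an assumed slice of *pb.KV values
-// decoded elsewhere; error handling elided):
-//
-//	ldr := db.NewKVLoader(16)
-//	for _, kv := range kvs {
-//		_ = ldr.Set(kv)
-//	}
-//	_ = ldr.Finish()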
-
-// Set writes the key-value pair to the database.
-func (l *KVLoader) Set(kv *pb.KV) error {
- var userMeta, meta byte
- if len(kv.UserMeta) > 0 {
- userMeta = kv.UserMeta[0]
- }
- if len(kv.Meta) > 0 {
- meta = kv.Meta[0]
- }
- e := &Entry{
- Key: y.KeyWithTs(kv.Key, kv.Version),
- Value: kv.Value,
- UserMeta: userMeta,
- ExpiresAt: kv.ExpiresAt,
- meta: meta,
- }
- estimatedSize := int64(e.estimateSize(l.db.opt.ValueThreshold))
- // Flush entries if inserting the next entry would overflow the transactional limits.
- if int64(len(l.entries))+1 >= l.db.opt.maxBatchCount ||
- l.entriesSize+estimatedSize >= l.db.opt.maxBatchSize ||
- l.totalSize >= flushThreshold {
- if err := l.send(); err != nil {
- return err
- }
- }
- l.entries = append(l.entries, e)
- l.entriesSize += estimatedSize
- l.totalSize += estimatedSize + int64(len(e.Value))
- return nil
-}
-
-func (l *KVLoader) send() error {
- if err := l.throttle.Do(); err != nil {
- return err
- }
- if err := l.db.batchSetAsync(l.entries, func(err error) {
- l.throttle.Done(err)
- }); err != nil {
- return err
- }
-
- l.entries = make([]*Entry, 0, l.db.opt.maxBatchCount)
- l.entriesSize = 0
- l.totalSize = 0
- return nil
-}
-
-// Finish is meant to be called after all the key-value pairs have been loaded.
-func (l *KVLoader) Finish() error {
- if len(l.entries) > 0 {
- if err := l.send(); err != nil {
- return err
- }
- }
- return l.throttle.Finish()
-}
-
-// Load reads a protobuf-encoded list of all entries from a reader and writes
-// them to the database. This can be used to restore the database from a backup
-// made by calling DB.Backup(). If more complex logic is needed to restore a badger
-// backup, the KVLoader API should be used instead.
-//
-// DB.Load() should be called on a database that is not running any other
-// concurrent transactions while it is running.
-func (db *DB) Load(r io.Reader, maxPendingWrites int) error {
- br := bufio.NewReaderSize(r, 16<<10)
- unmarshalBuf := make([]byte, 1<<10)
-
- ldr := db.NewKVLoader(maxPendingWrites)
- for {
- var sz uint64
- err := binary.Read(br, binary.LittleEndian, &sz)
- if err == io.EOF {
- break
- } else if err != nil {
- return err
- }
-
- if cap(unmarshalBuf) < int(sz) {
- unmarshalBuf = make([]byte, sz)
- }
-
- if _, err = io.ReadFull(br, unmarshalBuf[:sz]); err != nil {
- return err
- }
-
- list := &pb.KVList{}
- if err := proto.Unmarshal(unmarshalBuf[:sz], list); err != nil {
- return err
- }
-
- for _, kv := range list.Kv {
- if err := ldr.Set(kv); err != nil {
- return err
- }
-
- // Update nextTxnTs, memtable stores this
- // timestamp in badger head when flushed.
- if kv.Version >= db.orc.nextTxnTs {
- db.orc.nextTxnTs = kv.Version + 1
- }
- }
- }
-
- if err := ldr.Finish(); err != nil {
- return err
- }
- db.orc.txnMark.Done(db.orc.nextTxnTs - 1)
- return nil
-}
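-
-// A minimal restore sketch (an os.File "f" opened on a backup produced by
-// DB.Backup is assumed; 256 is an arbitrary maxPendingWrites value):
-//
-//	if err := db.Load(f, 256); err != nil {
-//		// handle restore failure
-//	}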
diff --git a/vendor/github.com/dgraph-io/badger/batch.go b/vendor/github.com/dgraph-io/badger/batch.go
deleted file mode 100644
index 76230a0b..00000000
--- a/vendor/github.com/dgraph-io/badger/batch.go
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright 2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "sync"
-
- "github.com/dgraph-io/badger/y"
-)
-
-// WriteBatch holds the necessary info to perform batched writes.
-type WriteBatch struct {
- sync.Mutex
- txn *Txn
- db *DB
- throttle *y.Throttle
- err error
- commitTs uint64
-}
-
-// NewWriteBatch creates a new WriteBatch. This provides a way to conveniently do a lot of writes,
-// batching them up as tightly as possible in a single transaction and using callbacks to avoid
-// waiting for them to commit, thus achieving good performance. This API hides away the logic of
-// creating and committing transactions. Due to the nature of SSI guarantees provided by Badger,
-// blind writes can never encounter transaction conflicts (ErrConflict).
-func (db *DB) NewWriteBatch() *WriteBatch {
- if db.opt.managedTxns {
- panic("cannot use NewWriteBatch in managed mode. Use NewWriteBatchAt instead")
- }
- return db.newWriteBatch()
-}
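-
-// A minimal usage sketch (key/value construction and error handling are
-// assumed; Cancel as a defer follows the guidance on Cancel below):
-//
-//	wb := db.NewWriteBatch()
-//	defer wb.Cancel()
-//	_ = wb.Set(key, value)
-//	_ = wb.Flush() // Commits all pending writes.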
-
-func (db *DB) newWriteBatch() *WriteBatch {
- return &WriteBatch{
- db: db,
- txn: db.newTransaction(true, true),
- throttle: y.NewThrottle(16),
- }
-}
-
-// SetMaxPendingTxns sets a limit on the maximum number of pending transactions while writing
-// batches. This function should be called before using WriteBatch. The default value of
-// MaxPendingTxns is 16, to minimise memory usage.
-func (wb *WriteBatch) SetMaxPendingTxns(max int) {
- wb.throttle = y.NewThrottle(max)
-}
-
-// Cancel function must be called if there's a chance that Flush might not get
-// called. If neither Flush nor Cancel is called, the transaction oracle would
-// never get a chance to clear out the row commit timestamp map, thus causing an
-// unbounded memory consumption. Typically, you can call Cancel as a defer
-// statement right after NewWriteBatch is called.
-//
-// Note that any committed writes would still go through despite calling Cancel.
-func (wb *WriteBatch) Cancel() {
- if err := wb.throttle.Finish(); err != nil {
- wb.db.opt.Errorf("WatchBatch.Cancel error while finishing: %v", err)
- }
- wb.txn.Discard()
-}
-
-func (wb *WriteBatch) callback(err error) {
- // sync.WaitGroup is thread-safe, so it doesn't need to be run inside wb.Lock.
- defer wb.throttle.Done(err)
- if err == nil {
- return
- }
-
- wb.Lock()
- defer wb.Unlock()
- if wb.err != nil {
- return
- }
- wb.err = err
-}
-
-// SetEntry is the equivalent of Txn.SetEntry.
-func (wb *WriteBatch) SetEntry(e *Entry) error {
- wb.Lock()
- defer wb.Unlock()
-
- if err := wb.txn.SetEntry(e); err != ErrTxnTooBig {
- return err
- }
- // Txn has reached its zenith. Commit now.
- if cerr := wb.commit(); cerr != nil {
- return cerr
- }
- // This time the error must not be ErrTxnTooBig, otherwise, we make the
- // error permanent.
- if err := wb.txn.SetEntry(e); err != nil {
- wb.err = err
- return err
- }
- return nil
-}
-
-// Set is the equivalent of Txn.Set().
-func (wb *WriteBatch) Set(k, v []byte) error {
- e := &Entry{Key: k, Value: v}
- return wb.SetEntry(e)
-}
-
-// Delete is the equivalent of Txn.Delete.
-func (wb *WriteBatch) Delete(k []byte) error {
- wb.Lock()
- defer wb.Unlock()
-
- if err := wb.txn.Delete(k); err != ErrTxnTooBig {
- return err
- }
- if err := wb.commit(); err != nil {
- return err
- }
- if err := wb.txn.Delete(k); err != nil {
- wb.err = err
- return err
- }
- return nil
-}
-
-// Caller to commit must hold a write lock.
-func (wb *WriteBatch) commit() error {
- if wb.err != nil {
- return wb.err
- }
- if err := wb.throttle.Do(); err != nil {
- return err
- }
- wb.txn.CommitWith(wb.callback)
- wb.txn = wb.db.newTransaction(true, true)
- wb.txn.readTs = 0 // We're not reading anything.
- wb.txn.commitTs = wb.commitTs
- return wb.err
-}
-
-// Flush must be called at the end to ensure that any pending writes get committed to Badger. Flush
-// returns any error stored by WriteBatch.
-func (wb *WriteBatch) Flush() error {
- wb.Lock()
- _ = wb.commit()
- wb.txn.Discard()
- wb.Unlock()
-
- if err := wb.throttle.Finish(); err != nil {
- return err
- }
-
- return wb.err
-}
-
-// Error returns any errors encountered so far. No commits would be run once an error is detected.
-func (wb *WriteBatch) Error() error {
- wb.Lock()
- defer wb.Unlock()
- return wb.err
-}
diff --git a/vendor/github.com/dgraph-io/badger/compaction.go b/vendor/github.com/dgraph-io/badger/compaction.go
deleted file mode 100644
index 375e40be..00000000
--- a/vendor/github.com/dgraph-io/badger/compaction.go
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "fmt"
- "log"
- "math"
- "sync"
-
- "golang.org/x/net/trace"
-
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
-)
-
-type keyRange struct {
- left []byte
- right []byte
- inf bool
-}
-
-var infRange = keyRange{inf: true}
-
-func (r keyRange) String() string {
- return fmt.Sprintf("[left=%x, right=%x, inf=%v]", r.left, r.right, r.inf)
-}
-
-func (r keyRange) equals(dst keyRange) bool {
- return bytes.Equal(r.left, dst.left) &&
- bytes.Equal(r.right, dst.right) &&
- r.inf == dst.inf
-}
-
-func (r keyRange) overlapsWith(dst keyRange) bool {
- if r.inf || dst.inf {
- return true
- }
-
- // If my left is greater than dst right, we have no overlap.
- if y.CompareKeys(r.left, dst.right) > 0 {
- return false
- }
- // If my right is less than dst left, we have no overlap.
- if y.CompareKeys(r.right, dst.left) < 0 {
- return false
- }
- // We have overlap.
- return true
-}
-
-func getKeyRange(tables ...*table.Table) keyRange {
- if len(tables) == 0 {
- return keyRange{}
- }
- smallest := tables[0].Smallest()
- biggest := tables[0].Biggest()
- for i := 1; i < len(tables); i++ {
- if y.CompareKeys(tables[i].Smallest(), smallest) < 0 {
- smallest = tables[i].Smallest()
- }
- if y.CompareKeys(tables[i].Biggest(), biggest) > 0 {
- biggest = tables[i].Biggest()
- }
- }
-
- // We pick all the versions of the smallest and the biggest key. Note that version zero would
- // be the rightmost key, considering versions are default sorted in descending order.
- return keyRange{
- left: y.KeyWithTs(y.ParseKey(smallest), math.MaxUint64),
- right: y.KeyWithTs(y.ParseKey(biggest), 0),
- }
-}
-
-type levelCompactStatus struct {
- ranges []keyRange
- delSize int64
-}
-
-func (lcs *levelCompactStatus) debug() string {
- var b bytes.Buffer
- for _, r := range lcs.ranges {
- b.WriteString(r.String())
- }
- return b.String()
-}
-
-func (lcs *levelCompactStatus) overlapsWith(dst keyRange) bool {
- for _, r := range lcs.ranges {
- if r.overlapsWith(dst) {
- return true
- }
- }
- return false
-}
-
-func (lcs *levelCompactStatus) remove(dst keyRange) bool {
- final := lcs.ranges[:0]
- var found bool
- for _, r := range lcs.ranges {
- if !r.equals(dst) {
- final = append(final, r)
- } else {
- found = true
- }
- }
- lcs.ranges = final
- return found
-}
-
-type compactStatus struct {
- sync.RWMutex
- levels []*levelCompactStatus
-}
-
-func (cs *compactStatus) toLog(tr trace.Trace) {
- cs.RLock()
- defer cs.RUnlock()
-
- tr.LazyPrintf("Compaction status:")
- for i, l := range cs.levels {
- if l.debug() == "" {
- continue
- }
- tr.LazyPrintf("[%d] %s", i, l.debug())
- }
-}
-
-func (cs *compactStatus) overlapsWith(level int, this keyRange) bool {
- cs.RLock()
- defer cs.RUnlock()
-
- thisLevel := cs.levels[level]
- return thisLevel.overlapsWith(this)
-}
-
-func (cs *compactStatus) delSize(l int) int64 {
- cs.RLock()
- defer cs.RUnlock()
- return cs.levels[l].delSize
-}
-
-type thisAndNextLevelRLocked struct{}
-
-// compareAndAdd checks whether this compactDef can be run, i.e. that it doesn't overlap with any
-// other running compaction. If it can be run, the run is recorded in the compactStatus state.
-func (cs *compactStatus) compareAndAdd(_ thisAndNextLevelRLocked, cd compactDef) bool {
- cs.Lock()
- defer cs.Unlock()
-
- level := cd.thisLevel.level
-
- y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels))
- thisLevel := cs.levels[level]
- nextLevel := cs.levels[level+1]
-
- if thisLevel.overlapsWith(cd.thisRange) {
- return false
- }
- if nextLevel.overlapsWith(cd.nextRange) {
- return false
- }
- // Check whether this level really needs compaction or not. Otherwise, we'll end up
- // running parallel compactions for the same level.
- // Update: We should not be checking size here. Compaction priority already did the size checks.
- // Here we should just be executing the wish of others.
-
- thisLevel.ranges = append(thisLevel.ranges, cd.thisRange)
- nextLevel.ranges = append(nextLevel.ranges, cd.nextRange)
- thisLevel.delSize += cd.thisSize
- return true
-}
-
-func (cs *compactStatus) delete(cd compactDef) {
- cs.Lock()
- defer cs.Unlock()
-
- level := cd.thisLevel.level
- y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels))
-
- thisLevel := cs.levels[level]
- nextLevel := cs.levels[level+1]
-
- thisLevel.delSize -= cd.thisSize
- found := thisLevel.remove(cd.thisRange)
- found = nextLevel.remove(cd.nextRange) && found
-
- if !found {
- this := cd.thisRange
- next := cd.nextRange
- fmt.Printf("Looking for: [%q, %q, %v] in this level.\n", this.left, this.right, this.inf)
- fmt.Printf("This Level:\n%s\n", thisLevel.debug())
- fmt.Println()
- fmt.Printf("Looking for: [%q, %q, %v] in next level.\n", next.left, next.right, next.inf)
- fmt.Printf("Next Level:\n%s\n", nextLevel.debug())
- log.Fatal("keyRange not found")
- }
-}
diff --git a/vendor/github.com/dgraph-io/badger/db.go b/vendor/github.com/dgraph-io/badger/db.go
deleted file mode 100644
index 1d340ecd..00000000
--- a/vendor/github.com/dgraph-io/badger/db.go
+++ /dev/null
@@ -1,1560 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "expvar"
- "io"
- "math"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/options"
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/skl"
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
- humanize "github.com/dustin/go-humanize"
- "github.com/pkg/errors"
- "golang.org/x/net/trace"
-)
-
-var (
- badgerPrefix = []byte("!badger!") // Prefix for internal keys used by badger.
- head = []byte("!badger!head") // For storing value offset for replay.
- txnKey = []byte("!badger!txn") // For indicating end of entries in txn.
- badgerMove = []byte("!badger!move") // For key-value pairs which got moved during GC.
- lfDiscardStatsKey = []byte("!badger!discard") // For storing lfDiscardStats
-)
-
-type closers struct {
- updateSize *y.Closer
- compactors *y.Closer
- memtable *y.Closer
- writes *y.Closer
- valueGC *y.Closer
- pub *y.Closer
-}
-
-// DB provides the various functions required to interact with Badger.
-// DB is thread-safe.
-type DB struct {
- sync.RWMutex // Guards list of inmemory tables, not individual reads and writes.
-
- dirLockGuard *directoryLockGuard
- // nil if Dir and ValueDir are the same
- valueDirGuard *directoryLockGuard
-
- closers closers
- elog trace.EventLog
- mt *skl.Skiplist // Our latest (actively written) in-memory table
- imm []*skl.Skiplist // Add here only AFTER pushing to flushChan.
- opt Options
- manifest *manifestFile
- lc *levelsController
- vlog valueLog
- vhead valuePointer // less than or equal to a pointer to the last vlog value put into mt
- writeCh chan *request
- flushChan chan flushTask // For flushing memtables.
- closeOnce sync.Once // For closing DB only once.
-
- // Number of log rotates since the last memtable flush. We will access this field via atomic
- // functions. Since we are not going to use any 64bit atomic functions, there is no need for
- // 64 bit alignment of this struct (see #311).
- logRotates int32
-
- blockWrites int32
-
- orc *oracle
-
- pub *publisher
-}
-
-const (
- kvWriteChCapacity = 1000
-)
-
-func (db *DB) replayFunction() func(Entry, valuePointer) error {
- type txnEntry struct {
- nk []byte
- v y.ValueStruct
- }
-
- var txn []txnEntry
- var lastCommit uint64
-
- toLSM := func(nk []byte, vs y.ValueStruct) {
- for err := db.ensureRoomForWrite(); err != nil; err = db.ensureRoomForWrite() {
- db.elog.Printf("Replay: Making room for writes")
- time.Sleep(10 * time.Millisecond)
- }
- db.mt.Put(nk, vs)
- }
-
- first := true
- return func(e Entry, vp valuePointer) error { // Function for replaying.
- if first {
- db.elog.Printf("First key=%q\n", e.Key)
- }
- first = false
- db.orc.Lock()
- if db.orc.nextTxnTs < y.ParseTs(e.Key) {
- db.orc.nextTxnTs = y.ParseTs(e.Key)
- }
- db.orc.Unlock()
-
- nk := make([]byte, len(e.Key))
- copy(nk, e.Key)
- var nv []byte
- meta := e.meta
- if db.shouldWriteValueToLSM(e) {
- nv = make([]byte, len(e.Value))
- copy(nv, e.Value)
- } else {
- nv = make([]byte, vptrSize)
- vp.Encode(nv)
- meta = meta | bitValuePointer
- }
- // Update vhead. If the crash happens while replay was in progress
- // and the head is not updated, we will end up replaying all the
- // files starting from file zero, again.
- db.updateHead([]valuePointer{vp})
-
- v := y.ValueStruct{
- Value: nv,
- Meta: meta,
- UserMeta: e.UserMeta,
- ExpiresAt: e.ExpiresAt,
- }
-
- if e.meta&bitFinTxn > 0 {
- txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
- if err != nil {
- return errors.Wrapf(err, "Unable to parse txn fin: %q", e.Value)
- }
- y.AssertTrue(lastCommit == txnTs)
- y.AssertTrue(len(txn) > 0)
- // Got the end of txn. Now we can store them.
- for _, t := range txn {
- toLSM(t.nk, t.v)
- }
- txn = txn[:0]
- lastCommit = 0
-
- } else if e.meta&bitTxn > 0 {
- txnTs := y.ParseTs(nk)
- if lastCommit == 0 {
- lastCommit = txnTs
- }
- if lastCommit != txnTs {
- db.opt.Warningf("Found an incomplete txn at timestamp %d. Discarding it.\n",
- lastCommit)
- txn = txn[:0]
- lastCommit = txnTs
- }
- te := txnEntry{nk: nk, v: v}
- txn = append(txn, te)
-
- } else {
- // This entry is from a rewrite.
- toLSM(nk, v)
-
- // We shouldn't get this entry in the middle of a transaction.
- y.AssertTrue(lastCommit == 0)
- y.AssertTrue(len(txn) == 0)
- }
- return nil
- }
-}
-
-// Open returns a new DB object.
-func Open(opt Options) (db *DB, err error) {
- opt.maxBatchSize = (15 * opt.MaxTableSize) / 100
- opt.maxBatchCount = opt.maxBatchSize / int64(skl.MaxNodeSize)
-
- if opt.ValueThreshold > ValueThresholdLimit {
- return nil, ErrValueThreshold
- }
-
- if opt.ReadOnly {
- // Can't truncate if the DB is read only.
- opt.Truncate = false
- // Do not perform compaction in read only mode.
- opt.CompactL0OnClose = false
- }
-
- for _, path := range []string{opt.Dir, opt.ValueDir} {
- dirExists, err := exists(path)
- if err != nil {
- return nil, y.Wrapf(err, "Invalid Dir: %q", path)
- }
- if !dirExists {
- if opt.ReadOnly {
- return nil, errors.Errorf("Cannot find directory %q for read-only open", path)
- }
- // Try to create the directory
- err = os.Mkdir(path, 0700)
- if err != nil {
- return nil, y.Wrapf(err, "Error Creating Dir: %q", path)
- }
- }
- }
- var dirLockGuard, valueDirLockGuard *directoryLockGuard
- if !opt.BypassLockGuard {
- absDir, err := filepath.Abs(opt.Dir)
- if err != nil {
- return nil, err
- }
- absValueDir, err := filepath.Abs(opt.ValueDir)
- if err != nil {
- return nil, err
- }
- dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly)
- if err != nil {
- return nil, err
- }
- defer func() {
- if dirLockGuard != nil {
- _ = dirLockGuard.release()
- }
- }()
- if absValueDir != absDir {
- valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly)
- if err != nil {
- return nil, err
- }
- defer func() {
- if valueDirLockGuard != nil {
- _ = valueDirLockGuard.release()
- }
- }()
- }
- }
- if !(opt.ValueLogFileSize <= 2<<30 && opt.ValueLogFileSize >= 1<<20) {
- return nil, ErrValueLogSize
- }
- if !(opt.ValueLogLoadingMode == options.FileIO ||
- opt.ValueLogLoadingMode == options.MemoryMap) {
- return nil, ErrInvalidLoadingMode
- }
- manifestFile, manifest, err := openOrCreateManifestFile(opt.Dir, opt.ReadOnly)
- if err != nil {
- return nil, err
- }
- defer func() {
- if manifestFile != nil {
- _ = manifestFile.close()
- }
- }()
-
- elog := y.NoEventLog
- if opt.EventLogging {
- elog = trace.NewEventLog("Badger", "DB")
- }
-
- db = &DB{
- imm: make([]*skl.Skiplist, 0, opt.NumMemtables),
- flushChan: make(chan flushTask, opt.NumMemtables),
- writeCh: make(chan *request, kvWriteChCapacity),
- opt: opt,
- manifest: manifestFile,
- elog: elog,
- dirLockGuard: dirLockGuard,
- valueDirGuard: valueDirLockGuard,
- orc: newOracle(opt),
- pub: newPublisher(),
- }
-
- // Calculate initial size.
- db.calculateSize()
- db.closers.updateSize = y.NewCloser(1)
- go db.updateSize(db.closers.updateSize)
- db.mt = skl.NewSkiplist(arenaSize(opt))
-
- // newLevelsController potentially loads files in directory.
- if db.lc, err = newLevelsController(db, &manifest); err != nil {
- return nil, err
- }
-
- // Initialize vlog struct.
- db.vlog.init(db)
-
- if !opt.ReadOnly {
- db.closers.compactors = y.NewCloser(1)
- db.lc.startCompact(db.closers.compactors)
-
- db.closers.memtable = y.NewCloser(1)
- go func() {
- _ = db.flushMemtable(db.closers.memtable) // Need levels controller to be up.
- }()
- }
-
- headKey := y.KeyWithTs(head, math.MaxUint64)
- // We need to pass the key with a timestamp; the LSM get removes the last 8 bytes and compares the key.
- vs, err := db.get(headKey)
- if err != nil {
- return nil, errors.Wrap(err, "Retrieving head")
- }
- db.orc.nextTxnTs = vs.Version
- var vptr valuePointer
- if len(vs.Value) > 0 {
- vptr.Decode(vs.Value)
- }
-
- replayCloser := y.NewCloser(1)
- go db.doWrites(replayCloser)
-
- if err = db.vlog.open(db, vptr, db.replayFunction()); err != nil {
- return db, err
- }
- replayCloser.SignalAndWait() // Wait for replay to be applied first.
-
- // Let's advance nextTxnTs to one more than whatever we observed via
- // replaying the logs.
- db.orc.txnMark.Done(db.orc.nextTxnTs)
- // In normal mode, we must update readMark so older versions of keys can be removed during
- // compaction when run in offline mode via the flatten tool.
- db.orc.readMark.Done(db.orc.nextTxnTs)
- db.orc.incrementNextTs()
-
- db.writeCh = make(chan *request, kvWriteChCapacity)
- db.closers.writes = y.NewCloser(1)
- go db.doWrites(db.closers.writes)
-
- db.closers.valueGC = y.NewCloser(1)
- go db.vlog.waitOnGC(db.closers.valueGC)
-
- db.closers.pub = y.NewCloser(1)
- go db.pub.listenForUpdates(db.closers.pub)
-
- valueDirLockGuard = nil
- dirLockGuard = nil
- manifestFile = nil
- return db, nil
-}
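-
-// A minimal open/close sketch (construction of opt is elided; opt.Dir and
-// opt.ValueDir must point at writable directories):
-//
-//	db, err := Open(opt)
-//	if err != nil {
-//		// handle error
-//	}
-//	defer db.Close()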
-
-// Close closes a DB. It's crucial to call it to ensure all the pending updates make their way to
-// disk. Calling DB.Close() multiple times would still only close the DB once.
-func (db *DB) Close() error {
- var err error
- db.closeOnce.Do(func() {
- err = db.close()
- })
- return err
-}
-
-func (db *DB) close() (err error) {
- db.elog.Printf("Closing database")
-
- atomic.StoreInt32(&db.blockWrites, 1)
-
- // Stop value GC first.
- db.closers.valueGC.SignalAndWait()
-
- // Stop writes next.
- db.closers.writes.SignalAndWait()
-
- // Don't accept any more writes.
- close(db.writeCh)
-
- db.closers.pub.SignalAndWait()
-
- // Now close the value log.
- if vlogErr := db.vlog.Close(); vlogErr != nil {
- err = errors.Wrap(vlogErr, "DB.Close")
- }
-
- // Make sure that block writer is done pushing stuff into memtable!
- // Otherwise, you will have a race condition: we are trying to flush memtables
- // and remove them completely, while the block / memtable writer is still
- // trying to push stuff into the memtable. This will also resolve the value
- // offset problem: as we push into memtable, we update value offsets there.
- if !db.mt.Empty() {
- db.elog.Printf("Flushing memtable")
- for {
- pushedFlushTask := func() bool {
- db.Lock()
- defer db.Unlock()
- y.AssertTrue(db.mt != nil)
- select {
- case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
- db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm.
- db.mt = nil // Will segfault if we try writing!
- db.elog.Printf("pushed to flush chan\n")
- return true
- default:
- // If we fail to push, we need to unlock and wait for a short while.
- // The flushing operation needs to update s.imm. Otherwise, we have a deadlock.
- // TODO: Think about how to do this more cleanly, maybe without any locks.
- }
- return false
- }()
- if pushedFlushTask {
- break
- }
- time.Sleep(10 * time.Millisecond)
- }
- }
- db.stopMemoryFlush()
- db.stopCompactions()
-
- // Force Compact L0
- // We don't need to care about cstatus since no parallel compaction is running.
- if db.opt.CompactL0OnClose {
- err := db.lc.doCompact(compactionPriority{level: 0, score: 1.73})
- switch err {
- case errFillTables:
- // This error only means that there might be enough tables to do a compaction. So, we
- // should not report it to the end user to avoid confusing them.
- case nil:
- db.opt.Infof("Force compaction on level 0 done")
- default:
- db.opt.Warningf("While forcing compaction on level 0: %v", err)
- }
- }
-
- if lcErr := db.lc.close(); err == nil {
- err = errors.Wrap(lcErr, "DB.Close")
- }
- db.elog.Printf("Waiting for closer")
- db.closers.updateSize.SignalAndWait()
- db.orc.Stop()
-
- db.elog.Finish()
-
- if db.dirLockGuard != nil {
- if guardErr := db.dirLockGuard.release(); err == nil {
- err = errors.Wrap(guardErr, "DB.Close")
- }
- }
- if db.valueDirGuard != nil {
- if guardErr := db.valueDirGuard.release(); err == nil {
- err = errors.Wrap(guardErr, "DB.Close")
- }
- }
- if manifestErr := db.manifest.close(); err == nil {
- err = errors.Wrap(manifestErr, "DB.Close")
- }
-
- // Fsync directories to ensure that lock file, and any other removed files whose directory
- // we haven't specifically fsynced, are guaranteed to have their directory entry removal
- // persisted to disk.
- if syncErr := syncDir(db.opt.Dir); err == nil {
- err = errors.Wrap(syncErr, "DB.Close")
- }
- if syncErr := syncDir(db.opt.ValueDir); err == nil {
- err = errors.Wrap(syncErr, "DB.Close")
- }
-
- return err
-}
-
-const (
- lockFile = "LOCK"
-)
-
-// Sync syncs database content to disk. This function provides
-// more control to user to sync data whenever required.
-func (db *DB) Sync() error {
- return db.vlog.sync(math.MaxUint32)
-}
-
-// getMemTables returns the current memtables and gets references to them.
-func (db *DB) getMemTables() ([]*skl.Skiplist, func()) {
- db.RLock()
- defer db.RUnlock()
-
- tables := make([]*skl.Skiplist, len(db.imm)+1)
-
- // Get mutable memtable.
- tables[0] = db.mt
- tables[0].IncrRef()
-
- // Get immutable memtables.
- last := len(db.imm) - 1
- for i := range db.imm {
- tables[i+1] = db.imm[last-i]
- tables[i+1].IncrRef()
- }
- return tables, func() {
- for _, tbl := range tables {
- tbl.DecrRef()
- }
- }
-}
-
-// get returns the value in memtable or disk for given key.
-// Note that value will include meta byte.
-//
-// IMPORTANT: We should never write an entry with an older timestamp for the same key. We need to
-// maintain this invariant to search for the latest value of a key, or else we need to search in all
-// tables and find the max version among them. To maintain this invariant, we also need to ensure
-// that all versions of a key are always present in the same table from level 1, because compaction
-// can push any table down.
-//
-// Update (Sep 22, 2018): To maintain the above invariant, and to allow keys to be moved from one
-// value log to another (while reclaiming space during value log GC), we have logically moved this
-// need to write "old versions after new versions" to the badgerMove keyspace. Thus, for normal
-// gets, we can stop going down the LSM tree once we find any version of the key (note however that
-// we will ALWAYS skip versions with ts greater than the key version). However, if that key has
-// been moved, then for the corresponding movekey, we'll look through all the levels of the tree
-// to ensure that we pick the highest version of the movekey present.
-func (db *DB) get(key []byte) (y.ValueStruct, error) {
- tables, decr := db.getMemTables() // Lock should be released.
- defer decr()
-
- var maxVs *y.ValueStruct
- var version uint64
- if bytes.HasPrefix(key, badgerMove) {
- // If we are checking badgerMove key, we should look into all the
- // levels, so we can pick up the newer versions, which might have been
- // compacted down the tree.
- maxVs = &y.ValueStruct{}
- version = y.ParseTs(key)
- }
-
- y.NumGets.Add(1)
- for i := 0; i < len(tables); i++ {
- vs := tables[i].Get(key)
- y.NumMemtableGets.Add(1)
- if vs.Meta == 0 && vs.Value == nil {
- continue
- }
- // Found a version of the key. For user keyspace, return immediately. For move keyspace,
- // continue iterating, unless we found a version == given key version.
- if maxVs == nil || vs.Version == version {
- return vs, nil
- }
- if maxVs.Version < vs.Version {
- *maxVs = vs
- }
- }
- return db.lc.get(key, maxVs)
-}
-
-// updateHead should not be called without holding db.Lock(), since db.vhead is used
-// by the writer goroutines and the memtable flushing goroutine.
-func (db *DB) updateHead(ptrs []valuePointer) {
- var ptr valuePointer
- for i := len(ptrs) - 1; i >= 0; i-- {
- p := ptrs[i]
- if !p.IsZero() {
- ptr = p
- break
- }
- }
- if ptr.IsZero() {
- return
- }
-
- y.AssertTrue(!ptr.Less(db.vhead))
- db.vhead = ptr
-}
-
-var requestPool = sync.Pool{
- New: func() interface{} {
- return new(request)
- },
-}
-
-func (db *DB) shouldWriteValueToLSM(e Entry) bool {
- return len(e.Value) < db.opt.ValueThreshold
-}
-
-func (db *DB) writeToLSM(b *request) error {
- if len(b.Ptrs) != len(b.Entries) {
- return errors.Errorf("Ptrs and Entries don't match: %+v", b)
- }
-
- for i, entry := range b.Entries {
- if entry.meta&bitFinTxn != 0 {
- continue
- }
- if db.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case.
- db.mt.Put(entry.Key,
- y.ValueStruct{
- Value: entry.Value,
- // Ensure value pointer flag is removed. Otherwise, the value will fail
- // to be retrieved during iterator prefetch. `bitValuePointer` is only
- // known to be set in write to LSM when the entry is loaded from a backup
- // with lower ValueThreshold and its value was stored in the value log.
- Meta: entry.meta &^ bitValuePointer,
- UserMeta: entry.UserMeta,
- ExpiresAt: entry.ExpiresAt,
- })
- } else {
- var offsetBuf [vptrSize]byte
- db.mt.Put(entry.Key,
- y.ValueStruct{
- Value: b.Ptrs[i].Encode(offsetBuf[:]),
- Meta: entry.meta | bitValuePointer,
- UserMeta: entry.UserMeta,
- ExpiresAt: entry.ExpiresAt,
- })
- }
- }
- return nil
-}
-
-// writeRequests is called serially by only one goroutine.
-func (db *DB) writeRequests(reqs []*request) error {
- if len(reqs) == 0 {
- return nil
- }
-
- done := func(err error) {
- for _, r := range reqs {
- r.Err = err
- r.Wg.Done()
- }
- }
- db.elog.Printf("writeRequests called. Writing to value log")
-
- err := db.vlog.write(reqs)
- if err != nil {
- done(err)
- return err
- }
-
- db.elog.Printf("Sending updates to subscribers")
- db.pub.sendUpdates(reqs)
- db.elog.Printf("Writing to memtable")
- var count int
- for _, b := range reqs {
- if len(b.Entries) == 0 {
- continue
- }
- count += len(b.Entries)
- var i uint64
- for err = db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() {
- i++
- if i%100 == 0 {
- db.elog.Printf("Making room for writes")
- }
- // We need to poll a bit because both hasRoomForWrite and the flusher need access to s.imm.
- // When flushChan is full and you are blocked there, and the flusher is trying to update s.imm,
- // you will get a deadlock.
- time.Sleep(10 * time.Millisecond)
- }
- if err != nil {
- done(err)
- return errors.Wrap(err, "writeRequests")
- }
- if err := db.writeToLSM(b); err != nil {
- done(err)
- return errors.Wrap(err, "writeRequests")
- }
- db.Lock()
- db.updateHead(b.Ptrs)
- db.Unlock()
- }
- done(nil)
- db.elog.Printf("%d entries written", count)
- return nil
-}
-
-func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) {
- if atomic.LoadInt32(&db.blockWrites) == 1 {
- return nil, ErrBlockedWrites
- }
- var count, size int64
- for _, e := range entries {
- size += int64(e.estimateSize(db.opt.ValueThreshold))
- count++
- }
- if count >= db.opt.maxBatchCount || size >= db.opt.maxBatchSize {
- return nil, ErrTxnTooBig
- }
-
- // We can only service one request because we need each txn to be stored in a contiguous section.
- // Txns should not interleave among other txns or rewrites.
- req := requestPool.Get().(*request)
- req.reset()
- req.Entries = entries
- req.Wg.Add(1)
- req.IncrRef() // for db write
- db.writeCh <- req // Handled in doWrites.
- y.NumPuts.Add(int64(len(entries)))
-
- return req, nil
-}
-
-func (db *DB) doWrites(lc *y.Closer) {
- defer lc.Done()
- pendingCh := make(chan struct{}, 1)
-
- writeRequests := func(reqs []*request) {
- if err := db.writeRequests(reqs); err != nil {
- db.opt.Errorf("writeRequests: %v", err)
- }
- <-pendingCh
- }
-
- // This variable tracks the number of pending writes.
- reqLen := new(expvar.Int)
- y.PendingWrites.Set(db.opt.Dir, reqLen)
-
- reqs := make([]*request, 0, 10)
- for {
- var r *request
- select {
- case r = <-db.writeCh:
- case <-lc.HasBeenClosed():
- goto closedCase
- }
-
- for {
- reqs = append(reqs, r)
- reqLen.Set(int64(len(reqs)))
-
- if len(reqs) >= 3*kvWriteChCapacity {
- pendingCh <- struct{}{} // blocking.
- goto writeCase
- }
-
- select {
- // Either push to pending, or continue to pick from writeCh.
- case r = <-db.writeCh:
- case pendingCh <- struct{}{}:
- goto writeCase
- case <-lc.HasBeenClosed():
- goto closedCase
- }
- }
-
- closedCase:
- // All the pending requests are drained.
- // Don't close the writeCh, because it is used in several places.
- for {
- select {
- case r = <-db.writeCh:
- reqs = append(reqs, r)
- default:
- pendingCh <- struct{}{} // Push to pending before doing a write.
- writeRequests(reqs)
- return
- }
- }
-
- writeCase:
- go writeRequests(reqs)
- reqs = make([]*request, 0, 10)
- reqLen.Set(0)
- }
-}
-
-// batchSet applies a list of badger.Entry. If a request-level error occurs, it
-// will be returned.
-//
-//	Check(kv.batchSet(entries))
-func (db *DB) batchSet(entries []*Entry) error {
- req, err := db.sendToWriteCh(entries)
- if err != nil {
- return err
- }
-
- return req.Wait()
-}
-
-// batchSetAsync is the asynchronous version of batchSet. It accepts a callback
-// function which is called when all the sets are complete. If a request-level
-// error occurs, it will be passed back via the callback.
-//
-//	err := kv.batchSetAsync(entries, func(err error) {
-//		Check(err)
-//	})
-func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error {
- req, err := db.sendToWriteCh(entries)
- if err != nil {
- return err
- }
- go func() {
- err := req.Wait()
- // Write is complete. Let's call the callback function now.
- f(err)
- }()
- return nil
-}
-
-var errNoRoom = errors.New("No room for write")
-
-// ensureRoomForWrite is always called serially.
-func (db *DB) ensureRoomForWrite() error {
- var err error
- db.Lock()
- defer db.Unlock()
-
- // Here we determine if we need to force flush memtable. Given we rotated log file, it would
- // make sense to force flush a memtable, so the updated value head would have a chance to be
- // pushed to L0. Otherwise, it would not go to L0, until the memtable has been fully filled,
- // which can take a lot longer if the write load has fewer keys and larger values. This force
- // flush, thus avoids the need to read through a lot of log files on a crash and restart.
- // The above approach is quite simple, with a small drawback. We call ensureRoomForWrite before
- // inserting every entry in the memtable, but we only get the latest db.head after all entries
- // for a request are inserted in the memtable. If we have done >= db.logRotates rotations, then
- // while inserting the first entry in the memtable, the condition below will be true and we will
- // end up flushing an old value of db.head. Hence we limit the number of value log files to be
- // read to db.logRotates only.
- forceFlush := atomic.LoadInt32(&db.logRotates) >= db.opt.LogRotatesToFlush
-
- if !forceFlush && db.mt.MemSize() < db.opt.MaxTableSize {
- return nil
- }
-
- y.AssertTrue(db.mt != nil) // A nil mt indicates that DB is being closed.
- select {
- case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
- // After every memtable flush, let's reset the counter.
- atomic.StoreInt32(&db.logRotates, 0)
-
- // Ensure value log is synced to disk so this memtable's contents wouldn't be lost.
- err = db.vlog.sync(db.vhead.Fid)
- if err != nil {
- return err
- }
-
- db.opt.Debugf("Flushing memtable, mt.size=%d size of flushChan: %d\n",
- db.mt.MemSize(), len(db.flushChan))
- // We manage to push this task. Let's modify imm.
- db.imm = append(db.imm, db.mt)
- db.mt = skl.NewSkiplist(arenaSize(db.opt))
- // New memtable is empty. We certainly have room.
- return nil
- default:
- // We need to do this to unlock and allow the flusher to modify imm.
- return errNoRoom
- }
-}
-
-func arenaSize(opt Options) int64 {
- return opt.MaxTableSize + opt.maxBatchSize + opt.maxBatchCount*int64(skl.MaxNodeSize)
-}
-
-// WriteLevel0Table flushes memtable.
-func writeLevel0Table(ft flushTask, f io.Writer) error {
- iter := ft.mt.NewIterator()
- defer iter.Close()
- b := table.NewTableBuilder()
- defer b.Close()
- for iter.SeekToFirst(); iter.Valid(); iter.Next() {
- if len(ft.dropPrefixes) > 0 && hasAnyPrefixes(iter.Key(), ft.dropPrefixes) {
- continue
- }
- b.Add(iter.Key(), iter.Value())
- }
- _, err := f.Write(b.Finish())
- return err
-}
-
-type flushTask struct {
- mt *skl.Skiplist
- vptr valuePointer
- dropPrefixes [][]byte
-}
-
-func (db *DB) pushHead(ft flushTask) error {
- // Ensure we never push a zero valued head pointer.
- if ft.vptr.IsZero() {
- return errors.New("Head should not be zero")
- }
-
- // Store badger head even if vptr is zero, need it for readTs
- db.opt.Debugf("Storing value log head: %+v\n", ft.vptr)
- offset := make([]byte, vptrSize)
- ft.vptr.Encode(offset)
-
- // Pick the max commit ts, so in case of crash, our read ts would be higher than all the
- // commits.
- headTs := y.KeyWithTs(head, db.orc.nextTs())
- ft.mt.Put(headTs, y.ValueStruct{Value: offset})
-
- return nil
-}
-
-// handleFlushTask must be run serially.
-func (db *DB) handleFlushTask(ft flushTask) error {
- // There can be a scenario when an empty memtable is flushed. For example, the memtable is
- // empty and, after writing a request to the value log, the rotation count exceeds
- // db.LogRotatesToFlush.
- if ft.mt.Empty() {
- return nil
- }
-
- if err := db.pushHead(ft); err != nil {
- return err
- }
-
- fileID := db.lc.reserveFileID()
- fd, err := y.CreateSyncedFile(table.NewFilename(fileID, db.opt.Dir), true)
- if err != nil {
- return y.Wrap(err)
- }
-
- // Don't block just to sync the directory entry.
- dirSyncCh := make(chan error)
- go func() { dirSyncCh <- syncDir(db.opt.Dir) }()
-
- err = writeLevel0Table(ft, fd)
- dirSyncErr := <-dirSyncCh
-
- if err != nil {
- db.elog.Errorf("ERROR while writing to level 0: %v", err)
- return err
- }
- if dirSyncErr != nil {
- // Do dir sync as best effort. No need to return due to an error there.
- db.elog.Errorf("ERROR while syncing level directory: %v", dirSyncErr)
- }
-
- tbl, err := table.OpenTable(fd, db.opt.TableLoadingMode, nil)
- if err != nil {
- db.elog.Printf("ERROR while opening table: %v", err)
- return err
- }
- // We own a ref on tbl.
- err = db.lc.addLevel0Table(tbl) // This will incrRef (if we don't error, sure)
- _ = tbl.DecrRef() // Releases our ref.
- return err
-}
-
-// flushMemtable must keep running until we send it an empty flushTask. If there
-// are errors while handling the flush task, we'll retry indefinitely.
-func (db *DB) flushMemtable(lc *y.Closer) error {
- defer lc.Done()
-
- for ft := range db.flushChan {
- if ft.mt == nil {
- // We close db.flushChan now, instead of sending a nil ft.mt.
- continue
- }
- for {
- err := db.handleFlushTask(ft)
- if err == nil {
- // Update s.imm. Need a lock.
- db.Lock()
- // This is a single-threaded operation. ft.mt corresponds to the head of
- // db.imm list. Once we flush it, we advance db.imm. The next ft.mt
- // which would arrive here would match db.imm[0], because we acquire a
- // lock over DB when pushing to flushChan.
- // TODO: This logic is dirty AF. Any change and this could easily break.
- y.AssertTrue(ft.mt == db.imm[0])
- db.imm = db.imm[1:]
- ft.mt.DecrRef() // Return memory.
- db.Unlock()
-
- break
- }
- // Encountered error. Retry indefinitely.
- db.opt.Errorf("Failure while flushing memtable to disk: %v. Retrying...\n", err)
- time.Sleep(time.Second)
- }
- }
- return nil
-}
-
-func exists(path string) (bool, error) {
- _, err := os.Stat(path)
- if err == nil {
- return true, nil
- }
- if os.IsNotExist(err) {
- return false, nil
- }
- return true, err
-}
-
-// This function does a file walk, calculates the size of vlog and sst files, and stores them in
-// y.LSMSize and y.VlogSize.
-func (db *DB) calculateSize() {
- newInt := func(val int64) *expvar.Int {
- v := new(expvar.Int)
- v.Add(val)
- return v
- }
-
- totalSize := func(dir string) (int64, int64) {
- var lsmSize, vlogSize int64
- err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- ext := filepath.Ext(path)
- if ext == ".sst" {
- lsmSize += info.Size()
- } else if ext == ".vlog" {
- vlogSize += info.Size()
- }
- return nil
- })
- if err != nil {
- db.elog.Printf("Got error while calculating total size of directory: %s", dir)
- }
- return lsmSize, vlogSize
- }
-
- lsmSize, vlogSize := totalSize(db.opt.Dir)
- y.LSMSize.Set(db.opt.Dir, newInt(lsmSize))
- // If valueDir is different from dir, we'd have to do another walk.
- if db.opt.ValueDir != db.opt.Dir {
- _, vlogSize = totalSize(db.opt.ValueDir)
- }
- y.VlogSize.Set(db.opt.ValueDir, newInt(vlogSize))
-}
-
-func (db *DB) updateSize(lc *y.Closer) {
- defer lc.Done()
-
- metricsTicker := time.NewTicker(time.Minute)
- defer metricsTicker.Stop()
-
- for {
- select {
- case <-metricsTicker.C:
- db.calculateSize()
- case <-lc.HasBeenClosed():
- return
- }
- }
-}
-
-// RunValueLogGC triggers a value log garbage collection.
-//
-// It picks value log files to perform GC based on statistics that are collected
-// during compactions. If no such statistics are available, then log files are
-// picked in random order. The process stops as soon as the first log file is
-// encountered which does not result in garbage collection.
-//
-// When a log file is picked, it is first sampled. If the sample shows that we
-// can discard at least discardRatio space of that file, it would be rewritten.
-//
-// If a call to RunValueLogGC results in no rewrites, then an ErrNoRewrite is
-// returned, indicating that the call resulted in no file rewrites.
-//
-// We recommend setting discardRatio to 0.5, thus indicating that a file be
-// rewritten if half the space can be discarded. This results in a lifetime
-// value log write amplification of 2 (1 from original write + 0.5 rewrite +
-// 0.25 + 0.125 + ... = 2). Setting it to a higher value would result in fewer
-// space reclaims, while setting it to a lower value would result in more space
-// reclaims at the cost of increased activity on the LSM tree. discardRatio
-// must be in the range (0.0, 1.0), both endpoints excluded, otherwise an
-// ErrInvalidRequest is returned.
-//
-// Only one GC is allowed at a time. If another value log GC is running, or DB
-// has been closed, this would return an ErrRejected.
-//
-// Note: Every time GC is run, it would produce a spike of activity on the LSM
-// tree.
-func (db *DB) RunValueLogGC(discardRatio float64) error {
- if discardRatio >= 1.0 || discardRatio <= 0.0 {
- return ErrInvalidRequest
- }
-
- // Find head on disk
- headKey := y.KeyWithTs(head, math.MaxUint64)
- // We need to pass the key with a timestamp; the LSM get removes the last 8 bytes and compares the key.
- val, err := db.lc.get(headKey, nil)
- if err != nil {
- return errors.Wrap(err, "Retrieving head from on-disk LSM")
- }
-
- var head valuePointer
- if len(val.Value) > 0 {
- head.Decode(val.Value)
- }
-
- // Pick a log file and run GC
- return db.vlog.runGC(discardRatio, head)
-}
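-
-// A common usage sketch, re-running GC until no more files are rewritten
-// (the loop exits on ErrNoRewrite or any other error):
-//
-//	for {
-//		if err := db.RunValueLogGC(0.5); err != nil {
-//			break
-//		}
-//	}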
-
-// Size returns the size of lsm and value log files in bytes. It can be used to decide how often to
-// call RunValueLogGC.
-func (db *DB) Size() (lsm, vlog int64) {
- if y.LSMSize.Get(db.opt.Dir) == nil {
- lsm, vlog = 0, 0
- return
- }
- lsm = y.LSMSize.Get(db.opt.Dir).(*expvar.Int).Value()
- vlog = y.VlogSize.Get(db.opt.ValueDir).(*expvar.Int).Value()
- return
-}
-
-// Sequence represents a Badger sequence.
-type Sequence struct {
- sync.Mutex
- db *DB
- key []byte
- next uint64
- leased uint64
- bandwidth uint64
-}
-
-// Next would return the next integer in the sequence, updating the lease by running a transaction
-// if needed.
-func (seq *Sequence) Next() (uint64, error) {
- seq.Lock()
- defer seq.Unlock()
- if seq.next >= seq.leased {
- if err := seq.updateLease(); err != nil {
- return 0, err
- }
- }
- val := seq.next
- seq.next++
- return val, nil
-}
-
-// Release the leased sequence to avoid wasted integers. This should be done right
-// before closing the associated DB. However, it is valid to use the sequence after
-// it has been released; doing so causes a new lease with full bandwidth.
-func (seq *Sequence) Release() error {
- seq.Lock()
- defer seq.Unlock()
- err := seq.db.Update(func(txn *Txn) error {
- item, err := txn.Get(seq.key)
- if err != nil {
- return err
- }
-
- var num uint64
- if err := item.Value(func(v []byte) error {
- num = binary.BigEndian.Uint64(v)
- return nil
- }); err != nil {
- return err
- }
-
- if num == seq.leased {
- var buf [8]byte
- binary.BigEndian.PutUint64(buf[:], seq.next)
- return txn.SetEntry(NewEntry(seq.key, buf[:]))
- }
-
- return nil
- })
- if err != nil {
- return err
- }
- seq.leased = seq.next
- return nil
-}
-
-func (seq *Sequence) updateLease() error {
- return seq.db.Update(func(txn *Txn) error {
- item, err := txn.Get(seq.key)
- if err == ErrKeyNotFound {
- seq.next = 0
- } else if err != nil {
- return err
- } else {
- var num uint64
- if err := item.Value(func(v []byte) error {
- num = binary.BigEndian.Uint64(v)
- return nil
- }); err != nil {
- return err
- }
- seq.next = num
- }
-
- lease := seq.next + seq.bandwidth
- var buf [8]byte
- binary.BigEndian.PutUint64(buf[:], lease)
- if err = txn.SetEntry(NewEntry(seq.key, buf[:])); err != nil {
- return err
- }
- seq.leased = lease
- return nil
- })
-}
-
-// GetSequence initiates a new sequence object, generating it from the lease stored in the
-// database, if available. A Sequence can be used to get a list of monotonically increasing
-// integers. Multiple sequences can be created by providing different keys. Bandwidth sets the
-// size of the lease, determining how many Next() requests can be served from memory.
-//
-// GetSequence is not supported on ManagedDB. Calling this would result in a panic.
-func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) {
- if db.opt.managedTxns {
- panic("Cannot use GetSequence with managedDB=true.")
- }
-
- switch {
- case len(key) == 0:
- return nil, ErrEmptyKey
- case bandwidth == 0:
- return nil, ErrZeroBandwidth
- }
- seq := &Sequence{
- db: db,
- key: key,
- next: 0,
- leased: 0,
- bandwidth: bandwidth,
- }
- err := seq.updateLease()
- return seq, err
-}
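-
-// A minimal usage sketch (the key and bandwidth here are illustrative; error
-// handling elided):
-//
-//	seq, _ := db.GetSequence([]byte("user-id"), 1000)
-//	defer seq.Release()
-//	num, _ := seq.Next()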
-
-// Tables gets the TableInfo objects from the level controller. If withKeysCount
-// is true, TableInfo objects also contain counts of keys for the tables.
-func (db *DB) Tables(withKeysCount bool) []TableInfo {
- return db.lc.getTableInfo(withKeysCount)
-}
-
-// KeySplits can be used to get rough key ranges to divide up iteration over
-// the DB.
-func (db *DB) KeySplits(prefix []byte) []string {
- var splits []string
- // We just want table ranges here and not keys count.
- for _, ti := range db.Tables(false) {
- // We don't use ti.Left, because that has a tendency to store !badger
- // keys.
- if bytes.HasPrefix(ti.Right, prefix) {
- splits = append(splits, string(ti.Right))
- }
- }
- sort.Strings(splits)
- return splits
-}
-
-// MaxBatchCount returns max possible entries in batch
-func (db *DB) MaxBatchCount() int64 {
- return db.opt.maxBatchCount
-}
-
-// MaxBatchSize returns max possible batch size
-func (db *DB) MaxBatchSize() int64 {
- return db.opt.maxBatchSize
-}
-
-func (db *DB) stopMemoryFlush() {
- // Stop memtable flushes.
- if db.closers.memtable != nil {
- close(db.flushChan)
- db.closers.memtable.SignalAndWait()
- }
-}
-
-func (db *DB) stopCompactions() {
- // Stop compactions.
- if db.closers.compactors != nil {
- db.closers.compactors.SignalAndWait()
- }
-}
-
-func (db *DB) startCompactions() {
- // Resume compactions.
- if db.closers.compactors != nil {
- db.closers.compactors = y.NewCloser(1)
- db.lc.startCompact(db.closers.compactors)
- }
-}
-
-func (db *DB) startMemoryFlush() {
- // Start memory flusher.
- if db.closers.memtable != nil {
- db.flushChan = make(chan flushTask, db.opt.NumMemtables)
- db.closers.memtable = y.NewCloser(1)
- go func() {
- _ = db.flushMemtable(db.closers.memtable)
- }()
- }
-}
-
-// Flatten can be used to force compactions on the LSM tree so all the tables fall on the same
-// level. This ensures that all the versions of keys are colocated and not split across multiple
-// levels, which is necessary after a restore from backup. During Flatten, live compactions are
-// stopped. Ideally, no writes are going on during Flatten. Otherwise, it would create competition
-// between flattening the tree and new tables being created at level zero.
-func (db *DB) Flatten(workers int) error {
- db.stopCompactions()
- defer db.startCompactions()
-
- compactAway := func(cp compactionPriority) error {
- db.opt.Infof("Attempting to compact with %+v\n", cp)
- errCh := make(chan error, 1)
- for i := 0; i < workers; i++ {
- go func() {
- errCh <- db.lc.doCompact(cp)
- }()
- }
- var success int
- var rerr error
- for i := 0; i < workers; i++ {
- err := <-errCh
- if err != nil {
- rerr = err
- db.opt.Warningf("While running doCompact with %+v. Error: %v\n", cp, err)
- } else {
- success++
- }
- }
- if success == 0 {
- return rerr
- }
- // At least one compaction succeeded, so we consider this a success.
- db.opt.Infof("%d compactor(s) succeeded. One or more tables from level %d compacted.\n",
- success, cp.level)
- return nil
- }
-
- hbytes := func(sz int64) string {
- return humanize.Bytes(uint64(sz))
- }
-
- for {
- db.opt.Infof("\n")
- var levels []int
- for i, l := range db.lc.levels {
- sz := l.getTotalSize()
- db.opt.Infof("Level: %d. %8s Size. %8s Max.\n",
- i, hbytes(l.getTotalSize()), hbytes(l.maxTotalSize))
- if sz > 0 {
- levels = append(levels, i)
- }
- }
- if len(levels) <= 1 {
- prios := db.lc.pickCompactLevels()
- if len(prios) == 0 || prios[0].score <= 1.0 {
- db.opt.Infof("All tables consolidated into one level. Flattening done.\n")
- return nil
- }
- if err := compactAway(prios[0]); err != nil {
- return err
- }
- continue
- }
- // Create an artificial compaction priority, to ensure that we compact the level.
- cp := compactionPriority{level: levels[0], score: 1.71}
- if err := compactAway(cp); err != nil {
- return err
- }
- }
-}
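-
-// Example usage (a sketch, typically run right after restoring from a backup,
-// with no concurrent writes going on):
-//
-//	if err := db.Flatten(3); err != nil { // 3 compaction workers
-//		return err
-//	}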
-
-func (db *DB) blockWrite() {
- // Stop accepting new writes.
- atomic.StoreInt32(&db.blockWrites, 1)
-
- // Make all pending writes finish. The following will also close writeCh.
- db.closers.writes.SignalAndWait()
- db.opt.Infof("Writes flushed. Stopping compactions now...")
-}
-
-func (db *DB) unblockWrite() {
- db.closers.writes = y.NewCloser(1)
- go db.doWrites(db.closers.writes)
-
- // Resume writes.
- atomic.StoreInt32(&db.blockWrites, 0)
-}
-
-func (db *DB) prepareToDrop() func() {
- if db.opt.ReadOnly {
- panic("Attempting to drop data in read-only mode.")
- }
- // To prepare for a drop, we block incoming writes and write whatever has been
- // accepted so far to the DB. Then we flush all pending flush tasks, so that
- // we don't miss any entries.
- db.blockWrite()
- reqs := make([]*request, 0, 10)
- for {
- select {
- case r := <-db.writeCh:
- reqs = append(reqs, r)
- default:
- if err := db.writeRequests(reqs); err != nil {
- db.opt.Errorf("writeRequests: %v", err)
- }
- db.stopMemoryFlush()
- return func() {
- db.opt.Infof("Resuming writes")
- db.startMemoryFlush()
- db.unblockWrite()
- }
- }
- }
-}
-
-// DropAll would drop all the data stored in Badger. It does this in the following way.
-// - Stop accepting new writes.
-// - Pause memtable flushes and compactions.
-// - Pick all tables from all levels, create a changeset to delete all these
-// tables and apply it to manifest.
-// - Pick all log files from value log, and delete all of them. Restart value log files from zero.
-// - Resume memtable flushes and compactions.
-//
-// NOTE: DropAll is resilient to concurrent writes, but not to reads. It is up to the user to not do
-// any reads while DropAll is going on, otherwise they may result in panics. Ideally, both reads and
-// writes are paused before running DropAll, and resumed after it is finished.
-func (db *DB) DropAll() error {
- f, err := db.dropAll()
- defer f()
- if err != nil {
- return err
- }
- return nil
-}
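-
-// Example usage (a sketch; as noted above, the caller is responsible for
-// pausing reads around the call):
-//
-//	if err := db.DropAll(); err != nil {
-//		return err
-//	}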
-
-func (db *DB) dropAll() (func(), error) {
- db.opt.Infof("DropAll called. Blocking writes...")
- f := db.prepareToDrop()
- // prepareToDrop stops all incoming writes and flushes any pending flush tasks.
- // Before we drop, we also stop compactions, because all the data is going to
- // be deleted anyway.
- db.stopCompactions()
- resume := func() {
- db.startCompactions()
- f()
- }
- // Block all foreign interactions with memory tables.
- db.Lock()
- defer db.Unlock()
-
- // Remove in-memory tables. Calling DecrRef for safety; it is unclear whether these calls are strictly needed.
- db.mt.DecrRef()
- for _, mt := range db.imm {
- mt.DecrRef()
- }
- db.imm = db.imm[:0]
- db.mt = skl.NewSkiplist(arenaSize(db.opt)) // Set it up for future writes.
-
- num, err := db.lc.dropTree()
- if err != nil {
- return resume, err
- }
- db.opt.Infof("Deleted %d SSTables. Now deleting value logs...\n", num)
-
- num, err = db.vlog.dropAll()
- if err != nil {
- return resume, err
- }
- db.vhead = valuePointer{} // Zero it out.
- db.lc.nextFileID = 1
- db.opt.Infof("Deleted %d value log files. DropAll done.\n", num)
- return resume, nil
-}
-
-// DropPrefix would drop all the keys with the provided prefix. It does this in the following way:
-// - Stop accepting new writes.
-// - Stop memtable flushes before acquiring the lock, because a memtable flush
-// stalls on the same lock we acquire here, which would lead to a deadlock.
-// - Flush out all memtables, skipping over keys with the given prefix, Kp.
-// - Write out the value log header to memtables when flushing, so we don't accidentally bring Kp
-// back after a restart.
-// - Stop compaction.
-// - Compact L0->L1, skipping over Kp.
-// - Compact rest of the levels, Li->Li, picking tables which have Kp.
-// - Resume memtable flushes, compactions and writes.
-func (db *DB) DropPrefix(prefixes ...[]byte) error {
- db.opt.Infof("DropPrefix Called")
- f := db.prepareToDrop()
- defer f()
- // Block all foreign interactions with memory tables.
- db.Lock()
- defer db.Unlock()
-
- db.imm = append(db.imm, db.mt)
- for _, memtable := range db.imm {
- if memtable.Empty() {
- memtable.DecrRef()
- continue
- }
- task := flushTask{
- mt: memtable,
- // Ensure that the head of value log gets persisted to disk.
- vptr: db.vhead,
- dropPrefixes: prefixes,
- }
- db.opt.Debugf("Flushing memtable")
- if err := db.handleFlushTask(task); err != nil {
- db.opt.Errorf("While trying to flush memtable: %v", err)
- return err
- }
- memtable.DecrRef()
- }
- db.stopCompactions()
- defer db.startCompactions()
- db.imm = db.imm[:0]
- db.mt = skl.NewSkiplist(arenaSize(db.opt))
-
- // Drop prefixes from the levels.
- if err := db.lc.dropPrefixes(prefixes); err != nil {
- return err
- }
- db.opt.Infof("DropPrefix done")
- return nil
-}
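-
-// Example usage (a sketch): dropping every key under two hypothetical
-// application prefixes in a single call.
-//
-//	if err := db.DropPrefix([]byte("sessions/"), []byte("cache/")); err != nil {
-//		return err
-//	}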
-
-// KVList contains a list of key-value pairs.
-type KVList = pb.KVList
-
-// Subscribe can be used to watch key changes for the given key prefixes.
-// At least one prefix should be passed, or an error will be returned.
-// You can use an empty prefix to monitor all changes to the DB.
-// This function blocks until the given context is done or an error occurs.
-// The given function will be called with a new KVList containing the modified keys and the
-// corresponding values.
-func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, prefixes ...[]byte) error {
- if cb == nil {
- return ErrNilCallback
- }
-
- c := y.NewCloser(1)
- recvCh, id := db.pub.newSubscriber(c, prefixes...)
- slurp := func(batch *pb.KVList) error {
- for {
- select {
- case kvs := <-recvCh:
- batch.Kv = append(batch.Kv, kvs.Kv...)
- default:
- if len(batch.GetKv()) > 0 {
- return cb(batch)
- }
- return nil
- }
- }
- }
- for {
- select {
- case <-c.HasBeenClosed():
- // No need to delete here. Closer will be called only while
- // closing DB. Subscriber will be deleted by cleanSubscribers.
- // Drain any pending updates before returning.
- err := slurp(new(pb.KVList))
- c.Done()
- return err
- case <-ctx.Done():
- c.Done()
- // Delete the subscriber to avoid further updates.
- db.pub.deleteSubscriber(id)
- return ctx.Err()
- case batch := <-recvCh:
- err := slurp(batch)
- if err != nil {
- c.Done()
- // Delete the subscriber if the callback returns an error.
- db.pub.deleteSubscriber(id)
- return err
- }
- }
- }
-}
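-
-// Example usage (a sketch; assumes db is an open *DB, ctx is a
-// context.Context, and "orders/" is an application-chosen prefix): print
-// every change under the prefix until ctx is cancelled.
-//
-//	err := db.Subscribe(ctx, func(kvs *KVList) error {
-//		for _, kv := range kvs.Kv {
-//			fmt.Printf("%s = %s\n", kv.Key, kv.Value)
-//		}
-//		return nil
-//	}, []byte("orders/"))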
diff --git a/vendor/github.com/dgraph-io/badger/dir_plan9.go b/vendor/github.com/dgraph-io/badger/dir_plan9.go
deleted file mode 100644
index ad323d70..00000000
--- a/vendor/github.com/dgraph-io/badger/dir_plan9.go
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/pkg/errors"
-)
-
-// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part
-// of the locking mechanism, it's just advisory.
-type directoryLockGuard struct {
- // File handle on the directory, which we've locked.
- f *os.File
- // The absolute path to our pid file.
- path string
-}
-
-// acquireDirectoryLock gets a lock on the directory.
-// It will also write our pid to dirPath/pidFileName for convenience.
-// readOnly is not supported on Plan 9.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (
- *directoryLockGuard, error) {
- if readOnly {
- return nil, ErrPlan9NotSupported
- }
-
- // Convert to absolute path so that Release still works even if we do an unbalanced
- // chdir in the meantime.
- absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
- if err != nil {
- return nil, errors.Wrap(err, "cannot get absolute path for pid lock file")
- }
-
- // If the file was unpacked or created by some other program, it might not
- // have the ModeExclusive bit set. Set it before we call OpenFile, so that we
- // can be confident that a successful OpenFile implies exclusive use.
- //
- // OpenFile fails if the file ModeExclusive bit set *and* the file is already open.
- // So, if the file is closed when the DB crashed, we're fine. When the process
- // that was managing the DB crashes, the OS will close the file for us.
- //
- // This bit of code is copied from Go's lockedfile internal package:
- // https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L58
- if fi, err := os.Stat(absPidFilePath); err == nil {
- if fi.Mode()&os.ModeExclusive == 0 {
- if err := os.Chmod(absPidFilePath, fi.Mode()|os.ModeExclusive); err != nil {
- return nil, errors.Wrapf(err, "could not set exclusive mode bit")
- }
- }
- } else if !os.IsNotExist(err) {
- return nil, err
- }
- f, err := os.OpenFile(absPidFilePath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666|os.ModeExclusive)
- if err != nil {
- if isLocked(err) {
- return nil, errors.Wrapf(err,
- "Cannot open pid lock file %q. Another process is using this Badger database",
- absPidFilePath)
- }
- return nil, errors.Wrapf(err, "Cannot open pid lock file %q", absPidFilePath)
- }
-
- if _, err = fmt.Fprintf(f, "%d\n", os.Getpid()); err != nil {
- f.Close()
- return nil, errors.Wrapf(err, "could not write pid")
- }
- return &directoryLockGuard{f, absPidFilePath}, nil
-}
-
-// Release deletes the pid file and releases our lock on the directory.
-func (guard *directoryLockGuard) release() error {
- // It's important that we remove the pid file first.
- err := os.Remove(guard.path)
-
- if closeErr := guard.f.Close(); err == nil {
- err = closeErr
- }
- guard.path = ""
- guard.f = nil
-
- return err
-}
-
-// openDir opens a directory for syncing.
-func openDir(path string) (*os.File, error) { return os.Open(path) }
-
-// When you create or delete a file, you have to ensure the directory entry for the file is synced
-// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync,
-// or see https://github.com/coreos/etcd/issues/6368 for an example.)
-func syncDir(dir string) error {
- f, err := openDir(dir)
- if err != nil {
- return errors.Wrapf(err, "While opening directory: %s.", dir)
- }
-
- err = f.Sync()
- closeErr := f.Close()
- if err != nil {
- return errors.Wrapf(err, "While syncing directory: %s.", dir)
- }
- return errors.Wrapf(closeErr, "While closing directory: %s.", dir)
-}
-
-// Opening an exclusive-use file returns an error.
-// The expected error strings are:
-//
-// - "open/create -- file is locked" (cwfs, kfs)
-// - "exclusive lock" (fossil)
-// - "exclusive use file already open" (ramfs)
-//
-// See https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L16
-var lockedErrStrings = [...]string{
- "file is locked",
- "exclusive lock",
- "exclusive use file already open",
-}
-
-// Even though plan9 doesn't support the Lock/RLock/Unlock functions to
-// manipulate already-open files, IsLocked is still meaningful: os.OpenFile
-// itself may return errors that indicate that a file with the ModeExclusive bit
-// set is already open.
-func isLocked(err error) bool {
- s := err.Error()
-
- for _, frag := range lockedErrStrings {
- if strings.Contains(s, frag) {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/dgraph-io/badger/dir_unix.go b/vendor/github.com/dgraph-io/badger/dir_unix.go
deleted file mode 100644
index a3fef669..00000000
--- a/vendor/github.com/dgraph-io/badger/dir_unix.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// +build !windows,!plan9
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
- "golang.org/x/sys/unix"
-)
-
-// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part
-// of the locking mechanism, it's just advisory.
-type directoryLockGuard struct {
- // File handle on the directory, which we've flocked.
- f *os.File
- // The absolute path to our pid file.
- path string
- // Was this a shared lock for a read-only database?
- readOnly bool
-}
-
-// acquireDirectoryLock gets a lock on the directory (using flock). If
-// this is not read-only, it will also write our pid to
-// dirPath/pidFileName for convenience.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (
- *directoryLockGuard, error) {
- // Convert to absolute path so that Release still works even if we do an unbalanced
- // chdir in the meantime.
- absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
- if err != nil {
- return nil, errors.Wrap(err, "cannot get absolute path for pid lock file")
- }
- f, err := os.Open(dirPath)
- if err != nil {
- return nil, errors.Wrapf(err, "cannot open directory %q", dirPath)
- }
- opts := unix.LOCK_EX | unix.LOCK_NB
- if readOnly {
- opts = unix.LOCK_SH | unix.LOCK_NB
- }
-
- err = unix.Flock(int(f.Fd()), opts)
- if err != nil {
- f.Close()
- return nil, errors.Wrapf(err,
- "Cannot acquire directory lock on %q. Another process is using this Badger database.",
- dirPath)
- }
-
- if !readOnly {
- // Yes, we happily overwrite a pre-existing pid file. We're the
- // only read-write badger process using this directory.
- err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666)
- if err != nil {
- f.Close()
- return nil, errors.Wrapf(err,
- "Cannot write pid file %q", absPidFilePath)
- }
- }
- return &directoryLockGuard{f, absPidFilePath, readOnly}, nil
-}
-
-// Release deletes the pid file and releases our lock on the directory.
-func (guard *directoryLockGuard) release() error {
- var err error
- if !guard.readOnly {
- // It's important that we remove the pid file first.
- err = os.Remove(guard.path)
- }
-
- if closeErr := guard.f.Close(); err == nil {
- err = closeErr
- }
- guard.path = ""
- guard.f = nil
-
- return err
-}
-
-// openDir opens a directory for syncing.
-func openDir(path string) (*os.File, error) { return os.Open(path) }
-
-// When you create or delete a file, you have to ensure the directory entry for the file is synced
-// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync,
-// or see https://github.com/coreos/etcd/issues/6368 for an example.)
-func syncDir(dir string) error {
- f, err := openDir(dir)
- if err != nil {
- return errors.Wrapf(err, "While opening directory: %s.", dir)
- }
- err = y.FileSync(f)
- closeErr := f.Close()
- if err != nil {
- return errors.Wrapf(err, "While syncing directory: %s.", dir)
- }
- return errors.Wrapf(closeErr, "While closing directory: %s.", dir)
-}
diff --git a/vendor/github.com/dgraph-io/badger/dir_windows.go b/vendor/github.com/dgraph-io/badger/dir_windows.go
deleted file mode 100644
index 60f982e2..00000000
--- a/vendor/github.com/dgraph-io/badger/dir_windows.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// +build windows
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-// OpenDir opens a directory in windows with write access for syncing.
-import (
- "os"
- "path/filepath"
- "syscall"
-
- "github.com/pkg/errors"
-)
-
-// FILE_ATTRIBUTE_TEMPORARY - A file that is being used for temporary storage.
-// FILE_FLAG_DELETE_ON_CLOSE - The file is to be deleted immediately after all of its handles are
-// closed, which includes the specified handle and any other open or duplicated handles.
-// See: https://docs.microsoft.com/en-us/windows/desktop/FileIO/file-attribute-constants
-// NOTE: Added here to avoid importing golang.org/x/sys/windows
-const (
- FILE_ATTRIBUTE_TEMPORARY = 0x00000100
- FILE_FLAG_DELETE_ON_CLOSE = 0x04000000
-)
-
-func openDir(path string) (*os.File, error) {
- fd, err := openDirWin(path)
- if err != nil {
- return nil, err
- }
- return os.NewFile(uintptr(fd), path), nil
-}
-
-func openDirWin(path string) (fd syscall.Handle, err error) {
- if len(path) == 0 {
- return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
- }
- pathp, err := syscall.UTF16PtrFromString(path)
- if err != nil {
- return syscall.InvalidHandle, err
- }
- access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
- sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
- createmode := uint32(syscall.OPEN_EXISTING)
- fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
- return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
-}
-
-// DirectoryLockGuard holds a lock on the directory.
-type directoryLockGuard struct {
- h syscall.Handle
- path string
-}
-
-// AcquireDirectoryLock acquires exclusive access to a directory.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) {
- if readOnly {
- return nil, ErrWindowsNotSupported
- }
-
- // Convert to absolute path so that Release still works even if we do an unbalanced
- // chdir in the meantime.
- absLockFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
- if err != nil {
- return nil, errors.Wrap(err, "Cannot get absolute path for pid lock file")
- }
-
- // This call creates a file handle in memory that only one process can use at a time. When
- // that process ends, the file is deleted by the system.
- // FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory.
- // FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete
- // the file when all processes holding the handle are closed.
- // XXX: this works but it's a bit clunky. I'd prefer to use LockFileEx, but it needs the unsafe package.
- h, err := syscall.CreateFile(
- syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil,
- syscall.OPEN_ALWAYS,
- uint32(FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE),
- 0)
- if err != nil {
- return nil, errors.Wrapf(err,
- "Cannot create lock file %q. Another process is using this Badger database",
- absLockFilePath)
- }
-
- return &directoryLockGuard{h: h, path: absLockFilePath}, nil
-}
-
-// Release removes the directory lock.
-func (g *directoryLockGuard) release() error {
- g.path = ""
- return syscall.CloseHandle(g.h)
-}
-
-// Windows doesn't support syncing directories to the file system. See
-// https://github.com/dgraph-io/badger/issues/699#issuecomment-504133587 for more details.
-func syncDir(dir string) error { return nil }
diff --git a/vendor/github.com/dgraph-io/badger/doc.go b/vendor/github.com/dgraph-io/badger/doc.go
deleted file mode 100644
index 83dc9a28..00000000
--- a/vendor/github.com/dgraph-io/badger/doc.go
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
-Package badger implements an embeddable, simple and fast key-value database,
-written in pure Go. It is designed to be highly performant for both reads and
-writes simultaneously. Badger uses Multi-Version Concurrency Control (MVCC), and
-supports transactions. It runs transactions concurrently, with serializable
-snapshot isolation guarantees.
-
-Badger uses an LSM tree along with a value log to separate keys from values,
-hence reducing both write amplification and the size of the LSM tree. This
-allows the LSM tree to be served entirely from RAM, while the values are served
-from SSD.
-
-
-Usage
-
-Badger has the following main types: DB, Txn, Item and Iterator. DB contains
-keys that are associated with values. It must be opened with the appropriate
-options before it can be accessed.
-
-All operations happen inside a Txn. Txn represents a transaction, which can
-be read-only or read-write. Read-only transactions can read values for a
-given key (which are returned inside an Item), or iterate over a set of
-key-value pairs using an Iterator (which are returned as Item type values as
-well). Read-write transactions can also update and delete keys from the DB.
-
-See the examples for more usage details.
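-
-A minimal usage sketch (the path and key are illustrative, DefaultOptions
-taking a directory path is assumed for this release, and production code
-should handle every error):
-
-	db, err := Open(DefaultOptions("/tmp/badger"))
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer db.Close()
-
-	err = db.Update(func(txn *Txn) error {
-		return txn.Set([]byte("answer"), []byte("42"))
-	})
-
-	err = db.View(func(txn *Txn) error {
-		item, err := txn.Get([]byte("answer"))
-		if err != nil {
-			return err
-		}
-		return item.Value(func(val []byte) error {
-			fmt.Printf("answer = %s\n", val)
-			return nil
-		})
-	})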
-*/
-package badger
diff --git a/vendor/github.com/dgraph-io/badger/errors.go b/vendor/github.com/dgraph-io/badger/errors.go
deleted file mode 100644
index 933cd130..00000000
--- a/vendor/github.com/dgraph-io/badger/errors.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "math"
-
- "github.com/pkg/errors"
-)
-
-const (
- // ValueThresholdLimit is the maximum permissible value of opt.ValueThreshold.
- ValueThresholdLimit = math.MaxUint16 - 16 + 1
-)
-
-var (
- // ErrValueLogSize is returned when opt.ValueLogFileSize option is not within the valid
- // range.
- ErrValueLogSize = errors.New("Invalid ValueLogFileSize, must be between 1MB and 2GB")
-
- // ErrValueThreshold is returned when ValueThreshold is set to a value close to or greater than
- // uint16.
- ErrValueThreshold = errors.Errorf(
- "Invalid ValueThreshold, must be less than %d", ValueThresholdLimit)
-
- // ErrKeyNotFound is returned when key isn't found on a txn.Get.
- ErrKeyNotFound = errors.New("Key not found")
-
- // ErrTxnTooBig is returned if too many writes are packed into a single transaction.
- ErrTxnTooBig = errors.New("Txn is too big to fit into one request")
-
- // ErrConflict is returned when a transaction conflicts with another transaction. This can
- // happen if the rows that were read had been updated concurrently by another transaction.
- ErrConflict = errors.New("Transaction Conflict. Please retry")
-
- // ErrReadOnlyTxn is returned if an update function is called on a read-only transaction.
- ErrReadOnlyTxn = errors.New("No sets or deletes are allowed in a read-only transaction")
-
- // ErrDiscardedTxn is returned if a previously discarded transaction is re-used.
- ErrDiscardedTxn = errors.New("This transaction has been discarded. Create a new one")
-
- // ErrEmptyKey is returned if an empty key is passed on an update function.
- ErrEmptyKey = errors.New("Key cannot be empty")
-
- // ErrInvalidKey is returned if the key has a special !badger! prefix,
- // reserved for internal usage.
- ErrInvalidKey = errors.New("Key is using a reserved !badger! prefix")
-
- // ErrRetry is returned when a log file containing the value is not found.
- // This usually indicates that it may have been garbage collected, and the
- // operation needs to be retried.
- ErrRetry = errors.New("Unable to find log file. Please retry")
-
- // ErrThresholdZero is returned if threshold is set to zero, and value log GC is called.
- // In such a case, GC can't be run.
- ErrThresholdZero = errors.New(
- "Value log GC can't run because threshold is set to zero")
-
- // ErrNoRewrite is returned if a call for value log GC doesn't result in a log file rewrite.
- ErrNoRewrite = errors.New(
- "Value log GC attempt didn't result in any cleanup")
-
- // ErrRejected is returned if a value log GC is called either while another GC is running, or
- // after DB::Close has been called.
- ErrRejected = errors.New("Value log GC request rejected")
-
- // ErrInvalidRequest is returned if the user request is invalid.
- ErrInvalidRequest = errors.New("Invalid request")
-
- // ErrManagedTxn is returned if the user tries to use an API which isn't
- // allowed due to external management of transactions, when using ManagedDB.
- ErrManagedTxn = errors.New(
- "Invalid API request. Not allowed to perform this action using ManagedDB")
-
- // ErrInvalidDump is returned if a data dump made previously cannot be loaded into the database.
- ErrInvalidDump = errors.New("Data dump cannot be read")
-
- // ErrZeroBandwidth is returned if the user passes in zero bandwidth for sequence.
- ErrZeroBandwidth = errors.New("Bandwidth must be greater than zero")
-
- // ErrInvalidLoadingMode is returned when opt.ValueLogLoadingMode option is not
- // within the valid range
- ErrInvalidLoadingMode = errors.New("Invalid ValueLogLoadingMode, must be FileIO or MemoryMap")
-
- // ErrReplayNeeded is returned when opt.ReadOnly is set but the
- // database requires a value log replay.
- ErrReplayNeeded = errors.New("Database was not properly closed, cannot open read-only")
-
- // ErrWindowsNotSupported is returned when opt.ReadOnly is used on Windows
- ErrWindowsNotSupported = errors.New("Read-only mode is not supported on Windows")
-
- // ErrPlan9NotSupported is returned when opt.ReadOnly is used on Plan 9
- ErrPlan9NotSupported = errors.New("Read-only mode is not supported on Plan 9")
-
- // ErrTruncateNeeded is returned when the value log gets corrupt, and requires truncation of
- // corrupt data to allow Badger to run properly.
- ErrTruncateNeeded = errors.New(
- "Value log truncate required to run DB. This might result in data loss")
-
- // ErrBlockedWrites is returned if the user called DropAll. During the process of dropping all
- // data from Badger, we stop accepting new writes, by returning this error.
- ErrBlockedWrites = errors.New("Writes are blocked, possibly due to DropAll or Close")
-
- // ErrNilCallback is returned when subscriber's callback is nil.
- ErrNilCallback = errors.New("Callback cannot be nil")
-)
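-
-// Example (a sketch): distinguishing a missing key from a real failure by
-// checking for ErrKeyNotFound inside a read-only transaction.
-//
-//	err := db.View(func(txn *Txn) error {
-//		_, err := txn.Get([]byte("maybe-missing"))
-//		if err == ErrKeyNotFound {
-//			return nil // absent, but not a failure for our purposes
-//		}
-//		return err
-//	})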
diff --git a/vendor/github.com/dgraph-io/badger/histogram.go b/vendor/github.com/dgraph-io/badger/histogram.go
deleted file mode 100644
index d8c94bb7..00000000
--- a/vendor/github.com/dgraph-io/badger/histogram.go
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "math"
-)
-
-// PrintHistogram builds and displays the key-value size histogram.
-// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
-// considered for creating the histogram
-func (db *DB) PrintHistogram(keyPrefix []byte) {
- if db == nil {
- fmt.Println("\nCannot build histogram: DB is nil.")
- return
- }
- histogram := db.buildHistogram(keyPrefix)
- fmt.Printf("Histogram of key sizes (in bytes)\n")
- histogram.keySizeHistogram.printHistogram()
- fmt.Printf("Histogram of value sizes (in bytes)\n")
- histogram.valueSizeHistogram.printHistogram()
-}
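-
-// Example usage (a sketch; the prefix is illustrative):
-//
-//	db.PrintHistogram(nil)                 // histograms over the whole DB
-//	db.PrintHistogram([]byte("sessions/")) // only keys under one prefix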
-
-// histogramData stores information about a histogram
-type histogramData struct {
- bins []int64
- countPerBin []int64
- totalCount int64
- min int64
- max int64
- sum int64
-}
-
-// sizeHistogram contains keySize histogram and valueSize histogram
-type sizeHistogram struct {
- keySizeHistogram, valueSizeHistogram histogramData
-}
-
-// newSizeHistogram returns a new instance of sizeHistogram with
-// properly initialized fields.
-func newSizeHistogram() *sizeHistogram {
- // TODO(ibrahim): find appropriate bin size.
- keyBins := createHistogramBins(1, 16)
- valueBins := createHistogramBins(1, 30)
- return &sizeHistogram{
- keySizeHistogram: histogramData{
- bins: keyBins,
- countPerBin: make([]int64, len(keyBins)+1),
- max: math.MinInt64,
- min: math.MaxInt64,
- sum: 0,
- },
- valueSizeHistogram: histogramData{
- bins: valueBins,
- countPerBin: make([]int64, len(valueBins)+1),
- max: math.MinInt64,
- min: math.MaxInt64,
- sum: 0,
- },
- }
-}
-
-// createHistogramBins creates bins for a histogram. The bin sizes are powers
-// of two of the form [2^min_exponent, ..., 2^max_exponent].
-func createHistogramBins(minExponent, maxExponent uint32) []int64 {
- var bins []int64
- for i := minExponent; i <= maxExponent; i++ {
- bins = append(bins, int64(1)<<i)
- }
- return bins
-}
-
-// Update adjusts the min, max, sum, total count, and bin counts to account
-// for the given value.
-func (histogram *histogramData) Update(value int64) {
- if value > histogram.max {
- histogram.max = value
- }
- if value < histogram.min {
- histogram.min = value
- }
-
- histogram.sum += value
- histogram.totalCount++
-
- for index := 0; index <= len(histogram.bins); index++ {
- // Allocate value in the last buckets if we reached the end of the Bounds array.
- if index == len(histogram.bins) {
- histogram.countPerBin[index]++
- break
- }
-
- // Check if the value should be added to the "index" bin
- if value < int64(histogram.bins[index]) {
- histogram.countPerBin[index]++
- break
- }
- }
-}
-
-// buildHistogram builds the key-value size histogram.
-// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
-// considered for creating the histogram
-func (db *DB) buildHistogram(keyPrefix []byte) *sizeHistogram {
- txn := db.NewTransaction(false)
- defer txn.Discard()
-
- itr := txn.NewIterator(DefaultIteratorOptions)
- defer itr.Close()
-
- badgerHistogram := newSizeHistogram()
-
- // Collect key and value sizes.
- for itr.Seek(keyPrefix); itr.ValidForPrefix(keyPrefix); itr.Next() {
- item := itr.Item()
- badgerHistogram.keySizeHistogram.Update(item.KeySize())
- badgerHistogram.valueSizeHistogram.Update(item.ValueSize())
- }
- return badgerHistogram
-}
-
-// printHistogram prints the histogram data in a human-readable format.
-func (histogram histogramData) printHistogram() {
- fmt.Printf("Total count: %d\n", histogram.totalCount)
- fmt.Printf("Min value: %d\n", histogram.min)
- fmt.Printf("Max value: %d\n", histogram.max)
- fmt.Printf("Mean: %.2f\n", float64(histogram.sum)/float64(histogram.totalCount))
- fmt.Printf("%24s %9s\n", "Range", "Count")
-
- numBins := len(histogram.bins)
- for index, count := range histogram.countPerBin {
- if count == 0 {
- continue
- }
-
- // The last bin represents the bin that contains the range from
- // the last bin up to infinity so it's processed differently than the
- // other bins.
- if index == len(histogram.countPerBin)-1 {
- lowerBound := int(histogram.bins[numBins-1])
- fmt.Printf("[%10d, %10s) %9d\n", lowerBound, "infinity", count)
- continue
- }
-
- upperBound := int(histogram.bins[index])
- lowerBound := 0
- if index > 0 {
- lowerBound = int(histogram.bins[index-1])
- }
-
- fmt.Printf("[%10d, %10d) %9d\n", lowerBound, upperBound, count)
- }
- fmt.Println()
-}
diff --git a/vendor/github.com/dgraph-io/badger/iterator.go b/vendor/github.com/dgraph-io/badger/iterator.go
deleted file mode 100644
index c11f2549..00000000
--- a/vendor/github.com/dgraph-io/badger/iterator.go
+++ /dev/null
@@ -1,736 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "fmt"
- "hash/crc32"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/options"
- "github.com/dgraph-io/badger/table"
-
- "github.com/dgraph-io/badger/y"
-)
-
-type prefetchStatus uint8
-
-const (
- prefetched prefetchStatus = iota + 1
-)
-
-// Item is returned during iteration. The output of both Key() and Value() is only valid until
-// iterator.Next() is called.
-type Item struct {
- status prefetchStatus
- err error
- wg sync.WaitGroup
- db *DB
- key []byte
- vptr []byte
- meta byte // We need to store meta to know about bitValuePointer.
- userMeta byte
- expiresAt uint64
- val []byte
- slice *y.Slice // Used only during prefetching.
- next *Item
- version uint64
- txn *Txn
-}
-
-// String returns a string representation of Item
-func (item *Item) String() string {
- return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta)
-}
-
-// Key returns the key.
-//
-// Key is only valid as long as item is valid, or transaction is valid. If you need to use it
-// outside its validity, please use KeyCopy.
-func (item *Item) Key() []byte {
- return item.key
-}
-
-// KeyCopy returns a copy of the key of the item, writing it to dst slice.
-// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and
-// returned.
-func (item *Item) KeyCopy(dst []byte) []byte {
- return y.SafeCopy(dst, item.key)
-}
-
-// Version returns the commit timestamp of the item.
-func (item *Item) Version() uint64 {
- return item.version
-}
-
-// Value retrieves the value of the item from the value log.
-//
-// This method must be called within a transaction. Calling it outside a
-// transaction is considered undefined behavior. If an iterator is being used,
-// then Item.Value() is defined in the current iteration only, because items are
-// reused.
-//
-// If you need to use a value outside a transaction, please use Item.ValueCopy
-// instead, or copy it yourself. Value might change once discard or commit is called.
-// Use ValueCopy if you want to do a Set after Get.
-func (item *Item) Value(fn func(val []byte) error) error {
- item.wg.Wait()
- if item.status == prefetched {
- if item.err == nil && fn != nil {
- if err := fn(item.val); err != nil {
- return err
- }
- }
- return item.err
- }
- buf, cb, err := item.yieldItemValue()
- defer runCallback(cb)
- if err != nil {
- return err
- }
- if fn != nil {
- return fn(buf)
- }
- return nil
-}
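-
-// Example usage (a sketch): copying a value out so it can be used after the
-// transaction, since val is only valid inside fn.
-// (ValueCopy below is a convenience wrapper for the same pattern.)
-//
-//	var copied []byte
-//	err := item.Value(func(val []byte) error {
-//		copied = append([]byte{}, val...)
-//		return nil
-//	})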
-
-// ValueCopy returns a copy of the value of the item from the value log, writing it to dst slice.
-// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and
-// returned. Tip: It might make sense to reuse the returned slice as dst argument for the next call.
-//
-// This function is useful in long running iterate/update transactions to avoid a write deadlock.
-// See Github issue: https://github.com/dgraph-io/badger/issues/315
-func (item *Item) ValueCopy(dst []byte) ([]byte, error) {
- item.wg.Wait()
- if item.status == prefetched {
- return y.SafeCopy(dst, item.val), item.err
- }
- buf, cb, err := item.yieldItemValue()
- defer runCallback(cb)
- return y.SafeCopy(dst, buf), err
-}
-
-func (item *Item) hasValue() bool {
- if item.meta == 0 && item.vptr == nil {
- // key not found
- return false
- }
- return true
-}
-
-// IsDeletedOrExpired returns true if item contains deleted or expired value.
-func (item *Item) IsDeletedOrExpired() bool {
- return isDeletedOrExpired(item.meta, item.expiresAt)
-}
-
-// DiscardEarlierVersions returns whether the item was created with the
-// option to discard earlier versions of a key when multiple are available.
-func (item *Item) DiscardEarlierVersions() bool {
- return item.meta&bitDiscardEarlierVersions > 0
-}
-
-func (item *Item) yieldItemValue() ([]byte, func(), error) {
- key := item.Key() // No need to copy.
- for {
- if !item.hasValue() {
- return nil, nil, nil
- }
-
- if item.slice == nil {
- item.slice = new(y.Slice)
- }
-
- if (item.meta & bitValuePointer) == 0 {
- val := item.slice.Resize(len(item.vptr))
- copy(val, item.vptr)
- return val, nil, nil
- }
-
- var vp valuePointer
- vp.Decode(item.vptr)
- result, cb, err := item.db.vlog.Read(vp, item.slice)
- if err != ErrRetry {
- return result, cb, err
- }
- if bytes.HasPrefix(key, badgerMove) {
- // err == ErrRetry
- // Error is retry even after checking the move keyspace. So, let's
- // just assume that value is not present.
- return nil, cb, nil
- }
-
- // The value pointer is pointing to a deleted value log. Look for the
- // move key and read that instead.
- runCallback(cb)
- // Do not build the key by appending to badgerMove directly: append may write
- // into and mutate the shared badgerMove slice. Copy into a fresh buffer instead.
- keyTs := y.KeyWithTs(item.Key(), item.Version())
- key = make([]byte, len(badgerMove)+len(keyTs))
- n := copy(key, badgerMove)
- copy(key[n:], keyTs)
- // Note that we can't set item.key to move key, because that would
- // change the key user sees before and after this call. Also, this move
- // logic is internal logic and should not impact the external behavior
- // of the retrieval.
- vs, err := item.db.get(key)
- if err != nil {
- return nil, nil, err
- }
- if vs.Version != item.Version() {
- return nil, nil, nil
- }
- // Bug fix: Always copy the vs.Value into vptr here. Otherwise, when item is reused this
- // slice gets overwritten.
- item.vptr = y.SafeCopy(item.vptr, vs.Value)
- item.meta &^= bitValuePointer // Clear the value pointer bit.
- if vs.Meta&bitValuePointer > 0 {
- item.meta |= bitValuePointer // This meta would only be about value pointer.
- }
- }
-}
-
-func runCallback(cb func()) {
- if cb != nil {
- cb()
- }
-}
-
-func (item *Item) prefetchValue() {
- val, cb, err := item.yieldItemValue()
- defer runCallback(cb)
-
- item.err = err
- item.status = prefetched
- if val == nil {
- return
- }
- if item.db.opt.ValueLogLoadingMode == options.MemoryMap {
- buf := item.slice.Resize(len(val))
- copy(buf, val)
- item.val = buf
- } else {
- item.val = val
- }
-}
-
-// EstimatedSize returns the approximate size of the key-value pair.
-//
-// This can be called while iterating through a store to quickly estimate the
-// size of a range of key-value pairs (without fetching the corresponding
-// values).
-func (item *Item) EstimatedSize() int64 {
- if !item.hasValue() {
- return 0
- }
- if (item.meta & bitValuePointer) == 0 {
- return int64(len(item.key) + len(item.vptr))
- }
- var vp valuePointer
- vp.Decode(item.vptr)
- return int64(vp.Len) // includes key length.
-}
-
-// KeySize returns the size of the key.
-// Note that the key as stored internally carries an extra 8 bytes of timestamp, not included here.
-func (item *Item) KeySize() int64 {
- return int64(len(item.key))
-}
-
-// ValueSize returns the exact size of the value.
-//
-// This can be called to quickly estimate the size of a value without fetching
-// it.
-func (item *Item) ValueSize() int64 {
- if !item.hasValue() {
- return 0
- }
- if (item.meta & bitValuePointer) == 0 {
- return int64(len(item.vptr))
- }
- var vp valuePointer
- vp.Decode(item.vptr)
-
- klen := int64(len(item.key) + 8) // 8 bytes for timestamp.
- return int64(vp.Len) - klen - headerBufSize - crc32.Size
-}
-
-// UserMeta returns the userMeta set by the user. Typically, this byte, optionally set
-// by the user, is used to interpret the value.
-func (item *Item) UserMeta() byte {
- return item.userMeta
-}
-
-// ExpiresAt returns a Unix time value indicating when the item will be
-// considered expired. 0 indicates that the item will never expire.
-func (item *Item) ExpiresAt() uint64 {
- return item.expiresAt
-}
-
-// TODO: Switch this to use linked list container in Go.
-type list struct {
- head *Item
- tail *Item
-}
-
-func (l *list) push(i *Item) {
- i.next = nil
- if l.tail == nil {
- l.head = i
- l.tail = i
- return
- }
- l.tail.next = i
- l.tail = i
-}
-
-func (l *list) pop() *Item {
- if l.head == nil {
- return nil
- }
- i := l.head
- if l.head == l.tail {
- l.tail = nil
- l.head = nil
- } else {
- l.head = i.next
- }
- i.next = nil
- return i
-}
-
-// IteratorOptions is used to set options when iterating over Badger key-value
-// stores.
-//
-// This package provides DefaultIteratorOptions which contains options that
-// should work for most applications. Consider using that as a starting point
-// before customizing it for your own needs.
-type IteratorOptions struct {
- // Indicates whether we should prefetch values during iteration and store them.
- PrefetchValues bool
- // How many KV pairs to prefetch while iterating. Valid only if PrefetchValues is true.
- PrefetchSize int
- Reverse bool // Direction of iteration. False is forward, true is backward.
- AllVersions bool // Fetch all valid versions of the same key.
-
- // The following option is used to narrow down the SSTables that iterator picks up. If
- // Prefix is specified, only tables which could have this prefix are picked based on their range
- // of keys.
- Prefix []byte // Only iterate over this given prefix.
- prefixIsKey bool // If set, use the prefix for bloom filter lookup.
-
- InternalAccess bool // Used to allow internal access to badger keys.
-}
-
-func (opt *IteratorOptions) compareToPrefix(key []byte) int {
- // We should compare key without timestamp. For example key - a[TS] might be > "aa" prefix.
- key = y.ParseKey(key)
- if len(key) > len(opt.Prefix) {
- key = key[:len(opt.Prefix)]
- }
- return bytes.Compare(key, opt.Prefix)
-}
-
-func (opt *IteratorOptions) pickTable(t table.TableInterface) bool {
- if len(opt.Prefix) == 0 {
- return true
- }
- if opt.compareToPrefix(t.Smallest()) > 0 {
- return false
- }
- if opt.compareToPrefix(t.Biggest()) < 0 {
- return false
- }
- // Bloom filter lookup would only work if opt.Prefix does NOT have the read
- // timestamp as part of the key.
- if opt.prefixIsKey && t.DoesNotHave(opt.Prefix) {
- return false
- }
- return true
-}
-
-// pickTables picks the necessary tables for the iterator. This function also assumes
-// that the tables are sorted in the right order.
-func (opt *IteratorOptions) pickTables(all []*table.Table) []*table.Table {
- if len(opt.Prefix) == 0 {
- out := make([]*table.Table, len(all))
- copy(out, all)
- return out
- }
- sIdx := sort.Search(len(all), func(i int) bool {
- return opt.compareToPrefix(all[i].Biggest()) >= 0
- })
- if sIdx == len(all) {
- // Not found.
- return []*table.Table{}
- }
-
- filtered := all[sIdx:]
- if !opt.prefixIsKey {
- eIdx := sort.Search(len(filtered), func(i int) bool {
- return opt.compareToPrefix(filtered[i].Smallest()) > 0
- })
- out := make([]*table.Table, len(filtered[:eIdx]))
- copy(out, filtered[:eIdx])
- return out
- }
-
- var out []*table.Table
- for _, t := range filtered {
- // When we encounter the first table whose smallest key is higher than
- // opt.Prefix, we can stop.
- if opt.compareToPrefix(t.Smallest()) > 0 {
- return out
- }
- // opt.Prefix is actually the key. So, we can run bloom filter checks
- // as well.
- if t.DoesNotHave(opt.Prefix) {
- continue
- }
- out = append(out, t)
- }
- return out
-}
-
-// DefaultIteratorOptions contains default options when iterating over Badger key-value stores.
-var DefaultIteratorOptions = IteratorOptions{
- PrefetchValues: true,
- PrefetchSize: 100,
- Reverse: false,
- AllVersions: false,
-}
-
-// Iterator helps iterating over the KV pairs in a lexicographically sorted order.
-type Iterator struct {
- iitr y.Iterator
- txn *Txn
- readTs uint64
-
- opt IteratorOptions
- item *Item
- data list
- waste list
-
- lastKey []byte // Used to skip over multiple versions of the same key.
-
- closed bool
-}
-
-// NewIterator returns a new iterator. Depending upon the options, either only keys, or both
-// key-value pairs would be fetched. The keys are returned in lexicographically sorted order.
-// Using prefetch is recommended if you're doing a long running iteration, for performance.
-//
-// Multiple Iterators:
-// For a read-only txn, multiple iterators can be running simultaneously. However, for a read-write
-// txn, only one can be running at one time to avoid race conditions, because Txn is thread-unsafe.
-func (txn *Txn) NewIterator(opt IteratorOptions) *Iterator {
- if txn.discarded {
- panic("Transaction has already been discarded")
- }
- // Do not change the order of the next if. We must track the number of running iterators.
- if atomic.AddInt32(&txn.numIterators, 1) > 1 && txn.update {
- atomic.AddInt32(&txn.numIterators, -1)
- panic("Only one iterator can be active at one time, for a RW txn.")
- }
-
- // TODO: If Prefix is set, only pick those memtables which have keys with
- // the prefix.
- tables, decr := txn.db.getMemTables()
- defer decr()
- txn.db.vlog.incrIteratorCount()
- var iters []y.Iterator
- if itr := txn.newPendingWritesIterator(opt.Reverse); itr != nil {
- iters = append(iters, itr)
- }
- for i := 0; i < len(tables); i++ {
- iters = append(iters, tables[i].NewUniIterator(opt.Reverse))
- }
- iters = txn.db.lc.appendIterators(iters, &opt) // This will increment references.
-
- res := &Iterator{
- txn: txn,
- iitr: table.NewMergeIterator(iters, opt.Reverse),
- opt: opt,
- readTs: txn.readTs,
- }
- return res
-}
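-
-// Example usage (a sketch): prefix iteration inside a read-only transaction,
-// with values prefetched. The "orders/" prefix is illustrative.
-//
-//	err := db.View(func(txn *Txn) error {
-//		opt := DefaultIteratorOptions
-//		opt.Prefix = []byte("orders/")
-//		it := txn.NewIterator(opt)
-//		defer it.Close()
-//		for it.Rewind(); it.Valid(); it.Next() {
-//			item := it.Item()
-//			fmt.Printf("key=%s\n", item.Key())
-//		}
-//		return nil
-//	})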
-
-// NewKeyIterator is just like NewIterator, but allows the user to iterate over all versions of a
-// single key. Internally, it sets the Prefix option in provided opt, and uses that prefix to
-// additionally run bloom filter lookups before picking tables from the LSM tree.
-func (txn *Txn) NewKeyIterator(key []byte, opt IteratorOptions) *Iterator {
- if len(opt.Prefix) > 0 {
- panic("opt.Prefix should be nil for NewKeyIterator.")
- }
- opt.Prefix = key // This key must be without the timestamp.
- opt.prefixIsKey = true
- opt.AllVersions = true
- return txn.NewIterator(opt)
-}
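-
-// Example usage (a sketch): walking every stored version of a single key,
-// newest first.
-//
-//	it := txn.NewKeyIterator([]byte("answer"), DefaultIteratorOptions)
-//	defer it.Close()
-//	for it.Rewind(); it.Valid(); it.Next() {
-//		fmt.Printf("version=%d\n", it.Item().Version())
-//	}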
-
-func (it *Iterator) newItem() *Item {
- item := it.waste.pop()
- if item == nil {
- item = &Item{slice: new(y.Slice), db: it.txn.db, txn: it.txn}
- }
- return item
-}
-
-// Item returns pointer to the current key-value pair.
-// This item is only valid until it.Next() gets called.
-func (it *Iterator) Item() *Item {
- tx := it.txn
- tx.addReadKey(it.item.Key())
- return it.item
-}
-
-// Valid returns false when iteration is done.
-func (it *Iterator) Valid() bool {
- if it.item == nil {
- return false
- }
- if it.opt.prefixIsKey {
- return bytes.Equal(it.item.key, it.opt.Prefix)
- }
- return bytes.HasPrefix(it.item.key, it.opt.Prefix)
-}
-
-// ValidForPrefix returns false when iteration is done
-// or when the current key is not prefixed by the specified prefix.
-func (it *Iterator) ValidForPrefix(prefix []byte) bool {
- return it.Valid() && bytes.HasPrefix(it.item.key, prefix)
-}
-
-// Close would close the iterator. It is important to call this when you're done with iteration.
-func (it *Iterator) Close() {
- if it.closed {
- return
- }
- it.closed = true
-
- it.iitr.Close()
- // It is important to wait for the fill goroutines to finish. Otherwise, we might leave zombie
- // goroutines behind, which are waiting to acquire file read locks after DB has been closed.
- waitFor := func(l list) {
- item := l.pop()
- for item != nil {
- item.wg.Wait()
- item = l.pop()
- }
- }
- waitFor(it.waste)
- waitFor(it.data)
-
- // TODO: We could handle this error.
- _ = it.txn.db.vlog.decrIteratorCount()
- atomic.AddInt32(&it.txn.numIterators, -1)
-}
-
-// Next would advance the iterator by one. Always check it.Valid() after a Next()
-// to ensure you have access to a valid it.Item().
-func (it *Iterator) Next() {
- // Reuse current item
- it.item.wg.Wait() // Just cleaner to wait before pushing to avoid doing ref counting.
- it.waste.push(it.item)
-
- // Set next item to current
- it.item = it.data.pop()
-
- for it.iitr.Valid() {
- if it.parseItem() {
- // parseItem calls one extra next.
- // This is used to deal with the complexity of reverse iteration.
- break
- }
- }
-}
-
-func isDeletedOrExpired(meta byte, expiresAt uint64) bool {
- if meta&bitDelete > 0 {
- return true
- }
- if expiresAt == 0 {
- return false
- }
- return expiresAt <= uint64(time.Now().Unix())
-}
-
-// parseItem is a complex function because it needs to handle both forward and reverse iteration
-// implementation. We store keys such that their versions are sorted in descending order. This makes
-// forward iteration efficient, but reverse iteration complicated. This tradeoff is better because
-// forward iteration is more common than reverse.
-//
-// This function advances the iterator.
-func (it *Iterator) parseItem() bool {
- mi := it.iitr
- key := mi.Key()
-
- setItem := func(item *Item) {
- if it.item == nil {
- it.item = item
- } else {
- it.data.push(item)
- }
- }
-
- // Skip badger keys.
- if !it.opt.InternalAccess && bytes.HasPrefix(key, badgerPrefix) {
- mi.Next()
- return false
- }
-
- // Skip any versions which are beyond the readTs.
- version := y.ParseTs(key)
- if version > it.readTs {
- mi.Next()
- return false
- }
-
- if it.opt.AllVersions {
- // Return deleted or expired values also, otherwise user can't figure out
- // whether the key was deleted.
- item := it.newItem()
- it.fill(item)
- setItem(item)
- mi.Next()
- return true
- }
-
- // If iterating in forward direction, then just checking the last key against current key would
- // be sufficient.
- if !it.opt.Reverse {
- if y.SameKey(it.lastKey, key) {
- mi.Next()
- return false
- }
- // Only track in forward direction.
- // We should update lastKey as soon as we find a different key in our snapshot.
- // Consider keys: a 5, b 7 (del), b 5. When iterating, lastKey = a.
- // Then we see b 7, which is deleted. If we don't store lastKey = b, we'll then return b 5,
- // which is wrong. Therefore, update lastKey here.
- it.lastKey = y.SafeCopy(it.lastKey, mi.Key())
- }
-
-FILL:
- // If deleted, advance and return.
- vs := mi.Value()
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
- mi.Next()
- return false
- }
-
- item := it.newItem()
- it.fill(item)
- // item has been filled from the current cursor position; the iterator has not
- // been advanced past this entry yet.
-
- mi.Next() // Advance but no fill item yet.
- if !it.opt.Reverse || !mi.Valid() { // Forward direction, or invalid.
- setItem(item)
- return true
- }
-
- // Reverse direction.
- nextTs := y.ParseTs(mi.Key())
- mik := y.ParseKey(mi.Key())
- if nextTs <= it.readTs && bytes.Equal(mik, item.key) {
- // This is a valid potential candidate.
- goto FILL
- }
- // Ignore the next candidate. Return the current one.
- setItem(item)
- return true
-}
-
-func (it *Iterator) fill(item *Item) {
- vs := it.iitr.Value()
- item.meta = vs.Meta
- item.userMeta = vs.UserMeta
- item.expiresAt = vs.ExpiresAt
-
- item.version = y.ParseTs(it.iitr.Key())
- item.key = y.SafeCopy(item.key, y.ParseKey(it.iitr.Key()))
-
- item.vptr = y.SafeCopy(item.vptr, vs.Value)
- item.val = nil
- if it.opt.PrefetchValues {
- item.wg.Add(1)
- go func() {
- // FIXME we are not handling errors here.
- item.prefetchValue()
- item.wg.Done()
- }()
- }
-}
-
-func (it *Iterator) prefetch() {
- prefetchSize := 2
- if it.opt.PrefetchValues && it.opt.PrefetchSize > 1 {
- prefetchSize = it.opt.PrefetchSize
- }
-
- i := it.iitr
- var count int
- it.item = nil
- for i.Valid() {
- if !it.parseItem() {
- continue
- }
- count++
- if count == prefetchSize {
- break
- }
- }
-}
-
-// Seek would seek to the provided key if present. If absent, it would seek to the next
-// smallest key greater than the provided key if iterating in the forward direction.
-// Behavior would be reversed if iterating backwards.
-func (it *Iterator) Seek(key []byte) {
- for i := it.data.pop(); i != nil; i = it.data.pop() {
- i.wg.Wait()
- it.waste.push(i)
- }
-
- it.lastKey = it.lastKey[:0]
- if len(key) == 0 {
- key = it.opt.Prefix
- }
- if len(key) == 0 {
- it.iitr.Rewind()
- it.prefetch()
- return
- }
-
- if !it.opt.Reverse {
- key = y.KeyWithTs(key, it.txn.readTs)
- } else {
- key = y.KeyWithTs(key, 0)
- }
- it.iitr.Seek(key)
- it.prefetch()
-}
-
-// Rewind would rewind the iterator cursor all the way to zero-th position, which would be the
-// smallest key if iterating forward, and largest if iterating backward. It does not keep track of
-// whether the cursor started with a Seek().
-func (it *Iterator) Rewind() {
- it.Seek(nil)
-}
diff --git a/vendor/github.com/dgraph-io/badger/level_handler.go b/vendor/github.com/dgraph-io/badger/level_handler.go
deleted file mode 100644
index 1ea2af22..00000000
--- a/vendor/github.com/dgraph-io/badger/level_handler.go
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "sort"
- "sync"
-
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-type levelHandler struct {
- // Guards tables, totalSize.
- sync.RWMutex
-
- // For level >= 1, tables are sorted by key ranges, which do not overlap.
- // For level 0, tables are sorted by time.
-// For level 0, the newest tables are at the back. Compact the oldest one first, which is at the front.
- tables []*table.Table
- totalSize int64
-
- // The following are initialized once and const.
- level int
- strLevel string
- maxTotalSize int64
- db *DB
-}
-
-func (s *levelHandler) getTotalSize() int64 {
- s.RLock()
- defer s.RUnlock()
- return s.totalSize
-}
-
-// initTables replaces s.tables with given tables. This is done during loading.
-func (s *levelHandler) initTables(tables []*table.Table) {
- s.Lock()
- defer s.Unlock()
-
- s.tables = tables
- s.totalSize = 0
- for _, t := range tables {
- s.totalSize += t.Size()
- }
-
- if s.level == 0 {
- // Key range will overlap. Just sort by fileID in ascending order
- // because newer tables are at the end of level 0.
- sort.Slice(s.tables, func(i, j int) bool {
- return s.tables[i].ID() < s.tables[j].ID()
- })
- } else {
- // Sort tables by keys.
- sort.Slice(s.tables, func(i, j int) bool {
- return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
- })
- }
-}
-
-// deleteTables removes the tables in toDel from this level.
-func (s *levelHandler) deleteTables(toDel []*table.Table) error {
- s.Lock() // s.Unlock() below
-
- toDelMap := make(map[uint64]struct{})
- for _, t := range toDel {
- toDelMap[t.ID()] = struct{}{}
- }
-
- // Make a copy as iterators might be keeping a slice of tables.
- var newTables []*table.Table
- for _, t := range s.tables {
- _, found := toDelMap[t.ID()]
- if !found {
- newTables = append(newTables, t)
- continue
- }
- s.totalSize -= t.Size()
- }
- s.tables = newTables
-
- s.Unlock() // Unlock s _before_ we DecrRef our tables, which can be slow.
-
- return decrRefs(toDel)
-}
-
-// replaceTables replaces the tables in toDel with the tables in toAdd.
-// You must call decr() to delete the old tables _after_ writing the update to the manifest.
-func (s *levelHandler) replaceTables(toDel, toAdd []*table.Table) error {
- // Need to re-search the range of tables in this level to be replaced as other goroutines might
- // be changing it as well. (They can't touch our tables, but if they add/remove other tables,
- // the indices get shifted around.)
- s.Lock() // We s.Unlock() below.
-
- toDelMap := make(map[uint64]struct{})
- for _, t := range toDel {
- toDelMap[t.ID()] = struct{}{}
- }
- var newTables []*table.Table
- for _, t := range s.tables {
- _, found := toDelMap[t.ID()]
- if !found {
- newTables = append(newTables, t)
- continue
- }
- s.totalSize -= t.Size()
- }
-
- // Increase totalSize first.
- for _, t := range toAdd {
- s.totalSize += t.Size()
- t.IncrRef()
- newTables = append(newTables, t)
- }
-
- // Assign tables.
- s.tables = newTables
- sort.Slice(s.tables, func(i, j int) bool {
- return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
- })
- s.Unlock() // s.Unlock before we DecrRef tables -- that can be slow.
- return decrRefs(toDel)
-}
-
-// addTable adds the given table to the levelHandler. Normally, tables added to a levelHandler are
-// kept sorted by table.Smallest, which is required for correctness. The stream writer can avoid
-// that cost: it appends tables to the list here and sorts the list once at the end
-// (see the sortTables method).
-// NOTE: levelHandler.sortTables() must be called after all addTable calls are done.
-func (s *levelHandler) addTable(t *table.Table) {
- s.Lock()
- defer s.Unlock()
-
- s.totalSize += t.Size() // Increase totalSize first.
- t.IncrRef()
- s.tables = append(s.tables, t)
-}
-
-// sortTables sorts tables of levelHandler based on table.Smallest.
-// Normally it should be called after all addTable calls.
-func (s *levelHandler) sortTables() {
- s.RLock()
- defer s.RUnlock()
-
- sort.Slice(s.tables, func(i, j int) bool {
- return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
- })
-}
-
-func decrRefs(tables []*table.Table) error {
- for _, table := range tables {
- if err := table.DecrRef(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func newLevelHandler(db *DB, level int) *levelHandler {
- return &levelHandler{
- level: level,
- strLevel: fmt.Sprintf("l%d", level),
- db: db,
- }
-}
-
-// tryAddLevel0Table returns true if ok and no stalling.
-func (s *levelHandler) tryAddLevel0Table(t *table.Table) bool {
- y.AssertTrue(s.level == 0)
- // Need lock as we may be deleting the first table during a level 0 compaction.
- s.Lock()
- defer s.Unlock()
- if len(s.tables) >= s.db.opt.NumLevelZeroTablesStall {
- return false
- }
-
- s.tables = append(s.tables, t)
- t.IncrRef()
- s.totalSize += t.Size()
-
- return true
-}
-
-func (s *levelHandler) numTables() int {
- s.RLock()
- defer s.RUnlock()
- return len(s.tables)
-}
-
-func (s *levelHandler) close() error {
- s.RLock()
- defer s.RUnlock()
- var err error
- for _, t := range s.tables {
- if closeErr := t.Close(); closeErr != nil && err == nil {
- err = closeErr
- }
- }
- return errors.Wrap(err, "levelHandler.close")
-}
-
-// getTableForKey acquires a read-lock to access s.tables. It returns the tables that may contain
-// the key, along with a function that decrements their reference counts.
-func (s *levelHandler) getTableForKey(key []byte) ([]*table.Table, func() error) {
- s.RLock()
- defer s.RUnlock()
-
- if s.level == 0 {
- // For level 0, we need to check every table. Remember to make a copy as s.tables may change
- // once we exit this function, and we don't want to lock s.tables while seeking in tables.
- // CAUTION: Reverse the tables.
- out := make([]*table.Table, 0, len(s.tables))
- for i := len(s.tables) - 1; i >= 0; i-- {
- out = append(out, s.tables[i])
- s.tables[i].IncrRef()
- }
- return out, func() error {
- for _, t := range out {
- if err := t.DecrRef(); err != nil {
- return err
- }
- }
- return nil
- }
- }
- // For level >= 1, we can do a binary search as key ranges do not overlap.
- idx := sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(s.tables[i].Biggest(), key) >= 0
- })
- if idx >= len(s.tables) {
- // Given key is strictly > than every element we have.
- return nil, func() error { return nil }
- }
- tbl := s.tables[idx]
- tbl.IncrRef()
- return []*table.Table{tbl}, tbl.DecrRef
-}
-
-// get returns the newest version of the given key found in this level's tables.
-// If the key is not found, it returns an empty y.ValueStruct.
-func (s *levelHandler) get(key []byte) (y.ValueStruct, error) {
- tables, decr := s.getTableForKey(key)
- keyNoTs := y.ParseKey(key)
-
- var maxVs y.ValueStruct
- for _, th := range tables {
- if th.DoesNotHave(keyNoTs) {
- y.NumLSMBloomHits.Add(s.strLevel, 1)
- continue
- }
-
- it := th.NewIterator(false)
- defer it.Close()
-
- y.NumLSMGets.Add(s.strLevel, 1)
- it.Seek(key)
- if !it.Valid() {
- continue
- }
- if y.SameKey(key, it.Key()) {
- if version := y.ParseTs(it.Key()); maxVs.Version < version {
- maxVs = it.Value()
- maxVs.Version = version
- }
- }
- }
- return maxVs, decr()
-}
-
-// appendIterators appends iterators to an array of iterators, for merging.
-// Note: This obtains references for the table handlers. Remember to close these iterators.
-func (s *levelHandler) appendIterators(iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
- s.RLock()
- defer s.RUnlock()
-
- if s.level == 0 {
- // Remember to add in reverse order!
- // The newer table at the end of s.tables should be added first as it takes precedence.
- // Level 0 tables are not in key sorted order, so we need to consider them one by one.
- var out []*table.Table
- for _, t := range s.tables {
- if opt.pickTable(t) {
- out = append(out, t)
- }
- }
- return appendIteratorsReversed(iters, out, opt.Reverse)
- }
-
- tables := opt.pickTables(s.tables)
- if len(tables) == 0 {
- return iters
- }
- return append(iters, table.NewConcatIterator(tables, opt.Reverse))
-}
-
-type levelHandlerRLocked struct{}
-
-// overlappingTables returns the half-open index interval [left, right) of tables that intersect
-// the given key range. The caller must already hold a read lock; this is important enough that
-// callers must pass an empty levelHandlerRLocked value to declare it.
-func (s *levelHandler) overlappingTables(_ levelHandlerRLocked, kr keyRange) (int, int) {
- if len(kr.left) == 0 || len(kr.right) == 0 {
- return 0, 0
- }
- left := sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(kr.left, s.tables[i].Biggest()) <= 0
- })
- right := sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(kr.right, s.tables[i].Smallest()) < 0
- })
- return left, right
-}
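The half-open [left, right) interval above is easy to misread; a self-contained sketch of the same sort.Search pattern on plain byte ranges (toy types, no timestamps) shows what it selects:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// span is a toy stand-in for a table covering the inclusive key range
// [smallest, biggest]. Spans are sorted by smallest and non-overlapping,
// as tables are on levels >= 1.
type span struct{ smallest, biggest []byte }

// overlapping mirrors overlappingTables: it returns the half-open index
// interval [left, right) of spans intersecting the inclusive range [lo, hi].
func overlapping(spans []span, lo, hi []byte) (int, int) {
	left := sort.Search(len(spans), func(i int) bool {
		return bytes.Compare(lo, spans[i].biggest) <= 0
	})
	right := sort.Search(len(spans), func(i int) bool {
		return bytes.Compare(hi, spans[i].smallest) < 0
	})
	return left, right
}

func main() {
	spans := []span{
		{[]byte("a"), []byte("c")},
		{[]byte("d"), []byte("f")},
		{[]byte("g"), []byte("k")},
	}
	left, right := overlapping(spans, []byte("e"), []byte("h"))
	fmt.Println(left, right) // 1 3: spans[1] and spans[2] intersect [e, h]
}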
diff --git a/vendor/github.com/dgraph-io/badger/levels.go b/vendor/github.com/dgraph-io/badger/levels.go
deleted file mode 100644
index 96f1264b..00000000
--- a/vendor/github.com/dgraph-io/badger/levels.go
+++ /dev/null
@@ -1,1092 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "os"
- "sort"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/trace"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-type levelsController struct {
- nextFileID uint64 // Atomic
- elog trace.EventLog
-
- // The following are initialized once and const.
- levels []*levelHandler
- kv *DB
-
- cstatus compactStatus
-}
-
-var (
- // This is for getting timings between stalls.
- lastUnstalled time.Time
-)
-
-// revertToManifest checks that all necessary table files exist and removes all table files not
-// referenced by the manifest. idMap is a set of table file id's that were read from the directory
-// listing.
-func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {
- // 1. Check all files in manifest exist.
- for id := range mf.Tables {
- if _, ok := idMap[id]; !ok {
- return fmt.Errorf("file does not exist for table %d", id)
- }
- }
-
- // 2. Delete files that shouldn't exist.
- for id := range idMap {
- if _, ok := mf.Tables[id]; !ok {
- kv.elog.Printf("Table file %d not referenced in MANIFEST\n", id)
- filename := table.NewFilename(id, kv.opt.Dir)
- if err := os.Remove(filename); err != nil {
- return y.Wrapf(err, "While removing table %d", id)
- }
- }
- }
-
- return nil
-}
-
-func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) {
- y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables)
- s := &levelsController{
- kv: db,
- elog: db.elog,
- levels: make([]*levelHandler, db.opt.MaxLevels),
- }
- s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels)
-
- for i := 0; i < db.opt.MaxLevels; i++ {
- s.levels[i] = newLevelHandler(db, i)
- if i == 0 {
- // Do nothing.
- } else if i == 1 {
- // Level 1 probably shouldn't be too much bigger than level 0.
- s.levels[i].maxTotalSize = db.opt.LevelOneSize
- } else {
- s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier)
- }
- s.cstatus.levels[i] = new(levelCompactStatus)
- }
-
- // Compare the manifest against the directory contents and remove any table files that the
- // manifest does not reference.
- if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil {
- return nil, err
- }
-
- // Some files may be deleted. Let's reload.
- var flags uint32 = y.Sync
- if db.opt.ReadOnly {
- flags |= y.ReadOnly
- }
-
- var mu sync.Mutex
- tables := make([][]*table.Table, db.opt.MaxLevels)
- var maxFileID uint64
-
- // We found that using 3 goroutines allows disk throughput to be utilized to its max.
- // Disk utilization is the main thing we should focus on, while trying to read the data. That's
- // the one factor that remains constant between HDD and SSD.
- throttle := y.NewThrottle(3)
-
- start := time.Now()
- var numOpened int32
- tick := time.NewTicker(3 * time.Second)
- defer tick.Stop()
-
- for fileID, tf := range mf.Tables {
- fname := table.NewFilename(fileID, db.opt.Dir)
- select {
- case <-tick.C:
- db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened),
- len(mf.Tables), time.Since(start).Round(time.Millisecond))
- default:
- }
- if err := throttle.Do(); err != nil {
- closeAllTables(tables)
- return nil, err
- }
- if fileID > maxFileID {
- maxFileID = fileID
- }
- go func(fname string, tf TableManifest) {
- var rerr error
- defer func() {
- throttle.Done(rerr)
- atomic.AddInt32(&numOpened, 1)
- }()
- fd, err := y.OpenExistingFile(fname, flags)
- if err != nil {
- rerr = errors.Wrapf(err, "Opening file: %q", fname)
- return
- }
-
- t, err := table.OpenTable(fd, db.opt.TableLoadingMode, tf.Checksum)
- if err != nil {
- if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") {
- db.opt.Errorf(err.Error())
- db.opt.Errorf("Ignoring table %s", fd.Name())
- // Do not set rerr. We will continue without this table.
- } else {
- rerr = errors.Wrapf(err, "Opening table: %q", fname)
- }
- return
- }
-
- mu.Lock()
- tables[tf.Level] = append(tables[tf.Level], t)
- mu.Unlock()
- }(fname, tf)
- }
- if err := throttle.Finish(); err != nil {
- closeAllTables(tables)
- return nil, err
- }
- db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened),
- time.Since(start).Round(time.Millisecond))
- s.nextFileID = maxFileID + 1
- for i, tbls := range tables {
- s.levels[i].initTables(tbls)
- }
-
- // Make sure key ranges do not overlap etc.
- if err := s.validate(); err != nil {
- _ = s.cleanupLevels()
- return nil, errors.Wrap(err, "Level validation")
- }
-
- // Sync directory (because we have at least removed some files, or previously created the
- // manifest file).
- if err := syncDir(db.opt.Dir); err != nil {
- _ = s.close()
- return nil, err
- }
-
- return s, nil
-}
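The y.Throttle used above is a bounded-concurrency gate; an equivalent sketch using only a buffered-channel semaphore from the standard library (the table-open work is a stand-in):

package main

import (
	"fmt"
	"sync"
)

func main() {
	sem := make(chan struct{}, 3) // at most 3 opens in flight, like y.NewThrottle(3)
	var wg sync.WaitGroup

	for id := 0; id < 10; id++ {
		sem <- struct{}{} // blocks while 3 workers are already running
		wg.Add(1)
		go func(id int) {
			defer func() { <-sem; wg.Done() }()
			fmt.Println("opening table", id) // stand-in for table.OpenTable
		}(id)
	}
	wg.Wait()
}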
-
-// Closes the tables, for cleanup in newLevelsController. (We Close() instead of using DecrRef()
-// because that would delete the underlying files.) We ignore errors, which is OK because tables
-// are read-only.
-func closeAllTables(tables [][]*table.Table) {
- for _, tableSlice := range tables {
- for _, table := range tableSlice {
- _ = table.Close()
- }
- }
-}
-
-func (s *levelsController) cleanupLevels() error {
- var firstErr error
- for _, l := range s.levels {
- if err := l.close(); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- return firstErr
-}
-
-// dropTree picks all tables from all levels, creates a manifest changeset,
-// applies it, and then decrements the refs of these tables, which would result
-// in their deletion.
-func (s *levelsController) dropTree() (int, error) {
- // First pick all tables, so we can create a manifest changelog.
- var all []*table.Table
- for _, l := range s.levels {
- l.RLock()
- all = append(all, l.tables...)
- l.RUnlock()
- }
- if len(all) == 0 {
- return 0, nil
- }
-
- // Generate the manifest changes.
- changes := []*pb.ManifestChange{}
- for _, table := range all {
- changes = append(changes, newDeleteChange(table.ID()))
- }
- changeSet := pb.ManifestChangeSet{Changes: changes}
- if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
- return 0, err
- }
-
- // Now that manifest has been successfully written, we can delete the tables.
- for _, l := range s.levels {
- l.Lock()
- l.totalSize = 0
- l.tables = l.tables[:0]
- l.Unlock()
- }
- for _, table := range all {
- if err := table.DecrRef(); err != nil {
- return 0, err
- }
- }
- return len(all), nil
-}
-
-// dropPrefixes runs an L0->L1 compaction, and then runs a same-level compaction on each of the
-// remaining levels. For the L0->L1 compaction, it runs compactions normally but skips all keys
-// with any of the provided prefixes, as well as the internal move keys for those prefixes.
-// For the Li->Li compactions, it picks up the tables that could contain a prefix. Tables whose
-// keys all carry the prefix are dropped outright; tables that also hold other keys are run
-// through a MergeIterator and compacted into new tables. All the usual compaction machinery
-// applies, i.e. level sizes and the MANIFEST are updated as in the normal flow.
-func (s *levelsController) dropPrefixes(prefixes [][]byte) error {
- // Internal move keys related to the given prefix should also be skipped.
- for _, prefix := range prefixes {
- key := make([]byte, 0, len(badgerMove)+len(prefix))
- key = append(key, badgerMove...)
- key = append(key, prefix...)
- prefixes = append(prefixes, key)
- }
-
- opt := s.kv.opt
- // Iterate over the levels in reverse order. If we went from level 0 up to, say, level 3, we
- // could reach a state where level 0 has been compacted while an older version of a key still
- // survives at a deeper level; an iterator created at that point would observe the stale value.
- // Going from the deepest level upward drops the oldest data first, so lookups never return
- // stale data.
- for i := len(s.levels) - 1; i >= 0; i-- {
- l := s.levels[i]
-
- l.RLock()
- if l.level == 0 {
- size := len(l.tables)
- l.RUnlock()
-
- if size > 0 {
- cp := compactionPriority{
- level: 0,
- score: 1.74,
-     // A unique score greater than 1.0 does two things: it makes this
-     // code path identifiable in logs, and it forces a compaction.
- dropPrefixes: prefixes,
- }
- if err := s.doCompact(cp); err != nil {
- opt.Warningf("While compacting level 0: %v", err)
- return nil
- }
- }
- continue
- }
-
-  // Build a list of compaction tableGroups affecting all the prefixes we
-  // need to drop. The groups must satisfy the invariant that the bottom
-  // tables of a compaction are consecutive; each tableGroup is one such
-  // run of consecutive tables.
- var tableGroups [][]*table.Table
- var tableGroup []*table.Table
-
- finishGroup := func() {
- if len(tableGroup) > 0 {
- tableGroups = append(tableGroups, tableGroup)
- tableGroup = nil
- }
- }
-
- for _, table := range l.tables {
- if containsAnyPrefixes(table.Smallest(), table.Biggest(), prefixes) {
- tableGroup = append(tableGroup, table)
- } else {
- finishGroup()
- }
- }
- finishGroup()
-
- l.RUnlock()
-
- if len(tableGroups) == 0 {
- continue
- }
-
- opt.Infof("Dropping prefix at level %d (%d tableGroups)", l.level, len(tableGroups))
- for _, operation := range tableGroups {
- cd := compactDef{
- elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"),
- thisLevel: l,
- nextLevel: l,
- top: nil,
- bot: operation,
- dropPrefixes: prefixes,
- }
- if err := s.runCompactDef(l.level, cd); err != nil {
- opt.Warningf("While running compact def: %+v. Error: %v", cd, err)
- return err
- }
- }
- }
- return nil
-}
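One subtlety in the loop at the top of dropPrefixes: prefixes is appended to while it is being ranged over. Go evaluates the range operand (and hence the slice length) once, so only the original prefixes are visited. A tiny self-contained sketch of the pattern (the move prefix is a stand-in value):

package main

import "fmt"

func main() {
	prefixes := [][]byte{[]byte("user/"), []byte("job/")}
	move := []byte("!badger!move") // stand-in for the internal move-key prefix

	// The range below sees only the two original entries; the appended
	// move-keys are not revisited, because range fixes the length up front.
	for _, p := range prefixes {
		key := make([]byte, 0, len(move)+len(p))
		key = append(key, move...)
		key = append(key, p...)
		prefixes = append(prefixes, key)
	}
	for _, p := range prefixes {
		fmt.Printf("%s\n", p)
	}
}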
-
-func (s *levelsController) startCompact(lc *y.Closer) {
- n := s.kv.opt.NumCompactors
- lc.AddRunning(n - 1)
- for i := 0; i < n; i++ {
- go s.runWorker(lc)
- }
-}
-
-func (s *levelsController) runWorker(lc *y.Closer) {
- defer lc.Done()
-
- randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond)
- select {
- case <-randomDelay.C:
- case <-lc.HasBeenClosed():
- randomDelay.Stop()
- return
- }
-
- ticker := time.NewTicker(time.Second)
- defer ticker.Stop()
-
- for {
- select {
- // Can add a done channel or other stuff.
- case <-ticker.C:
- prios := s.pickCompactLevels()
- for _, p := range prios {
- if err := s.doCompact(p); err == nil {
- break
- } else if err == errFillTables {
- // pass
- } else {
- s.kv.opt.Warningf("While running doCompact: %v\n", err)
- }
- }
- case <-lc.HasBeenClosed():
- return
- }
- }
-}
-
-// Returns true if level zero may be compacted, without accounting for compactions that already
-// might be happening.
-func (s *levelsController) isLevel0Compactable() bool {
- return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables
-}
-
-// Returns true if the non-zero level may be compacted. delSize is the total size of this level's
-// tables that are currently being compacted; it is subtracted so that in-flight compactions,
-// whose tables are still counted by getTotalSize, are treated as already done.
-func (l *levelHandler) isCompactable(delSize int64) bool {
- return l.getTotalSize()-delSize >= l.maxTotalSize
-}
-
-type compactionPriority struct {
- level int
- score float64
- dropPrefixes [][]byte
-}
-
-// pickCompactLevels determines which levels to compact.
-// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction
-func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
- // This function must use identical criteria for guaranteeing compaction's progress that
- // addLevel0Table uses.
-
- // cstatus is checked to see if level 0's tables are already being compacted
- if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() {
- pri := compactionPriority{
- level: 0,
- score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables),
- }
- prios = append(prios, pri)
- }
-
- for i, l := range s.levels[1:] {
- // Don't consider those tables that are already being compacted right now.
- delSize := s.cstatus.delSize(i + 1)
-
- if l.isCompactable(delSize) {
- pri := compactionPriority{
- level: i + 1,
- score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize),
- }
- prios = append(prios, pri)
- }
- }
- sort.Slice(prios, func(i, j int) bool {
- return prios[i].score > prios[j].score
- })
- return prios
-}
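To make the scoring concrete, a self-contained sketch with hypothetical level sizes (the thresholds play the roles of NumLevelZeroTables and maxTotalSize):

package main

import (
	"fmt"
	"sort"
)

type prio struct {
	level int
	score float64
}

func main() {
	// Hypothetical state: 7 tables on L0 against a threshold of 5, plus
	// current size vs. maxTotalSize for L1..L3.
	numL0, l0Threshold := 7, 5
	levels := []struct{ size, max int64 }{
		{300 << 20, 256 << 20}, // L1 is over its limit -> compactable
		{1 << 30, 2 << 30},     // L2 is under its limit
		{5 << 30, 20 << 30},    // L3 is under its limit
	}

	var prios []prio
	if numL0 >= l0Threshold {
		prios = append(prios, prio{0, float64(numL0) / float64(l0Threshold)})
	}
	for i, l := range levels {
		if l.size >= l.max {
			prios = append(prios, prio{i + 1, float64(l.size) / float64(l.max)})
		}
	}
	sort.Slice(prios, func(i, j int) bool { return prios[i].score > prios[j].score })
	fmt.Println(prios) // [{0 1.4} {1 1.171875}]: L0 is compacted first
}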
-
-// checkOverlap checks if the given tables overlap with any level from the given "lev" onwards.
-func (s *levelsController) checkOverlap(tables []*table.Table, lev int) bool {
- kr := getKeyRange(tables...)
- for i, lh := range s.levels {
- if i < lev { // Skip upper levels.
- continue
- }
- lh.RLock()
- left, right := lh.overlappingTables(levelHandlerRLocked{}, kr)
- lh.RUnlock()
- if right-left > 0 {
- return true
- }
- }
- return false
-}
-
-// compactBuildTables merges topTables and botTables to form a list of new tables.
-func (s *levelsController) compactBuildTables(
- lev int, cd compactDef) ([]*table.Table, func() error, error) {
- topTables := cd.top
- botTables := cd.bot
-
- // Check overlap of the top level with the levels which are not being
- // compacted in this compaction.
- hasOverlap := s.checkOverlap(cd.allTables(), cd.nextLevel.level+1)
-
- // Try to collect stats so that we can inform value log about GC. That would help us find which
- // value log file should be GCed.
- discardStats := make(map[uint32]int64)
- updateStats := func(vs y.ValueStruct) {
- if vs.Meta&bitValuePointer > 0 {
- var vp valuePointer
- vp.Decode(vs.Value)
- discardStats[vp.Fid] += int64(vp.Len)
- }
- }
-
- // Create iterators across all the tables involved first.
- var iters []y.Iterator
- if lev == 0 {
- iters = appendIteratorsReversed(iters, topTables, false)
- } else if len(topTables) > 0 {
- y.AssertTrue(len(topTables) == 1)
- iters = []y.Iterator{topTables[0].NewIterator(false)}
- }
-
- // Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap.
- var valid []*table.Table
-
-nextTable:
- for _, table := range botTables {
- if len(cd.dropPrefixes) > 0 {
- for _, prefix := range cd.dropPrefixes {
- if bytes.HasPrefix(table.Smallest(), prefix) &&
- bytes.HasPrefix(table.Biggest(), prefix) {
- // All the keys in this table have the dropPrefix. So, this
- // table does not need to be in the iterator and can be
- // dropped immediately.
- continue nextTable
- }
- }
- }
- valid = append(valid, table)
- }
- iters = append(iters, table.NewConcatIterator(valid, false))
- it := table.NewMergeIterator(iters, false)
- defer it.Close() // Important to close the iterator to do ref counting.
-
- it.Rewind()
-
- // Pick a discard ts, so we can discard versions below this ts. We should
- // never discard any versions starting from above this timestamp, because
- // that would affect the snapshot view guarantee provided by transactions.
- discardTs := s.kv.orc.discardAtOrBelow()
-
- // Start generating new tables.
- type newTableResult struct {
- table *table.Table
- err error
- }
- resultCh := make(chan newTableResult)
- var numBuilds, numVersions int
- var lastKey, skipKey []byte
- for it.Valid() {
- timeStart := time.Now()
- builder := table.NewTableBuilder()
- var numKeys, numSkips uint64
- for ; it.Valid(); it.Next() {
- // See if we need to skip the prefix.
- if len(cd.dropPrefixes) > 0 && hasAnyPrefixes(it.Key(), cd.dropPrefixes) {
- numSkips++
- updateStats(it.Value())
- continue
- }
-
- // See if we need to skip this key.
- if len(skipKey) > 0 {
- if y.SameKey(it.Key(), skipKey) {
- numSkips++
- updateStats(it.Value())
- continue
- } else {
- skipKey = skipKey[:0]
- }
- }
-
- if !y.SameKey(it.Key(), lastKey) {
- if builder.ReachedCapacity(s.kv.opt.MaxTableSize) {
- // Only break if we are on a different key, and have reached capacity. We want
- // to ensure that all versions of the key are stored in the same sstable, and
- // not divided across multiple tables at the same level.
- break
- }
- lastKey = y.SafeCopy(lastKey, it.Key())
- numVersions = 0
- }
-
- vs := it.Value()
- version := y.ParseTs(it.Key())
- // Do not discard entries inserted by merge operator. These entries will be
- // discarded once they're merged
- if version <= discardTs && vs.Meta&bitMergeEntry == 0 {
- // Keep track of the number of versions encountered for this key. Only consider the
- // versions which are below the minReadTs, otherwise, we might end up discarding the
- // only valid version for a running transaction.
- numVersions++
-
- // Keep the current version and discard all the next versions if
- // - The `discardEarlierVersions` bit is set OR
- // - We've already processed `NumVersionsToKeep` number of versions
- // (including the current item being processed)
- lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0 ||
- numVersions == s.kv.opt.NumVersionsToKeep
-
- isExpired := isDeletedOrExpired(vs.Meta, vs.ExpiresAt)
-
- if isExpired || lastValidVersion {
- // If this version of the key is deleted or expired, skip all the rest of the
- // versions. Ensure that we're only removing versions below readTs.
- skipKey = y.SafeCopy(skipKey, it.Key())
-
- switch {
- // Add the key to the table only if it has not expired.
- // We don't want to add the deleted/expired keys.
- case !isExpired && lastValidVersion:
- // Add this key. We have set skipKey, so the following key versions
- // would be skipped.
- case hasOverlap:
- // If this key range has overlap with lower levels, then keep the deletion
- // marker with the latest version, discarding the rest. We have set skipKey,
- // so the following key versions would be skipped.
- default:
- // If no overlap, we can skip all the versions, by continuing here.
- numSkips++
- updateStats(vs)
- continue // Skip adding this key.
- }
- }
- }
- numKeys++
- builder.Add(it.Key(), it.Value())
- }
-  // The builder can still be empty if every key seen in the loop above was
-  // skipped (dropped prefix or discarded version), hence the check below.
- s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v",
- numKeys, numSkips, time.Since(timeStart))
- if !builder.Empty() {
- numBuilds++
- fileID := s.reserveFileID()
- go func(builder *table.Builder) {
- defer builder.Close()
-
- fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true)
- if err != nil {
- resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)}
- return
- }
-
- if _, err := fd.Write(builder.Finish()); err != nil {
- resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)}
- return
- }
-
- tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode, nil)
- // decrRef is added below.
- resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())}
- }(builder)
- }
- }
-
- newTables := make([]*table.Table, 0, 20)
- // Wait for all table builders to finish.
- var firstErr error
- for x := 0; x < numBuilds; x++ {
- res := <-resultCh
- newTables = append(newTables, res.table)
- if firstErr == nil {
- firstErr = res.err
- }
- }
-
- if firstErr == nil {
- // Ensure created files' directory entries are visible. We don't mind the extra latency
- // from not doing this ASAP after all file creation has finished because this is a
- // background operation.
- firstErr = syncDir(s.kv.opt.Dir)
- }
-
- if firstErr != nil {
- // An error happened. Delete all the newly created table files (by calling DecrRef
- // -- we're the only holders of a ref).
- for j := 0; j < numBuilds; j++ {
- if newTables[j] != nil {
- _ = newTables[j].DecrRef()
- }
- }
- errorReturn := errors.Wrapf(firstErr, "While running compaction for: %+v", cd)
- return nil, nil, errorReturn
- }
-
- sort.Slice(newTables, func(i, j int) bool {
- return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0
- })
- s.kv.vlog.updateDiscardStats(discardStats)
- s.kv.opt.Debugf("Discard stats: %v", discardStats)
- return newTables, func() error { return decrRefs(newTables) }, nil
-}
-
-func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet {
- changes := []*pb.ManifestChange{}
- for _, table := range newTables {
- changes = append(changes,
- newCreateChange(table.ID(), cd.nextLevel.level, table.Checksum))
- }
- for _, table := range cd.top {
- changes = append(changes, newDeleteChange(table.ID()))
- }
- for _, table := range cd.bot {
- changes = append(changes, newDeleteChange(table.ID()))
- }
- return pb.ManifestChangeSet{Changes: changes}
-}
-
-func hasAnyPrefixes(s []byte, listOfPrefixes [][]byte) bool {
- for _, prefix := range listOfPrefixes {
- if bytes.HasPrefix(s, prefix) {
- return true
- }
- }
-
- return false
-}
-
-func containsPrefix(smallValue, largeValue, prefix []byte) bool {
- if bytes.HasPrefix(smallValue, prefix) {
- return true
- }
- if bytes.HasPrefix(largeValue, prefix) {
- return true
- }
- if bytes.Compare(prefix, smallValue) > 0 &&
- bytes.Compare(prefix, largeValue) < 0 {
- return true
- }
-
- return false
-}
-
-func containsAnyPrefixes(smallValue, largeValue []byte, listOfPrefixes [][]byte) bool {
- for _, prefix := range listOfPrefixes {
- if containsPrefix(smallValue, largeValue, prefix) {
- return true
- }
- }
-
- return false
-}
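The middle clause of containsPrefix is the subtle one: a prefix can fall strictly between a table's smallest and biggest keys without matching either boundary. A self-contained check of both cases:

package main

import (
	"bytes"
	"fmt"
)

// containsPrefix reports whether a key carrying prefix could lie inside
// the inclusive range [small, large] (same logic as the code above).
func containsPrefix(small, large, prefix []byte) bool {
	if bytes.HasPrefix(small, prefix) || bytes.HasPrefix(large, prefix) {
		return true
	}
	// Neither boundary starts with the prefix, but keys such as "m..."
	// can still sit between small="a" and large="z".
	return bytes.Compare(prefix, small) > 0 && bytes.Compare(prefix, large) < 0
}

func main() {
	fmt.Println(containsPrefix([]byte("a"), []byte("z"), []byte("m"))) // true
	fmt.Println(containsPrefix([]byte("a"), []byte("c"), []byte("m"))) // false
}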
-
-type compactDef struct {
- elog trace.Trace
-
- thisLevel *levelHandler
- nextLevel *levelHandler
-
- top []*table.Table
- bot []*table.Table
-
- thisRange keyRange
- nextRange keyRange
-
- thisSize int64
-
- dropPrefixes [][]byte
-}
-
-func (cd *compactDef) lockLevels() {
- cd.thisLevel.RLock()
- cd.nextLevel.RLock()
-}
-
-func (cd *compactDef) unlockLevels() {
- cd.nextLevel.RUnlock()
- cd.thisLevel.RUnlock()
-}
-
-func (cd *compactDef) allTables() []*table.Table {
- ret := make([]*table.Table, 0, len(cd.top)+len(cd.bot))
- ret = append(ret, cd.top...)
- ret = append(ret, cd.bot...)
- return ret
-}
-
-func (s *levelsController) fillTablesL0(cd *compactDef) bool {
- cd.lockLevels()
- defer cd.unlockLevels()
-
- cd.top = make([]*table.Table, len(cd.thisLevel.tables))
- copy(cd.top, cd.thisLevel.tables)
- if len(cd.top) == 0 {
- return false
- }
- cd.thisRange = infRange
-
- kr := getKeyRange(cd.top...)
- left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr)
- cd.bot = make([]*table.Table, right-left)
- copy(cd.bot, cd.nextLevel.tables[left:right])
-
- if len(cd.bot) == 0 {
- cd.nextRange = kr
- } else {
- cd.nextRange = getKeyRange(cd.bot...)
- }
-
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- return false
- }
-
- return true
-}
-
-// sortByOverlap sorts tables in increasing order of overlap with next level.
-func (s *levelsController) sortByOverlap(tables []*table.Table, cd *compactDef) {
- if len(tables) == 0 || cd.nextLevel == nil {
- return
- }
-
- tableOverlap := make([]int, len(tables))
- for i := range tables {
- // get key range for table
- tableRange := getKeyRange(tables[i])
- // get overlap with next level
- left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, tableRange)
- tableOverlap[i] = right - left
- }
-
- sort.Slice(tables, func(i, j int) bool {
- return tableOverlap[i] < tableOverlap[j]
- })
-}
-
-func (s *levelsController) fillTables(cd *compactDef) bool {
- cd.lockLevels()
- defer cd.unlockLevels()
-
- tables := make([]*table.Table, len(cd.thisLevel.tables))
- copy(tables, cd.thisLevel.tables)
- if len(tables) == 0 {
- return false
- }
-
- // We want to pick files from the current level in order of increasing overlap with the next
- // level's tables. The idea is to first compact the file that overlaps least with the next
- // level, which gives us better write amplification.
- s.sortByOverlap(tables, cd)
-
- for _, t := range tables {
- cd.thisSize = t.Size()
- cd.thisRange = getKeyRange(t)
- if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) {
- continue
- }
- cd.top = []*table.Table{t}
- left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange)
-
- cd.bot = make([]*table.Table, right-left)
- copy(cd.bot, cd.nextLevel.tables[left:right])
-
- if len(cd.bot) == 0 {
- cd.bot = []*table.Table{}
- cd.nextRange = cd.thisRange
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- continue
- }
- return true
- }
- cd.nextRange = getKeyRange(cd.bot...)
-
- if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) {
- continue
- }
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- continue
- }
- return true
- }
- return false
-}
-
-func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) {
- timeStart := time.Now()
-
- thisLevel := cd.thisLevel
- nextLevel := cd.nextLevel
-
- // Tables should never be moved directly between levels; they should always be rewritten, so
- // that invalid versions can be discarded.
-
- newTables, decr, err := s.compactBuildTables(l, cd)
- if err != nil {
- return err
- }
- defer func() {
-  // Only assign to err if it is not already set.
- if decErr := decr(); err == nil {
- err = decErr
- }
- }()
- changeSet := buildChangeSet(&cd, newTables)
-
- // We write to the manifest _before_ we delete files (and after we created files)
- if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
- return err
- }
-
- // See comment earlier in this function about the ordering of these ops, and the order in which
- // we access levels when reading.
- if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
- return err
- }
- if err := thisLevel.deleteTables(cd.top); err != nil {
- return err
- }
-
- // Note: For level 0, while doCompact is running, it is possible that new tables are added.
- // However, the tables are added only to the end, so it is ok to just delete the first table.
-
- s.kv.opt.Infof("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n",
- thisLevel.level, nextLevel.level, len(cd.top)+len(cd.bot),
- len(newTables), time.Since(timeStart))
- return nil
-}
-
-var errFillTables = errors.New("Unable to fill tables")
-
-// doCompact picks some table on level l and compacts it away to the next level.
-func (s *levelsController) doCompact(p compactionPriority) error {
- l := p.level
- y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check.
-
- cd := compactDef{
- elog: trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"),
- thisLevel: s.levels[l],
- nextLevel: s.levels[l+1],
- dropPrefixes: p.dropPrefixes,
- }
- cd.elog.SetMaxEvents(100)
- defer cd.elog.Finish()
-
- s.kv.opt.Infof("Got compaction priority: %+v", p)
-
- // While picking tables to be compacted, both levels' tables are expected to
- // remain unchanged.
- if l == 0 {
- if !s.fillTablesL0(&cd) {
- return errFillTables
- }
-
- } else {
- if !s.fillTables(&cd) {
- return errFillTables
- }
- }
- defer s.cstatus.delete(cd) // Remove the ranges from compaction status.
-
- s.kv.opt.Infof("Running for level: %d\n", cd.thisLevel.level)
- s.cstatus.toLog(cd.elog)
- if err := s.runCompactDef(l, cd); err != nil {
- // This compaction couldn't be done successfully.
- s.kv.opt.Warningf("LOG Compact FAILED with error: %+v: %+v", err, cd)
- return err
- }
-
- s.cstatus.toLog(cd.elog)
- s.kv.opt.Infof("Compaction for level: %d DONE", cd.thisLevel.level)
- return nil
-}
-
-func (s *levelsController) addLevel0Table(t *table.Table) error {
- // We update the manifest _before_ the table becomes part of a levelHandler, because at that
- // point it could get used in some compaction. This ensures the manifest file gets updated in
- // the proper order. (That means this update happens before that of some compaction which
- // deletes the table.)
- err := s.kv.manifest.addChanges([]*pb.ManifestChange{
- newCreateChange(t.ID(), 0, t.Checksum),
- })
- if err != nil {
- return err
- }
-
- for !s.levels[0].tryAddLevel0Table(t) {
- // Stall. Make sure all levels are healthy before we unstall.
- var timeStart time.Time
- {
- s.elog.Printf("STALLED STALLED STALLED: %v\n", time.Since(lastUnstalled))
- s.cstatus.RLock()
- for i := 0; i < s.kv.opt.MaxLevels; i++ {
- s.elog.Printf("level=%d. Status=%s Size=%d\n",
- i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize())
- }
- s.cstatus.RUnlock()
- timeStart = time.Now()
- }
-  // Before we unstall, we need to make sure that levels 0 and 1 are healthy. Otherwise, we
-  // will very quickly fill up level 0 again, and if the compaction strategy favors level 0,
-  // then level 1 is going to get super full.
- for i := 0; ; i++ {
-   // Passing 0 for delSize to isCompactable means we treat incomplete compactions as not
-   // having finished -- we wait for them to finish. It's also crucial that this check
-   // replicates pickCompactLevels' notion of compactability, in order to guarantee progress.
- if !s.isLevel0Compactable() && !s.levels[1].isCompactable(0) {
- break
- }
- time.Sleep(10 * time.Millisecond)
- if i%100 == 0 {
- prios := s.pickCompactLevels()
- s.elog.Printf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios)
- i = 0
- }
- }
- {
- s.elog.Printf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart))
- lastUnstalled = time.Now()
- }
- }
-
- return nil
-}
-
-func (s *levelsController) close() error {
- err := s.cleanupLevels()
- return errors.Wrap(err, "levelsController.Close")
-}
-
-// get returns the found value if any. If not found, we return nil.
-func (s *levelsController) get(key []byte, maxVs *y.ValueStruct) (y.ValueStruct, error) {
- // It's important that we iterate the levels from 0 on upward. The reason is, if we iterated
- // in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could
- // read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do
- // parallelize this, we will need to call the h.RLock() function by increasing order of level
- // number.)
- version := y.ParseTs(key)
- for _, h := range s.levels {
- vs, err := h.get(key) // Calls h.RLock() and h.RUnlock().
- if err != nil {
- return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key)
- }
- if vs.Value == nil && vs.Meta == 0 {
- continue
- }
- if maxVs == nil || vs.Version == version {
- return vs, nil
- }
- if maxVs.Version < vs.Version {
- *maxVs = vs
- }
- }
- if maxVs != nil {
- return *maxVs, nil
- }
- return y.ValueStruct{}, nil
-}
-
-func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator {
- for i := len(th) - 1; i >= 0; i-- {
- // This will increment the reference of the table handler.
- out = append(out, th[i].NewIterator(reversed))
- }
- return out
-}
-
-// appendIterators appends iterators to an array of iterators, for merging.
-// Note: This obtains references for the table handlers. Remember to close these iterators.
-func (s *levelsController) appendIterators(
- iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
- // Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing
- // data when there's a compaction.
- for _, level := range s.levels {
- iters = level.appendIterators(iters, opt)
- }
- return iters
-}
-
-// TableInfo represents the information about a table.
-type TableInfo struct {
- ID uint64
- Level int
- Left []byte
- Right []byte
- KeyCount uint64 // Number of keys in the table
-}
-
-func (s *levelsController) getTableInfo(withKeysCount bool) (result []TableInfo) {
- for _, l := range s.levels {
- l.RLock()
- for _, t := range l.tables {
- var count uint64
- if withKeysCount {
- it := t.NewIterator(false)
- for it.Rewind(); it.Valid(); it.Next() {
- count++
- }
- it.Close()
- }
-
- info := TableInfo{
- ID: t.ID(),
- Level: l.level,
- Left: t.Smallest(),
- Right: t.Biggest(),
- KeyCount: count,
- }
- result = append(result, info)
- }
- l.RUnlock()
- }
- sort.Slice(result, func(i, j int) bool {
- if result[i].Level != result[j].Level {
- return result[i].Level < result[j].Level
- }
- return result[i].ID < result[j].ID
- })
- return
-}
diff --git a/vendor/github.com/dgraph-io/badger/logger.go b/vendor/github.com/dgraph-io/badger/logger.go
deleted file mode 100644
index 3a9b8a33..00000000
--- a/vendor/github.com/dgraph-io/badger/logger.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright 2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "log"
- "os"
-)
-
-// Logger is implemented by any logging system that is used for standard logs.
-type Logger interface {
- Errorf(string, ...interface{})
- Warningf(string, ...interface{})
- Infof(string, ...interface{})
- Debugf(string, ...interface{})
-}
-
-// Errorf logs an ERROR log message to the logger specified in opts or to the
-// global logger if no logger is specified in opts.
-func (opt *Options) Errorf(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Errorf(format, v...)
-}
-
-// Infof logs an INFO message to the logger specified in opts.
-func (opt *Options) Infof(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Infof(format, v...)
-}
-
-// Warningf logs a WARNING message to the logger specified in opts.
-func (opt *Options) Warningf(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Warningf(format, v...)
-}
-
-// Debugf logs a DEBUG message to the logger specified in opts.
-func (opt *Options) Debugf(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Debugf(format, v...)
-}
-
-type defaultLog struct {
- *log.Logger
-}
-
-var defaultLogger = &defaultLog{Logger: log.New(os.Stderr, "badger ", log.LstdFlags)}
-
-func (l *defaultLog) Errorf(f string, v ...interface{}) {
- l.Printf("ERROR: "+f, v...)
-}
-
-func (l *defaultLog) Warningf(f string, v ...interface{}) {
- l.Printf("WARNING: "+f, v...)
-}
-
-func (l *defaultLog) Infof(f string, v ...interface{}) {
- l.Printf("INFO: "+f, v...)
-}
-
-func (l *defaultLog) Debugf(f string, v ...interface{}) {
- l.Printf("DEBUG: "+f, v...)
-}
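Because every Options helper above no-ops on a nil Logger, redirecting or silencing Badger's logging is just a matter of setting that field. A sketch under the assumption of the v1.6-era DefaultOptions(path) constructor (path hypothetical):

package main

import (
	"log"
	"os"

	badger "github.com/dgraph-io/badger"
)

// prefixLogger satisfies the badger.Logger interface by delegating to a
// standard-library logger, mirroring defaultLog above.
type prefixLogger struct{ *log.Logger }

func (l prefixLogger) Errorf(f string, v ...interface{})   { l.Printf("ERROR: "+f, v...) }
func (l prefixLogger) Warningf(f string, v ...interface{}) { l.Printf("WARNING: "+f, v...) }
func (l prefixLogger) Infof(f string, v ...interface{})    { l.Printf("INFO: "+f, v...) }
func (l prefixLogger) Debugf(f string, v ...interface{})   { l.Printf("DEBUG: "+f, v...) }

func main() {
	opts := badger.DefaultOptions("/tmp/badger-logged") // hypothetical path
	opts.Logger = prefixLogger{log.New(os.Stderr, "kv ", log.LstdFlags)}
	// Setting opts.Logger = nil would silence Badger's logging entirely.

	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}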
diff --git a/vendor/github.com/dgraph-io/badger/managed_db.go b/vendor/github.com/dgraph-io/badger/managed_db.go
deleted file mode 100644
index 61e6b3cc..00000000
--- a/vendor/github.com/dgraph-io/badger/managed_db.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-// OpenManaged returns a new DB, which allows more control over setting
-// transaction timestamps, aka managed mode.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func OpenManaged(opts Options) (*DB, error) {
- opts.managedTxns = true
- return Open(opts)
-}
-
-// NewTransactionAt follows the same logic as DB.NewTransaction(), but uses the
-// provided read timestamp.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func (db *DB) NewTransactionAt(readTs uint64, update bool) *Txn {
- if !db.opt.managedTxns {
- panic("Cannot use NewTransactionAt with managedDB=false. Use NewTransaction instead.")
- }
- txn := db.newTransaction(update, true)
- txn.readTs = readTs
- return txn
-}
-
-// NewWriteBatchAt is similar to NewWriteBatch, but it allows the user to set the commit
-// timestamp. NewWriteBatchAt is meant to be used only in managed mode.
-func (db *DB) NewWriteBatchAt(commitTs uint64) *WriteBatch {
- if !db.opt.managedTxns {
- panic("cannot use NewWriteBatchAt with managedDB=false. Use NewWriteBatch instead")
- }
-
- wb := db.newWriteBatch()
- wb.commitTs = commitTs
- wb.txn.commitTs = commitTs
- return wb
-}
-
-// CommitAt commits the transaction, following the same logic as Commit(), but
-// at the given commit timestamp. This will panic if not used with managed transactions.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func (txn *Txn) CommitAt(commitTs uint64, callback func(error)) error {
- if !txn.db.opt.managedTxns {
- panic("Cannot use CommitAt with managedDB=false. Use Commit instead.")
- }
- txn.commitTs = commitTs
- if callback == nil {
- return txn.Commit()
- }
- txn.CommitWith(callback)
- return nil
-}
-
-// SetDiscardTs sets a timestamp at or below which any invalid or deleted
-// versions can be discarded from the LSM tree, and thence from the value log, to
-// reclaim disk space. It can only be used with managed transactions.
-func (db *DB) SetDiscardTs(ts uint64) {
- if !db.opt.managedTxns {
- panic("Cannot use SetDiscardTs with managedDB=false.")
- }
- db.orc.setDiscardTs(ts)
-}
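A minimal sketch of the managed-mode flow these helpers enable. The DefaultOptions(path) constructor, the path, and the timestamps are all illustrative assumptions; the point is that in managed mode the caller owns timestamp allocation:

package main

import (
	"log"

	badger "github.com/dgraph-io/badger"
)

func main() {
	db, err := badger.OpenManaged(badger.DefaultOptions("/tmp/badger-managed"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The caller supplies both the read and the commit timestamps.
	txn := db.NewTransactionAt(10, true) // readTs=10, update=true
	defer txn.Discard()
	if err := txn.Set([]byte("k"), []byte("v")); err != nil {
		log.Fatal(err)
	}
	if err := txn.CommitAt(11, nil); err != nil { // commitTs=11
		log.Fatal(err)
	}

	// Versions at or below this ts become eligible for discard during
	// compaction; SetDiscardTs panics outside managed mode.
	db.SetDiscardTs(11)
}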
diff --git a/vendor/github.com/dgraph-io/badger/manifest.go b/vendor/github.com/dgraph-io/badger/manifest.go
deleted file mode 100644
index 5a2e837e..00000000
--- a/vendor/github.com/dgraph-io/badger/manifest.go
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bufio"
- "bytes"
- "encoding/binary"
- "fmt"
- "hash/crc32"
- "io"
- "os"
- "path/filepath"
- "sync"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/y"
- "github.com/golang/protobuf/proto"
- "github.com/pkg/errors"
-)
-
-// Manifest represents the contents of the MANIFEST file in a Badger store.
-//
-// The MANIFEST file describes the startup state of the db -- all LSM files and what level they're
-// at.
-//
-// It consists of a sequence of ManifestChangeSet objects. Each of these is treated atomically,
-// and contains a sequence of ManifestChange's (file creations/deletions) which we use to
-// reconstruct the manifest at startup.
-type Manifest struct {
- Levels []levelManifest
- Tables map[uint64]TableManifest
-
- // Contains total number of creation and deletion changes in the manifest -- used to compute
- // whether it'd be useful to rewrite the manifest.
- Creations int
- Deletions int
-}
-
-func createManifest() Manifest {
- levels := make([]levelManifest, 0)
- return Manifest{
- Levels: levels,
- Tables: make(map[uint64]TableManifest),
- }
-}
-
-// levelManifest contains information about LSM tree levels
-// in the MANIFEST file.
-type levelManifest struct {
- Tables map[uint64]struct{} // Set of table id's
-}
-
-// TableManifest contains information about a specific table
-// in the LSM tree.
-type TableManifest struct {
- Level uint8
- Checksum []byte
-}
-
-// manifestFile holds the file pointer (and other info) about the manifest file, which is a log
-// file we append to.
-type manifestFile struct {
- fp *os.File
- directory string
- // We make this configurable so that unit tests can hit rewrite() code quickly
- deletionsRewriteThreshold int
-
- // Guards appends, which includes access to the manifest field.
- appendLock sync.Mutex
-
- // Used to track the current state of the manifest, used when rewriting.
- manifest Manifest
-}
-
-const (
- // ManifestFilename is the filename for the manifest file.
- ManifestFilename = "MANIFEST"
- manifestRewriteFilename = "MANIFEST-REWRITE"
- manifestDeletionsRewriteThreshold = 10000
- manifestDeletionsRatio = 10
-)
-
-// asChanges returns a sequence of changes that could be used to recreate the Manifest in its
-// present state.
-func (m *Manifest) asChanges() []*pb.ManifestChange {
- changes := make([]*pb.ManifestChange, 0, len(m.Tables))
- for id, tm := range m.Tables {
- changes = append(changes, newCreateChange(id, int(tm.Level), tm.Checksum))
- }
- return changes
-}
-
-func (m *Manifest) clone() Manifest {
- changeSet := pb.ManifestChangeSet{Changes: m.asChanges()}
- ret := createManifest()
- y.Check(applyChangeSet(&ret, &changeSet))
- return ret
-}
-
-// openOrCreateManifestFile opens a Badger manifest file if it exists, or creates one if
-// it doesn't.
-func openOrCreateManifestFile(dir string, readOnly bool) (
- ret *manifestFile, result Manifest, err error) {
- return helpOpenOrCreateManifestFile(dir, readOnly, manifestDeletionsRewriteThreshold)
-}
-
-func helpOpenOrCreateManifestFile(dir string, readOnly bool, deletionsThreshold int) (
- *manifestFile, Manifest, error) {
-
- path := filepath.Join(dir, ManifestFilename)
- var flags uint32
- if readOnly {
- flags |= y.ReadOnly
- }
- fp, err := y.OpenExistingFile(path, flags) // We explicitly sync in addChanges, outside the lock.
- if err != nil {
- if !os.IsNotExist(err) {
- return nil, Manifest{}, err
- }
- if readOnly {
- return nil, Manifest{}, fmt.Errorf("no manifest found, required for read-only db")
- }
- m := createManifest()
- fp, netCreations, err := helpRewrite(dir, &m)
- if err != nil {
- return nil, Manifest{}, err
- }
- y.AssertTrue(netCreations == 0)
- mf := &manifestFile{
- fp: fp,
- directory: dir,
- manifest: m.clone(),
- deletionsRewriteThreshold: deletionsThreshold,
- }
- return mf, m, nil
- }
-
- manifest, truncOffset, err := ReplayManifestFile(fp)
- if err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
-
- if !readOnly {
- // Truncate file so we don't have a half-written entry at the end.
- if err := fp.Truncate(truncOffset); err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
- }
- if _, err = fp.Seek(0, io.SeekEnd); err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
-
- mf := &manifestFile{
- fp: fp,
- directory: dir,
- manifest: manifest.clone(),
- deletionsRewriteThreshold: deletionsThreshold,
- }
- return mf, manifest, nil
-}
-
-func (mf *manifestFile) close() error {
- return mf.fp.Close()
-}
-
-// addChanges writes a batch of changes, atomically, to the file. By "atomically" we mean that
-// when we replay the MANIFEST file, we'll either replay all the changes or none of them. (The
-// truth of this depends on the filesystem -- some might append garbage data if a system crash
-// happens at the wrong time.)
-func (mf *manifestFile) addChanges(changesParam []*pb.ManifestChange) error {
- changes := pb.ManifestChangeSet{Changes: changesParam}
- buf, err := proto.Marshal(&changes)
- if err != nil {
- return err
- }
-
- // Maybe we could use O_APPEND instead (on certain file systems)
- mf.appendLock.Lock()
- if err := applyChangeSet(&mf.manifest, &changes); err != nil {
- mf.appendLock.Unlock()
- return err
- }
- // Rewrite manifest if it'd shrink by 1/10 and it's big enough to care
- if mf.manifest.Deletions > mf.deletionsRewriteThreshold &&
- mf.manifest.Deletions > manifestDeletionsRatio*(mf.manifest.Creations-mf.manifest.Deletions) {
- if err := mf.rewrite(); err != nil {
- mf.appendLock.Unlock()
- return err
- }
- } else {
- var lenCrcBuf [8]byte
- binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(buf)))
- binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(buf, y.CastagnoliCrcTable))
- buf = append(lenCrcBuf[:], buf...)
- if _, err := mf.fp.Write(buf); err != nil {
- mf.appendLock.Unlock()
- return err
- }
- }
-
- mf.appendLock.Unlock()
- return y.FileSync(mf.fp)
-}
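Each appended change set is framed as a 4-byte big-endian length, a 4-byte Castagnoli CRC32, then the payload. A self-contained sketch of that framing, with a stand-in payload instead of the real protobuf bytes:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// frame prepends the 8-byte header the manifest uses: len | crc(payload).
func frame(payload []byte) []byte {
	var hdr [8]byte
	binary.BigEndian.PutUint32(hdr[0:4], uint32(len(payload)))
	binary.BigEndian.PutUint32(hdr[4:8], crc32.Checksum(payload, castagnoli))
	return append(hdr[:], payload...)
}

// unframe verifies and strips the header, as ReplayManifestFile does.
func unframe(rec []byte) ([]byte, error) {
	n := int(binary.BigEndian.Uint32(rec[0:4]))
	payload := rec[8 : 8+n]
	if crc32.Checksum(payload, castagnoli) != binary.BigEndian.Uint32(rec[4:8]) {
		return nil, errors.New("manifest has checksum mismatch")
	}
	return payload, nil
}

func main() {
	rec := frame([]byte("stand-in change set"))
	payload, err := unframe(rec)
	fmt.Println(string(payload), err) // stand-in change set <nil>
}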
-
-// Has to be 4 bytes. The value can never change, ever, anyway.
-var magicText = [4]byte{'B', 'd', 'g', 'r'}
-
-// The magic version number.
-const magicVersion = 4
-
-func helpRewrite(dir string, m *Manifest) (*os.File, int, error) {
- rewritePath := filepath.Join(dir, manifestRewriteFilename)
- // We explicitly sync.
- fp, err := y.OpenTruncFile(rewritePath, false)
- if err != nil {
- return nil, 0, err
- }
-
- buf := make([]byte, 8)
- copy(buf[0:4], magicText[:])
- binary.BigEndian.PutUint32(buf[4:8], magicVersion)
-
- netCreations := len(m.Tables)
- changes := m.asChanges()
- set := pb.ManifestChangeSet{Changes: changes}
-
- changeBuf, err := proto.Marshal(&set)
- if err != nil {
- fp.Close()
- return nil, 0, err
- }
- var lenCrcBuf [8]byte
- binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(changeBuf)))
- binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(changeBuf, y.CastagnoliCrcTable))
- buf = append(buf, lenCrcBuf[:]...)
- buf = append(buf, changeBuf...)
- if _, err := fp.Write(buf); err != nil {
- fp.Close()
- return nil, 0, err
- }
- if err := y.FileSync(fp); err != nil {
- fp.Close()
- return nil, 0, err
- }
-
- // In Windows the files should be closed before doing a Rename.
- if err = fp.Close(); err != nil {
- return nil, 0, err
- }
- manifestPath := filepath.Join(dir, ManifestFilename)
- if err := os.Rename(rewritePath, manifestPath); err != nil {
- return nil, 0, err
- }
- fp, err = y.OpenExistingFile(manifestPath, 0)
- if err != nil {
- return nil, 0, err
- }
- if _, err := fp.Seek(0, io.SeekEnd); err != nil {
- fp.Close()
- return nil, 0, err
- }
- if err := syncDir(dir); err != nil {
- fp.Close()
- return nil, 0, err
- }
-
- return fp, netCreations, nil
-}
-
-// Must be called while appendLock is held.
-func (mf *manifestFile) rewrite() error {
- // In Windows the files should be closed before doing a Rename.
- if err := mf.fp.Close(); err != nil {
- return err
- }
- fp, netCreations, err := helpRewrite(mf.directory, &mf.manifest)
- if err != nil {
- return err
- }
- mf.fp = fp
- mf.manifest.Creations = netCreations
- mf.manifest.Deletions = 0
-
- return nil
-}
-
-type countingReader struct {
- wrapped *bufio.Reader
- count int64
-}
-
-func (r *countingReader) Read(p []byte) (n int, err error) {
- n, err = r.wrapped.Read(p)
- r.count += int64(n)
- return
-}
-
-func (r *countingReader) ReadByte() (b byte, err error) {
- b, err = r.wrapped.ReadByte()
- if err == nil {
- r.count++
- }
- return
-}
-
-var (
- errBadMagic = errors.New("manifest has bad magic")
- errBadChecksum = errors.New("manifest has checksum mismatch")
-)
-
-// ReplayManifestFile reads the manifest file and constructs two manifest objects. (We need one
-// immutable copy and one mutable copy of the manifest. Easiest way is to construct two of them.)
-// Also, returns the last offset after a completely read manifest entry -- the file must be
-// truncated at that point before further appends are made (if there is a partial entry after
-// that). In normal conditions, truncOffset is the file size.
-func ReplayManifestFile(fp *os.File) (Manifest, int64, error) {
- r := countingReader{wrapped: bufio.NewReader(fp)}
-
- var magicBuf [8]byte
- if _, err := io.ReadFull(&r, magicBuf[:]); err != nil {
- return Manifest{}, 0, errBadMagic
- }
- if !bytes.Equal(magicBuf[0:4], magicText[:]) {
- return Manifest{}, 0, errBadMagic
- }
- version := binary.BigEndian.Uint32(magicBuf[4:8])
- if version != magicVersion {
- return Manifest{}, 0,
- //nolint:lll
- fmt.Errorf("manifest has unsupported version: %d (we support %d).\n"+
- "Please see https://github.com/dgraph-io/badger/blob/master/README.md#i-see-manifest-has-unsupported-version-x-we-support-y-error"+
- " on how to fix this.",
- version, magicVersion)
- }
-
- stat, err := fp.Stat()
- if err != nil {
- return Manifest{}, 0, err
- }
-
- build := createManifest()
- var offset int64
- for {
- offset = r.count
- var lenCrcBuf [8]byte
- _, err := io.ReadFull(&r, lenCrcBuf[:])
- if err != nil {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- break
- }
- return Manifest{}, 0, err
- }
- length := binary.BigEndian.Uint32(lenCrcBuf[0:4])
- // Sanity check to ensure we don't over-allocate memory.
- if length > uint32(stat.Size()) {
- return Manifest{}, 0, errors.Errorf(
- "Buffer length: %d greater than file size: %d. Manifest file might be corrupted",
- length, stat.Size())
- }
- var buf = make([]byte, length)
- if _, err := io.ReadFull(&r, buf); err != nil {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- break
- }
- return Manifest{}, 0, err
- }
- if crc32.Checksum(buf, y.CastagnoliCrcTable) != binary.BigEndian.Uint32(lenCrcBuf[4:8]) {
- return Manifest{}, 0, errBadChecksum
- }
-
- var changeSet pb.ManifestChangeSet
- if err := proto.Unmarshal(buf, &changeSet); err != nil {
- return Manifest{}, 0, err
- }
-
- if err := applyChangeSet(&build, &changeSet); err != nil {
- return Manifest{}, 0, err
- }
- }
-
- return build, offset, nil
-}
-
-func applyManifestChange(build *Manifest, tc *pb.ManifestChange) error {
- switch tc.Op {
- case pb.ManifestChange_CREATE:
- if _, ok := build.Tables[tc.Id]; ok {
- return fmt.Errorf("MANIFEST invalid, table %d exists", tc.Id)
- }
- build.Tables[tc.Id] = TableManifest{
- Level: uint8(tc.Level),
- Checksum: append([]byte{}, tc.Checksum...),
- }
- for len(build.Levels) <= int(tc.Level) {
- build.Levels = append(build.Levels, levelManifest{make(map[uint64]struct{})})
- }
- build.Levels[tc.Level].Tables[tc.Id] = struct{}{}
- build.Creations++
- case pb.ManifestChange_DELETE:
- tm, ok := build.Tables[tc.Id]
- if !ok {
- return fmt.Errorf("MANIFEST removes non-existing table %d", tc.Id)
- }
- delete(build.Levels[tm.Level].Tables, tc.Id)
- delete(build.Tables, tc.Id)
- build.Deletions++
- default:
- return fmt.Errorf("MANIFEST file has invalid manifestChange op")
- }
- return nil
-}
-
-// This is not a "recoverable" error -- opening the KV store fails because the MANIFEST file is
-// just plain broken.
-func applyChangeSet(build *Manifest, changeSet *pb.ManifestChangeSet) error {
- for _, change := range changeSet.Changes {
- if err := applyManifestChange(build, change); err != nil {
- return err
- }
- }
- return nil
-}
-
-func newCreateChange(id uint64, level int, checksum []byte) *pb.ManifestChange {
- return &pb.ManifestChange{
- Id: id,
- Op: pb.ManifestChange_CREATE,
- Level: uint32(level),
- Checksum: checksum,
- }
-}
-
-func newDeleteChange(id uint64) *pb.ManifestChange {
- return &pb.ManifestChange{
- Id: id,
- Op: pb.ManifestChange_DELETE,
- }
-}
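For reference, the manifest framing that `ReplayManifestFile` above undoes is simple: after the 8-byte magic/version header, each entry is a 4-byte big-endian payload length, a 4-byte big-endian Castagnoli CRC32 of the payload, and then the marshaled `pb.ManifestChangeSet` itself. A minimal sketch of the writer side, using only the standard library (the payload here is a stand-in, not a real change set):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// frameEntry prepends the 8-byte header ReplayManifestFile reads back:
// big-endian payload length, then big-endian CRC32 (Castagnoli) of the payload.
func frameEntry(payload []byte) []byte {
	var hdr [8]byte
	binary.BigEndian.PutUint32(hdr[0:4], uint32(len(payload)))
	binary.BigEndian.PutUint32(hdr[4:8], crc32.Checksum(payload, castagnoli))
	return append(hdr[:], payload...)
}

func main() {
	entry := frameEntry([]byte("marshaled-changeset")) // stand-in payload
	fmt.Printf("% x\n", entry[:8])                     // length + checksum header
}
```

A truncated trailing entry fails either the `io.ReadFull` or the checksum test, which is why replay returns the offset of the last complete entry so the file can be truncated there before further appends.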
diff --git a/vendor/github.com/dgraph-io/badger/merge.go b/vendor/github.com/dgraph-io/badger/merge.go
deleted file mode 100644
index 02ad4bcd..00000000
--- a/vendor/github.com/dgraph-io/badger/merge.go
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "sync"
- "time"
-
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-// MergeOperator represents a Badger merge operator.
-type MergeOperator struct {
- sync.RWMutex
- f MergeFunc
- db *DB
- key []byte
- closer *y.Closer
-}
-
-// MergeFunc accepts two byte slices, one representing an existing value, and
-// another representing a new value that needs to be ‘merged’ into it. MergeFunc
-// contains the logic to perform the ‘merge’ and return an updated value.
-// MergeFunc could perform operations like integer addition, list appends etc.
-// Note that the ordering of the operands is maintained.
-type MergeFunc func(existingVal, newVal []byte) []byte
-
-// GetMergeOperator creates a new MergeOperator for a given key and returns a
-// pointer to it. It also fires off a goroutine that periodically performs a
-// compaction using the merge function, at the interval specified by dur.
-func (db *DB) GetMergeOperator(key []byte,
- f MergeFunc, dur time.Duration) *MergeOperator {
- op := &MergeOperator{
- f: f,
- db: db,
- key: key,
- closer: y.NewCloser(1),
- }
-
- go op.runCompactions(dur)
- return op
-}
-
-var errNoMerge = errors.New("No need for merge")
-
-func (op *MergeOperator) iterateAndMerge() (newVal []byte, latest uint64, err error) {
- txn := op.db.NewTransaction(false)
- defer txn.Discard()
- opt := DefaultIteratorOptions
- opt.AllVersions = true
- it := txn.NewKeyIterator(op.key, opt)
- defer it.Close()
-
- var numVersions int
- for it.Rewind(); it.Valid(); it.Next() {
- item := it.Item()
- numVersions++
- if numVersions == 1 {
- // This should be the newVal, considering this is the latest version.
- newVal, err = item.ValueCopy(newVal)
- if err != nil {
- return nil, 0, err
- }
- latest = item.Version()
- } else {
- if err := item.Value(func(oldVal []byte) error {
- // The merge should always be on the newVal considering it has the merge result of
- // the latest version. The value read should be the oldVal.
- newVal = op.f(oldVal, newVal)
- return nil
- }); err != nil {
- return nil, 0, err
- }
- }
- if item.DiscardEarlierVersions() {
- break
- }
- }
- if numVersions == 0 {
- return nil, latest, ErrKeyNotFound
- } else if numVersions == 1 {
- return newVal, latest, errNoMerge
- }
- return newVal, latest, nil
-}
-
-func (op *MergeOperator) compact() error {
- op.Lock()
- defer op.Unlock()
- val, version, err := op.iterateAndMerge()
- if err == ErrKeyNotFound || err == errNoMerge {
- return nil
- } else if err != nil {
- return err
- }
- entries := []*Entry{
- {
- Key: y.KeyWithTs(op.key, version),
- Value: val,
- meta: bitDiscardEarlierVersions,
- },
- }
- // Write value back to the DB. It is important that we do not set the bitMergeEntry bit
- // here. When compaction happens, all the older merged entries will be removed.
- return op.db.batchSetAsync(entries, func(err error) {
- if err != nil {
- op.db.opt.Errorf("failed to insert the result of merge compaction: %s", err)
- }
- })
-}
-
-func (op *MergeOperator) runCompactions(dur time.Duration) {
- ticker := time.NewTicker(dur)
- defer op.closer.Done()
- var stop bool
- for {
- select {
- case <-op.closer.HasBeenClosed():
- stop = true
- case <-ticker.C: // wait for tick
- }
- if err := op.compact(); err != nil {
- op.db.opt.Errorf("failure while running merge operation: %s", err)
- }
- if stop {
- ticker.Stop()
- break
- }
- }
-}
-
-// Add records a value in Badger which will eventually be merged by a background
-// routine into the values that were recorded by previous invocations to Add().
-func (op *MergeOperator) Add(val []byte) error {
- return op.db.Update(func(txn *Txn) error {
- return txn.SetEntry(NewEntry(op.key, val).withMergeBit())
- })
-}
-
-// Get returns the latest value for the merge operator, which is derived by
-// applying the merge function to all the values added so far.
-//
-// If Add has not been called even once, Get will return ErrKeyNotFound.
-func (op *MergeOperator) Get() ([]byte, error) {
- op.RLock()
- defer op.RUnlock()
- var existing []byte
- err := op.db.View(func(txn *Txn) (err error) {
- existing, _, err = op.iterateAndMerge()
- return err
- })
- if err == errNoMerge {
- return existing, nil
- }
- return existing, err
-}
-
-// Stop waits for any pending merge to complete and then stops the background
-// goroutine.
-func (op *MergeOperator) Stop() {
- op.closer.SignalAndWait()
-}
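Taken together, the MergeOperator API above composes like this; a hedged usage sketch, where the DB path, key name, and counter-style merge function are illustrative rather than taken from this repository:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"log"
	"time"

	badger "github.com/dgraph-io/badger"
)

// add treats both operands as big-endian uint64 counters and sums them.
// Addition is commutative, so operand ordering does not matter here.
func add(existing, latest []byte) []byte {
	out := make([]byte, 8)
	binary.BigEndian.PutUint64(out,
		binary.BigEndian.Uint64(existing)+binary.BigEndian.Uint64(latest))
	return out
}

func u64(v uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, v)
	return b
}

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger-merge"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Compact this key's versions with add() every 200ms in the background.
	m := db.GetMergeOperator([]byte("counter"), add, 200*time.Millisecond)
	defer m.Stop() // waits for any in-flight merge compaction

	for _, v := range []uint64{1, 2, 3} {
		if err := m.Add(u64(v)); err != nil {
			log.Fatal(err)
		}
	}

	res, err := m.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(binary.BigEndian.Uint64(res)) // 6
}
```

Note that `Get` merges on the fly via `iterateAndMerge`, so the result is correct even before the background compaction has run.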
diff --git a/vendor/github.com/dgraph-io/badger/options.go b/vendor/github.com/dgraph-io/badger/options.go
deleted file mode 100644
index f396c7ea..00000000
--- a/vendor/github.com/dgraph-io/badger/options.go
+++ /dev/null
@@ -1,420 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "github.com/dgraph-io/badger/options"
-)
-
-// Note: If you add a new option X make sure you also add a WithX method on Options.
-
-// Options are params for creating DB object.
-//
-// This package provides DefaultOptions which contains options that should
-// work for most applications. Consider using that as a starting point before
-// customizing it for your own needs.
-//
-// Each option X is documented on the WithX method.
-type Options struct {
- // Required options.
-
- Dir string
- ValueDir string
-
- // Usually modified options.
-
- SyncWrites bool
- TableLoadingMode options.FileLoadingMode
- ValueLogLoadingMode options.FileLoadingMode
- NumVersionsToKeep int
- ReadOnly bool
- Truncate bool
- Logger Logger
- EventLogging bool
-
- // Fine tuning options.
-
- MaxTableSize int64
- LevelSizeMultiplier int
- MaxLevels int
- ValueThreshold int
- NumMemtables int
-
- NumLevelZeroTables int
- NumLevelZeroTablesStall int
-
- LevelOneSize int64
- ValueLogFileSize int64
- ValueLogMaxEntries uint32
-
- NumCompactors int
- CompactL0OnClose bool
- LogRotatesToFlush int32
- // When set, checksum will be validated for each entry read from the value log file.
- VerifyValueChecksum bool
-
- // BypassLockGuard will bypass the lock guard on badger. Bypassing the lock
- // guard can cause data corruption if multiple badger instances are using
- // the same directory. Use this option with caution.
- BypassLockGuard bool
-
- // Transaction start and commit timestamps are managed by end-user.
- // This is only useful for databases built on top of Badger (like Dgraph).
- // Not recommended for most users.
- managedTxns bool
-
- // 4. Flags for testing purposes
- // ------------------------------
- maxBatchCount int64 // max entries in batch
- maxBatchSize int64 // max batch size in bytes
-
-}
-
-// DefaultOptions sets a list of recommended options for good performance.
-// Feel free to modify these to suit your needs with the WithX methods.
-func DefaultOptions(path string) Options {
- return Options{
- Dir: path,
- ValueDir: path,
- LevelOneSize: 256 << 20,
- LevelSizeMultiplier: 10,
- TableLoadingMode: options.MemoryMap,
- ValueLogLoadingMode: options.MemoryMap,
- // table.MemoryMap to mmap() the tables.
- // table.Nothing to not preload the tables.
- MaxLevels: 7,
- MaxTableSize: 64 << 20,
- NumCompactors: 2, // Compactions can be expensive. Only run 2.
- NumLevelZeroTables: 5,
- NumLevelZeroTablesStall: 10,
- NumMemtables: 5,
- SyncWrites: true,
- NumVersionsToKeep: 1,
- CompactL0OnClose: true,
- VerifyValueChecksum: false,
- // Nothing to read/write value log using standard File I/O
- // MemoryMap to mmap() the value log files
- // (2^30 - 1)*2 when mmapping < 2^31 - 1, max int32.
- // -1 so 2*ValueLogFileSize won't overflow on 32-bit systems.
- ValueLogFileSize: 1<<30 - 1,
-
- ValueLogMaxEntries: 1000000,
- ValueThreshold: 32,
- Truncate: false,
- Logger: defaultLogger,
- EventLogging: true,
- LogRotatesToFlush: 2,
- }
-}
-
-// LSMOnlyOptions follows from DefaultOptions, but sets a higher ValueThreshold
-// so values are colocated with the LSM tree, with the value log largely acting
-// as a write-ahead log only. These options reduce the disk usage of the value
-// log and make Badger act more like a typical LSM tree.
-func LSMOnlyOptions(path string) Options {
- // Max value length which fits in uint16.
- // Let's not set any other options, because they can cause issues with the
- // size of key-value pairs a user can pass to Badger. For example, if we set
- // ValueLogFileSize to 64MB, a user can't pass a value larger than that.
- // Setting ValueLogMaxEntries to 1000 can generate too many files.
- // These options are better configured per use case than broadly here.
- // ValueThreshold is the most important setting a user needs to tune to
- // achieve heavier usage of the LSM tree.
- // NOTE: If a user does not want to set 64KB as the ValueThreshold because
- // of performance reasons, 1KB would be a good option too, allowing
- // values smaller than 1KB to be colocated with the keys in the LSM tree.
- return DefaultOptions(path).WithValueThreshold(65500)
-}
-
-// WithDir returns a new Options value with Dir set to the given value.
-//
-// Dir is the path of the directory where key data will be stored in.
-// If it doesn't exist, Badger will try to create it for you.
-// This is set automatically to be the path given to `DefaultOptions`.
-func (opt Options) WithDir(val string) Options {
- opt.Dir = val
- return opt
-}
-
-// WithValueDir returns a new Options value with ValueDir set to the given value.
-//
-// ValueDir is the path of the directory where value data will be stored in.
-// If it doesn't exist, Badger will try to create it for you.
-// This is set automatically to be the path given to `DefaultOptions`.
-func (opt Options) WithValueDir(val string) Options {
- opt.ValueDir = val
- return opt
-}
-
-// WithSyncWrites returns a new Options value with SyncWrites set to the given value.
-//
-// When SyncWrites is true all writes are synced to disk. Setting this to false would achieve better
-// performance, but may cause data loss in case of crash.
-//
-// The default value of SyncWrites is true.
-func (opt Options) WithSyncWrites(val bool) Options {
- opt.SyncWrites = val
- return opt
-}
-
-// WithTableLoadingMode returns a new Options value with TableLoadingMode set to the given value.
-//
-// TableLoadingMode indicates which file loading mode should be used for the LSM tree data files.
-//
-// The default value of TableLoadingMode is options.MemoryMap.
-func (opt Options) WithTableLoadingMode(val options.FileLoadingMode) Options {
- opt.TableLoadingMode = val
- return opt
-}
-
-// WithValueLogLoadingMode returns a new Options value with ValueLogLoadingMode set to the given
-// value.
-//
-// ValueLogLoadingMode indicates which file loading mode should be used for the value log data
-// files.
-//
-// The default value of ValueLogLoadingMode is options.MemoryMap.
-func (opt Options) WithValueLogLoadingMode(val options.FileLoadingMode) Options {
- opt.ValueLogLoadingMode = val
- return opt
-}
-
-// WithNumVersionsToKeep returns a new Options value with NumVersionsToKeep set to the given value.
-//
-// NumVersionsToKeep sets how many versions to keep per key at most.
-//
-// The default value of NumVersionsToKeep is 1.
-func (opt Options) WithNumVersionsToKeep(val int) Options {
- opt.NumVersionsToKeep = val
- return opt
-}
-
-// WithReadOnly returns a new Options value with ReadOnly set to the given value.
-//
-// When ReadOnly is true the DB will be opened on read-only mode.
-// Multiple processes can open the same Badger DB.
-// Note: if the DB being opened had crashed before and has vlog data to be replayed,
-// ReadOnly will cause Open to fail with an appropriate message.
-//
-// The default value of ReadOnly is false.
-func (opt Options) WithReadOnly(val bool) Options {
- opt.ReadOnly = val
- return opt
-}
-
-// WithTruncate returns a new Options value with Truncate set to the given value.
-//
-// Truncate indicates whether value log files should be truncated to delete corrupt data, if any.
-// This option is ignored when ReadOnly is true.
-//
-// The default value of Truncate is false.
-func (opt Options) WithTruncate(val bool) Options {
- opt.Truncate = val
- return opt
-}
-
-// WithLogger returns a new Options value with Logger set to the given value.
-//
-// Logger provides a way to configure what logger each value of badger.DB uses.
-//
-// The default value of Logger writes to stderr using the log package from the Go standard library.
-func (opt Options) WithLogger(val Logger) Options {
- opt.Logger = val
- return opt
-}
-
-// WithEventLogging returns a new Options value with EventLogging set to the given value.
-//
-// EventLogging provides a way to enable or disable trace.EventLog logging.
-//
-// The default value of EventLogging is true.
-func (opt Options) WithEventLogging(enabled bool) Options {
- opt.EventLogging = enabled
- return opt
-}
-
-// WithMaxTableSize returns a new Options value with MaxTableSize set to the given value.
-//
-// MaxTableSize sets the maximum size in bytes for each LSM table or file.
-//
-// The default value of MaxTableSize is 64MB.
-func (opt Options) WithMaxTableSize(val int64) Options {
- opt.MaxTableSize = val
- return opt
-}
-
-// WithLevelSizeMultiplier returns a new Options value with LevelSizeMultiplier set to the given
-// value.
-//
-// LevelSizeMultiplier sets the ratio between the maximum sizes of contiguous levels in the LSM.
-// Once a level grows to be larger than this ratio allows, the compaction process will be
-// triggered.
-//
-// The default value of LevelSizeMultiplier is 10.
-func (opt Options) WithLevelSizeMultiplier(val int) Options {
- opt.LevelSizeMultiplier = val
- return opt
-}
-
-// WithMaxLevels returns a new Options value with MaxLevels set to the given value.
-//
-// Maximum number of levels of compaction allowed in the LSM.
-//
-// The default value of MaxLevels is 7.
-func (opt Options) WithMaxLevels(val int) Options {
- opt.MaxLevels = val
- return opt
-}
-
-// WithValueThreshold returns a new Options value with ValueThreshold set to the given value.
-//
-// ValueThreshold sets the threshold used to decide whether a value is stored directly in the LSM
-// tree or separately in the value log files.
-//
-// The default value of ValueThreshold is 32, but LSMOnlyOptions sets it to 65500.
-func (opt Options) WithValueThreshold(val int) Options {
- opt.ValueThreshold = val
- return opt
-}
-
-// WithNumMemtables returns a new Options value with NumMemtables set to the given value.
-//
-// NumMemtables sets the maximum number of tables to keep in memory before stalling.
-//
-// The default value of NumMemtables is 5.
-func (opt Options) WithNumMemtables(val int) Options {
- opt.NumMemtables = val
- return opt
-}
-
-// WithNumLevelZeroTables returns a new Options value with NumLevelZeroTables set to the given
-// value.
-//
-// NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts.
-//
-// The default value of NumLevelZeroTables is 5.
-func (opt Options) WithNumLevelZeroTables(val int) Options {
- opt.NumLevelZeroTables = val
- return opt
-}
-
-// WithNumLevelZeroTablesStall returns a new Options value with NumLevelZeroTablesStall set to the
-// given value.
-//
-// NumLevelZeroTablesStall sets the number of Level 0 tables that, once reached, causes the DB to
-// stall until compaction succeeds.
-//
-// The default value of NumLevelZeroTablesStall is 10.
-func (opt Options) WithNumLevelZeroTablesStall(val int) Options {
- opt.NumLevelZeroTablesStall = val
- return opt
-}
-
-// WithLevelOneSize returns a new Options value with LevelOneSize set to the given value.
-//
-// LevelOneSize sets the maximum total size for Level 1.
-//
-// The default value of LevelOneSize is 256MB, as set by DefaultOptions.
-func (opt Options) WithLevelOneSize(val int64) Options {
- opt.LevelOneSize = val
- return opt
-}
-
-// WithValueLogFileSize returns a new Options value with ValueLogFileSize set to the given value.
-//
-// ValueLogFileSize sets the maximum size of a single value log file.
-//
-// The default value of ValueLogFileSize is 1GB.
-func (opt Options) WithValueLogFileSize(val int64) Options {
- opt.ValueLogFileSize = val
- return opt
-}
-
-// WithValueLogMaxEntries returns a new Options value with ValueLogMaxEntries set to the given
-// value.
-//
-// ValueLogMaxEntries sets the approximate maximum number of entries a value log file can hold.
-// The actual size limit of a value log file is the minimum of ValueLogFileSize and
-// ValueLogMaxEntries.
-//
-// The default value of ValueLogMaxEntries is one million (1000000).
-func (opt Options) WithValueLogMaxEntries(val uint32) Options {
- opt.ValueLogMaxEntries = val
- return opt
-}
-
-// WithNumCompactors returns a new Options value with NumCompactors set to the given value.
-//
-// NumCompactors sets the number of compaction workers to run concurrently.
-// Setting this to zero stops compactions, which could eventually cause writes to block forever.
-//
-// The default value of NumCompactors is 2.
-func (opt Options) WithNumCompactors(val int) Options {
- opt.NumCompactors = val
- return opt
-}
-
-// WithCompactL0OnClose returns a new Options value with CompactL0OnClose set to the given value.
-//
-// CompactL0OnClose determines whether Level 0 should be compacted before closing the DB.
-// This ensures that both reads and writes are efficient when the DB is opened later.
-//
-// The default value of CompactL0OnClose is true.
-func (opt Options) WithCompactL0OnClose(val bool) Options {
- opt.CompactL0OnClose = val
- return opt
-}
-
-// WithLogRotatesToFlush returns a new Options value with LogRotatesToFlush set to the given value.
-//
-// LogRotatesToFlush sets the number of value log file rotations after which the Memtables are
-// flushed to disk. This is useful in write workloads with fewer keys and larger values, which
-// fill up the value logs quickly without filling up the Memtables. On a crash and restart, the
-// value log head could then force the replay of a good number of value log files, which can
-// slow things down on startup.
-//
-// The default value of LogRotatesToFlush is 2.
-func (opt Options) WithLogRotatesToFlush(val int32) Options {
- opt.LogRotatesToFlush = val
- return opt
-}
-
-// WithVerifyValueChecksum returns a new Options value with VerifyValueChecksum set to
-// the given value.
-//
-// When VerifyValueChecksum is set to true, the checksum will be verified for every entry read
-// from the value log. If the value is stored in the SST (value size less than the value
-// threshold), checksum validation is skipped.
-//
-// The default value of VerifyValueChecksum is false.
-func (opt Options) WithVerifyValueChecksum(val bool) Options {
- opt.VerifyValueChecksum = val
- return opt
-}
-
-// WithBypassLockGuard returns a new Options value with BypassLockGuard
-// set to the given value.
-//
-// When BypassLockGuard option is set, badger will not acquire a lock on the
-// directory. This could lead to data corruption if multiple badger instances
-// write to the same data directory. Use this option with caution.
-//
-// The default value of BypassLockGuard is false.
-func (opt Options) WithBypassLockGuard(b bool) Options {
- opt.BypassLockGuard = b
- return opt
-}
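A short sketch of how the WithX builders above are meant to chain: each returns a modified copy of Options, so calls compose without mutating shared state. The path and values below are illustrative:

```go
package main

import (
	"log"

	badger "github.com/dgraph-io/badger"
)

func main() {
	// Start from the recommended defaults and override selectively.
	opts := badger.DefaultOptions("/tmp/badger-data").
		WithSyncWrites(false).          // trade crash durability for write throughput
		WithNumCompactors(4).           // run more compactions concurrently
		WithValueLogFileSize(512 << 20) // 512MB value log segments

	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```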
diff --git a/vendor/github.com/dgraph-io/badger/options/options.go b/vendor/github.com/dgraph-io/badger/options/options.go
deleted file mode 100644
index 06c8b1b7..00000000
--- a/vendor/github.com/dgraph-io/badger/options/options.go
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package options
-
-// FileLoadingMode specifies how data in LSM table files and value log files should
-// be loaded.
-type FileLoadingMode int
-
-const (
- // FileIO indicates that files must be loaded using standard I/O
- FileIO FileLoadingMode = iota
- // LoadToRAM indicates that the file must be loaded into RAM
- LoadToRAM
- // MemoryMap indicates that the file must be memory-mapped
- MemoryMap
-)
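These modes plug into the TableLoadingMode and ValueLogLoadingMode options shown earlier; a small illustrative sketch:

```go
package main

import (
	badger "github.com/dgraph-io/badger"
	"github.com/dgraph-io/badger/options"
)

func main() {
	// Keep LSM tables in RAM for read-heavy workloads; use plain file I/O
	// for the value log. Whether this trade-off is sensible depends on
	// table sizes versus available memory -- values here are illustrative.
	opts := badger.DefaultOptions("/tmp/badger-data").
		WithTableLoadingMode(options.LoadToRAM).
		WithValueLogLoadingMode(options.FileIO)
	_ = opts
}
```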
diff --git a/vendor/github.com/dgraph-io/badger/pb/gen.sh b/vendor/github.com/dgraph-io/badger/pb/gen.sh
deleted file mode 100644
index 49b44ff4..00000000
--- a/vendor/github.com/dgraph-io/badger/pb/gen.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-# You might need to go get -v github.com/gogo/protobuf/...
-
-protos=${GOPATH-$HOME/go}/src/github.com/dgraph-io/badger/pb
-pushd $protos > /dev/null
-protoc --gofast_out=plugins=grpc:. -I=. pb.proto
diff --git a/vendor/github.com/dgraph-io/badger/pb/pb.pb.go b/vendor/github.com/dgraph-io/badger/pb/pb.pb.go
deleted file mode 100644
index 6fd3d07c..00000000
--- a/vendor/github.com/dgraph-io/badger/pb/pb.pb.go
+++ /dev/null
@@ -1,1359 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: pb.proto
-
-package pb
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type ManifestChange_Operation int32
-
-const (
- ManifestChange_CREATE ManifestChange_Operation = 0
- ManifestChange_DELETE ManifestChange_Operation = 1
-)
-
-var ManifestChange_Operation_name = map[int32]string{
- 0: "CREATE",
- 1: "DELETE",
-}
-
-var ManifestChange_Operation_value = map[string]int32{
- "CREATE": 0,
- "DELETE": 1,
-}
-
-func (x ManifestChange_Operation) String() string {
- return proto.EnumName(ManifestChange_Operation_name, int32(x))
-}
-
-func (ManifestChange_Operation) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_f80abaa17e25ccc8, []int{3, 0}
-}
-
-type KV struct {
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- UserMeta []byte `protobuf:"bytes,3,opt,name=user_meta,json=userMeta,proto3" json:"user_meta,omitempty"`
- Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
- ExpiresAt uint64 `protobuf:"varint,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"`
- Meta []byte `protobuf:"bytes,6,opt,name=meta,proto3" json:"meta,omitempty"`
- // Stream id is used to identify which stream the KV came from.
- StreamId uint32 `protobuf:"varint,10,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"`
- // Stream done is used to indicate end of stream.
- StreamDone bool `protobuf:"varint,11,opt,name=stream_done,json=streamDone,proto3" json:"stream_done,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *KV) Reset() { *m = KV{} }
-func (m *KV) String() string { return proto.CompactTextString(m) }
-func (*KV) ProtoMessage() {}
-func (*KV) Descriptor() ([]byte, []int) {
- return fileDescriptor_f80abaa17e25ccc8, []int{0}
-}
-func (m *KV) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KV.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KV) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KV.Merge(m, src)
-}
-func (m *KV) XXX_Size() int {
- return m.Size()
-}
-func (m *KV) XXX_DiscardUnknown() {
- xxx_messageInfo_KV.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KV proto.InternalMessageInfo
-
-func (m *KV) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *KV) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *KV) GetUserMeta() []byte {
- if m != nil {
- return m.UserMeta
- }
- return nil
-}
-
-func (m *KV) GetVersion() uint64 {
- if m != nil {
- return m.Version
- }
- return 0
-}
-
-func (m *KV) GetExpiresAt() uint64 {
- if m != nil {
- return m.ExpiresAt
- }
- return 0
-}
-
-func (m *KV) GetMeta() []byte {
- if m != nil {
- return m.Meta
- }
- return nil
-}
-
-func (m *KV) GetStreamId() uint32 {
- if m != nil {
- return m.StreamId
- }
- return 0
-}
-
-func (m *KV) GetStreamDone() bool {
- if m != nil {
- return m.StreamDone
- }
- return false
-}
-
-type KVList struct {
- Kv []*KV `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *KVList) Reset() { *m = KVList{} }
-func (m *KVList) String() string { return proto.CompactTextString(m) }
-func (*KVList) ProtoMessage() {}
-func (*KVList) Descriptor() ([]byte, []int) {
- return fileDescriptor_f80abaa17e25ccc8, []int{1}
-}
-func (m *KVList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KVList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KVList.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KVList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KVList.Merge(m, src)
-}
-func (m *KVList) XXX_Size() int {
- return m.Size()
-}
-func (m *KVList) XXX_DiscardUnknown() {
- xxx_messageInfo_KVList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KVList proto.InternalMessageInfo
-
-func (m *KVList) GetKv() []*KV {
- if m != nil {
- return m.Kv
- }
- return nil
-}
-
-type ManifestChangeSet struct {
- // A set of changes that are applied atomically.
- Changes []*ManifestChange `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ManifestChangeSet) Reset() { *m = ManifestChangeSet{} }
-func (m *ManifestChangeSet) String() string { return proto.CompactTextString(m) }
-func (*ManifestChangeSet) ProtoMessage() {}
-func (*ManifestChangeSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_f80abaa17e25ccc8, []int{2}
-}
-func (m *ManifestChangeSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ManifestChangeSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ManifestChangeSet.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ManifestChangeSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ManifestChangeSet.Merge(m, src)
-}
-func (m *ManifestChangeSet) XXX_Size() int {
- return m.Size()
-}
-func (m *ManifestChangeSet) XXX_DiscardUnknown() {
- xxx_messageInfo_ManifestChangeSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ManifestChangeSet proto.InternalMessageInfo
-
-func (m *ManifestChangeSet) GetChanges() []*ManifestChange {
- if m != nil {
- return m.Changes
- }
- return nil
-}
-
-type ManifestChange struct {
- Id uint64 `protobuf:"varint,1,opt,name=Id,proto3" json:"Id,omitempty"`
- Op ManifestChange_Operation `protobuf:"varint,2,opt,name=Op,proto3,enum=pb.ManifestChange_Operation" json:"Op,omitempty"`
- Level uint32 `protobuf:"varint,3,opt,name=Level,proto3" json:"Level,omitempty"`
- Checksum []byte `protobuf:"bytes,4,opt,name=Checksum,proto3" json:"Checksum,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ManifestChange) Reset() { *m = ManifestChange{} }
-func (m *ManifestChange) String() string { return proto.CompactTextString(m) }
-func (*ManifestChange) ProtoMessage() {}
-func (*ManifestChange) Descriptor() ([]byte, []int) {
- return fileDescriptor_f80abaa17e25ccc8, []int{3}
-}
-func (m *ManifestChange) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ManifestChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ManifestChange.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ManifestChange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ManifestChange.Merge(m, src)
-}
-func (m *ManifestChange) XXX_Size() int {
- return m.Size()
-}
-func (m *ManifestChange) XXX_DiscardUnknown() {
- xxx_messageInfo_ManifestChange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ManifestChange proto.InternalMessageInfo
-
-func (m *ManifestChange) GetId() uint64 {
- if m != nil {
- return m.Id
- }
- return 0
-}
-
-func (m *ManifestChange) GetOp() ManifestChange_Operation {
- if m != nil {
- return m.Op
- }
- return ManifestChange_CREATE
-}
-
-func (m *ManifestChange) GetLevel() uint32 {
- if m != nil {
- return m.Level
- }
- return 0
-}
-
-func (m *ManifestChange) GetChecksum() []byte {
- if m != nil {
- return m.Checksum
- }
- return nil
-}
-
-func init() {
- proto.RegisterEnum("pb.ManifestChange_Operation", ManifestChange_Operation_name, ManifestChange_Operation_value)
- proto.RegisterType((*KV)(nil), "pb.KV")
- proto.RegisterType((*KVList)(nil), "pb.KVList")
- proto.RegisterType((*ManifestChangeSet)(nil), "pb.ManifestChangeSet")
- proto.RegisterType((*ManifestChange)(nil), "pb.ManifestChange")
-}
-
-func init() { proto.RegisterFile("pb.proto", fileDescriptor_f80abaa17e25ccc8) }
-
-var fileDescriptor_f80abaa17e25ccc8 = []byte{
- // 384 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0x4f, 0x6a, 0xdb, 0x40,
- 0x14, 0xc6, 0x33, 0x63, 0x47, 0x91, 0x9f, 0x13, 0xe3, 0x3e, 0x4a, 0x19, 0xfa, 0xc7, 0x15, 0xea,
- 0x46, 0x8b, 0xa0, 0x45, 0x7a, 0x02, 0xd7, 0xd1, 0xc2, 0xd8, 0xc1, 0x30, 0x0d, 0xde, 0x9a, 0x51,
- 0xf4, 0xda, 0x08, 0xc5, 0xd2, 0xa0, 0x19, 0x8b, 0xf6, 0x26, 0xbd, 0x40, 0xef, 0xd2, 0x65, 0x17,
- 0x3d, 0x40, 0x71, 0x2f, 0x52, 0x34, 0x72, 0x02, 0x26, 0xbb, 0xf7, 0x7d, 0xdf, 0x7b, 0xdf, 0xc0,
- 0x8f, 0x01, 0x5f, 0xa7, 0xb1, 0xae, 0x2b, 0x5b, 0x21, 0xd7, 0x69, 0xf8, 0x87, 0x01, 0x5f, 0xac,
- 0x71, 0x0c, 0xbd, 0x82, 0xbe, 0x0b, 0x16, 0xb0, 0xe8, 0x5c, 0xb6, 0x23, 0xbe, 0x84, 0xd3, 0x46,
- 0x3d, 0xec, 0x48, 0x70, 0xe7, 0x75, 0x02, 0xdf, 0xc0, 0x60, 0x67, 0xa8, 0xde, 0x6c, 0xc9, 0x2a,
- 0xd1, 0x73, 0x89, 0xdf, 0x1a, 0x37, 0x64, 0x15, 0x0a, 0x38, 0x6b, 0xa8, 0x36, 0x79, 0x55, 0x8a,
- 0x7e, 0xc0, 0xa2, 0xbe, 0x7c, 0x94, 0xf8, 0x0e, 0x80, 0xbe, 0xe9, 0xbc, 0x26, 0xb3, 0x51, 0x56,
- 0x9c, 0xba, 0x70, 0x70, 0x70, 0xa6, 0x16, 0x11, 0xfa, 0xae, 0xd0, 0x73, 0x85, 0x6e, 0x6e, 0x5f,
- 0x32, 0xb6, 0x26, 0xb5, 0xdd, 0xe4, 0x99, 0x80, 0x80, 0x45, 0x17, 0xd2, 0xef, 0x8c, 0x79, 0x86,
- 0xef, 0x61, 0x78, 0x08, 0xb3, 0xaa, 0x24, 0x31, 0x0c, 0x58, 0xe4, 0x4b, 0xe8, 0xac, 0xeb, 0xaa,
- 0xa4, 0x30, 0x00, 0x6f, 0xb1, 0x5e, 0xe6, 0xc6, 0xe2, 0x2b, 0xe0, 0x45, 0x23, 0x58, 0xd0, 0x8b,
- 0x86, 0x57, 0x5e, 0xac, 0xd3, 0x78, 0xb1, 0x96, 0xbc, 0x68, 0xc2, 0x29, 0xbc, 0xb8, 0x51, 0x65,
- 0xfe, 0x85, 0x8c, 0x9d, 0xdd, 0xab, 0xf2, 0x2b, 0x7d, 0x26, 0x8b, 0x97, 0x70, 0x76, 0xe7, 0x84,
- 0x39, 0x5c, 0x60, 0x7b, 0x71, 0xbc, 0x27, 0x1f, 0x57, 0xc2, 0x9f, 0x0c, 0x46, 0xc7, 0x19, 0x8e,
- 0x80, 0xcf, 0x33, 0x87, 0xb1, 0x2f, 0xf9, 0x3c, 0xc3, 0x4b, 0xe0, 0x2b, 0xed, 0x10, 0x8e, 0xae,
- 0xde, 0x3e, 0xef, 0x8a, 0x57, 0x9a, 0x6a, 0x65, 0xf3, 0xaa, 0x94, 0x7c, 0xa5, 0x5b, 0xe6, 0x4b,
- 0x6a, 0xe8, 0xc1, 0x91, 0xbd, 0x90, 0x9d, 0xc0, 0xd7, 0xe0, 0xcf, 0xee, 0xe9, 0xae, 0x30, 0xbb,
- 0xad, 0xe3, 0x7a, 0x2e, 0x9f, 0x74, 0xf8, 0x01, 0x06, 0x4f, 0x15, 0x08, 0xe0, 0xcd, 0x64, 0x32,
- 0xbd, 0x4d, 0xc6, 0x27, 0xed, 0x7c, 0x9d, 0x2c, 0x93, 0xdb, 0x64, 0xcc, 0x3e, 0x8d, 0x7f, 0xed,
- 0x27, 0xec, 0xf7, 0x7e, 0xc2, 0xfe, 0xee, 0x27, 0xec, 0xc7, 0xbf, 0xc9, 0x49, 0xea, 0xb9, 0x0f,
- 0xf0, 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x50, 0x3d, 0x49, 0xb9, 0x0c, 0x02, 0x00, 0x00,
-}
-
-func (m *KV) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KV) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KV) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.StreamDone {
- i--
- if m.StreamDone {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x58
- }
- if m.StreamId != 0 {
- i = encodeVarintPb(dAtA, i, uint64(m.StreamId))
- i--
- dAtA[i] = 0x50
- }
- if len(m.Meta) > 0 {
- i -= len(m.Meta)
- copy(dAtA[i:], m.Meta)
- i = encodeVarintPb(dAtA, i, uint64(len(m.Meta)))
- i--
- dAtA[i] = 0x32
- }
- if m.ExpiresAt != 0 {
- i = encodeVarintPb(dAtA, i, uint64(m.ExpiresAt))
- i--
- dAtA[i] = 0x28
- }
- if m.Version != 0 {
- i = encodeVarintPb(dAtA, i, uint64(m.Version))
- i--
- dAtA[i] = 0x20
- }
- if len(m.UserMeta) > 0 {
- i -= len(m.UserMeta)
- copy(dAtA[i:], m.UserMeta)
- i = encodeVarintPb(dAtA, i, uint64(len(m.UserMeta)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Value) > 0 {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintPb(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintPb(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *KVList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KVList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KVList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Kv) > 0 {
- for iNdEx := len(m.Kv) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Kv[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintPb(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ManifestChangeSet) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ManifestChangeSet) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ManifestChangeSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Changes) > 0 {
- for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Changes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintPb(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ManifestChange) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ManifestChange) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ManifestChange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Checksum) > 0 {
- i -= len(m.Checksum)
- copy(dAtA[i:], m.Checksum)
- i = encodeVarintPb(dAtA, i, uint64(len(m.Checksum)))
- i--
- dAtA[i] = 0x22
- }
- if m.Level != 0 {
- i = encodeVarintPb(dAtA, i, uint64(m.Level))
- i--
- dAtA[i] = 0x18
- }
- if m.Op != 0 {
- i = encodeVarintPb(dAtA, i, uint64(m.Op))
- i--
- dAtA[i] = 0x10
- }
- if m.Id != 0 {
- i = encodeVarintPb(dAtA, i, uint64(m.Id))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintPb(dAtA []byte, offset int, v uint64) int {
- offset -= sovPb(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *KV) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovPb(uint64(l))
- }
- l = len(m.Value)
- if l > 0 {
- n += 1 + l + sovPb(uint64(l))
- }
- l = len(m.UserMeta)
- if l > 0 {
- n += 1 + l + sovPb(uint64(l))
- }
- if m.Version != 0 {
- n += 1 + sovPb(uint64(m.Version))
- }
- if m.ExpiresAt != 0 {
- n += 1 + sovPb(uint64(m.ExpiresAt))
- }
- l = len(m.Meta)
- if l > 0 {
- n += 1 + l + sovPb(uint64(l))
- }
- if m.StreamId != 0 {
- n += 1 + sovPb(uint64(m.StreamId))
- }
- if m.StreamDone {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *KVList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Kv) > 0 {
- for _, e := range m.Kv {
- l = e.Size()
- n += 1 + l + sovPb(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ManifestChangeSet) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Changes) > 0 {
- for _, e := range m.Changes {
- l = e.Size()
- n += 1 + l + sovPb(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ManifestChange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Id != 0 {
- n += 1 + sovPb(uint64(m.Id))
- }
- if m.Op != 0 {
- n += 1 + sovPb(uint64(m.Op))
- }
- if m.Level != 0 {
- n += 1 + sovPb(uint64(m.Level))
- }
- l = len(m.Checksum)
- if l > 0 {
- n += 1 + l + sovPb(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovPb(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozPb(x uint64) (n int) {
- return sovPb(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *KV) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KV: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
- if m.Value == nil {
- m.Value = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field UserMeta", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.UserMeta = append(m.UserMeta[:0], dAtA[iNdEx:postIndex]...)
- if m.UserMeta == nil {
- m.UserMeta = []byte{}
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- m.Version = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Version |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType)
- }
- m.ExpiresAt = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ExpiresAt |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Meta = append(m.Meta[:0], dAtA[iNdEx:postIndex]...)
- if m.Meta == nil {
- m.Meta = []byte{}
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType)
- }
- m.StreamId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StreamId |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StreamDone", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.StreamDone = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipPb(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *KVList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KVList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KVList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Kv = append(m.Kv, &KV{})
- if err := m.Kv[len(m.Kv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPb(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ManifestChangeSet) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ManifestChangeSet: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ManifestChangeSet: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Changes = append(m.Changes, &ManifestChange{})
- if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPb(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ManifestChange) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ManifestChange: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ManifestChange: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- m.Id = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Id |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType)
- }
- m.Op = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Op |= ManifestChange_Operation(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
- }
- m.Level = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Level |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Checksum = append(m.Checksum[:0], dAtA[iNdEx:postIndex]...)
- if m.Checksum == nil {
- m.Checksum = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPb(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipPb(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPb
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPb
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPb
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthPb
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupPb
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthPb
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthPb = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowPb = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupPb = fmt.Errorf("proto: unexpected end of group")
-)
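All of the generated helpers above (`encodeVarintPb`, `sovPb`, `skipPb`) revolve around protobuf's base-128 varint encoding: 7 payload bits per byte, with the high bit set on every byte except the last. A standalone sketch of the same encoding, written forward rather than into a pre-sized buffer (not part of the generated API):

```go
package main

import "fmt"

// putUvarint appends v as a protobuf varint, least-significant group first.
func putUvarint(dst []byte, v uint64) []byte {
	for v >= 0x80 {
		dst = append(dst, byte(v)|0x80) // high bit set: more bytes follow
		v >>= 7
	}
	return append(dst, byte(v)) // final byte, high bit clear
}

// uvarintLen mirrors sovPb: the number of bytes putUvarint emits for v.
func uvarintLen(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	b := putUvarint(nil, 300)
	fmt.Printf("% x (len %d)\n", b, uvarintLen(300)) // ac 02 (len 2)
}
```

The standard library's encoding/binary.PutUvarint implements the same scheme.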
diff --git a/vendor/github.com/dgraph-io/badger/pb/pb.proto b/vendor/github.com/dgraph-io/badger/pb/pb.proto
deleted file mode 100644
index faf0b65c..00000000
--- a/vendor/github.com/dgraph-io/badger/pb/pb.proto
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Use protos/gen.sh to generate .pb.go files.
-syntax = "proto3";
-
-package pb;
-
-message KV {
- bytes key = 1;
- bytes value = 2;
- bytes user_meta = 3;
- uint64 version = 4;
- uint64 expires_at = 5;
- bytes meta = 6;
-
- // Stream id is used to identify which stream the KV came from.
- uint32 stream_id = 10;
- // Stream done is used to indicate end of stream.
- bool stream_done = 11;
-}
-
-message KVList {
- repeated KV kv = 1;
-}
-
-message ManifestChangeSet {
- // A set of changes that are applied atomically.
- repeated ManifestChange changes = 1;
-}
-
-message ManifestChange {
- uint64 Id = 1;
- enum Operation {
- CREATE = 0;
- DELETE = 1;
- }
- Operation Op = 2;
- uint32 Level = 3; // Only used for CREATE
- bytes Checksum = 4; // Only used for CREATE
-}
diff --git a/vendor/github.com/dgraph-io/badger/publisher.go b/vendor/github.com/dgraph-io/badger/publisher.go
deleted file mode 100644
index 7458b0d9..00000000
--- a/vendor/github.com/dgraph-io/badger/publisher.go
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "sync"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/trie"
- "github.com/dgraph-io/badger/y"
-)
-
-type subscriber struct {
- prefixes [][]byte
- sendCh chan<- *pb.KVList
- subCloser *y.Closer
-}
-
-type publisher struct {
- sync.Mutex
- pubCh chan requests
- subscribers map[uint64]subscriber
- nextID uint64
- indexer *trie.Trie
-}
-
-func newPublisher() *publisher {
- return &publisher{
- pubCh: make(chan requests, 1000),
- subscribers: make(map[uint64]subscriber),
- nextID: 0,
- indexer: trie.NewTrie(),
- }
-}
-
-func (p *publisher) listenForUpdates(c *y.Closer) {
- defer func() {
- p.cleanSubscribers()
- c.Done()
- }()
- slurp := func(batch requests) {
- for {
- select {
- case reqs := <-p.pubCh:
- batch = append(batch, reqs...)
- default:
- p.publishUpdates(batch)
- return
- }
- }
- }
- for {
- select {
- case <-c.HasBeenClosed():
- return
- case reqs := <-p.pubCh:
- slurp(reqs)
- }
- }
-}
-
-func (p *publisher) publishUpdates(reqs requests) {
- p.Lock()
- defer func() {
- p.Unlock()
- // Release all the requests.
- reqs.DecrRef()
- }()
- batchedUpdates := make(map[uint64]*pb.KVList)
- for _, req := range reqs {
- for _, e := range req.Entries {
- ids := p.indexer.Get(e.Key)
- if len(ids) > 0 {
- k := y.SafeCopy(nil, e.Key)
- kv := &pb.KV{
- Key: y.ParseKey(k),
- Value: y.SafeCopy(nil, e.Value),
- Meta: []byte{e.UserMeta},
- ExpiresAt: e.ExpiresAt,
- Version: y.ParseTs(k),
- }
- for id := range ids {
- if _, ok := batchedUpdates[id]; !ok {
- batchedUpdates[id] = &pb.KVList{}
- }
- batchedUpdates[id].Kv = append(batchedUpdates[id].Kv, kv)
- }
- }
- }
- }
-
- for id, kvs := range batchedUpdates {
- p.subscribers[id].sendCh <- kvs
- }
-}
-
-func (p *publisher) newSubscriber(c *y.Closer, prefixes ...[]byte) (<-chan *pb.KVList, uint64) {
- p.Lock()
- defer p.Unlock()
- ch := make(chan *pb.KVList, 1000)
- id := p.nextID
- // Increment next ID.
- p.nextID++
- p.subscribers[id] = subscriber{
- prefixes: prefixes,
- sendCh: ch,
- subCloser: c,
- }
- for _, prefix := range prefixes {
- p.indexer.Add(prefix, id)
- }
- return ch, id
-}
-
-// cleanSubscribers stops all the subscribers. Ideally, it should be called while closing the DB.
-func (p *publisher) cleanSubscribers() {
- p.Lock()
- defer p.Unlock()
- for id, s := range p.subscribers {
- for _, prefix := range s.prefixes {
- p.indexer.Delete(prefix, id)
- }
- delete(p.subscribers, id)
- s.subCloser.SignalAndWait()
- }
-}
-
-func (p *publisher) deleteSubscriber(id uint64) {
- p.Lock()
- defer p.Unlock()
- if s, ok := p.subscribers[id]; ok {
- for _, prefix := range s.prefixes {
- p.indexer.Delete(prefix, id)
- }
- }
- delete(p.subscribers, id)
-}
-
-func (p *publisher) sendUpdates(reqs requests) {
- if p.noOfSubscribers() != 0 {
- reqs.IncrRef()
- p.pubCh <- reqs
- }
-}
-
-func (p *publisher) noOfSubscribers() int {
- p.Lock()
- defer p.Unlock()
- return len(p.subscribers)
-}
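
The publisher deleted above is a small prefix-keyed pub/sub: subscribers register key prefixes, and `publishUpdates` fans each written entry out to every matching subscriber's channel. Below is a minimal, self-contained sketch of the same fan-out pattern; the names are illustrative only (this is not Badger's API), and a linear prefix scan stands in for Badger's trie index:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

type sub struct {
	prefixes [][]byte
	ch       chan []byte
}

type miniPublisher struct {
	mu     sync.Mutex
	nextID uint64
	subs   map[uint64]sub
}

// subscribe registers interest in key prefixes, as newSubscriber does above.
func (p *miniPublisher) subscribe(prefixes ...[]byte) <-chan []byte {
	p.mu.Lock()
	defer p.mu.Unlock()
	ch := make(chan []byte, 16)
	p.subs[p.nextID] = sub{prefixes: prefixes, ch: ch}
	p.nextID++
	return ch
}

// publish delivers key to every subscriber with a matching prefix, as
// publishUpdates does above (Badger uses a trie instead of this linear scan).
func (p *miniPublisher) publish(key []byte) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, s := range p.subs {
		for _, pre := range s.prefixes {
			if bytes.HasPrefix(key, pre) {
				s.ch <- key
				break
			}
		}
	}
}

func main() {
	p := &miniPublisher{subs: make(map[uint64]sub)}
	ch := p.subscribe([]byte("user/"))
	p.publish([]byte("user/42"))
	p.publish([]byte("order/7")) // no matching prefix; dropped
	fmt.Printf("%s\n", <-ch)     // user/42
}
```
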
diff --git a/vendor/github.com/dgraph-io/badger/skl/README.md b/vendor/github.com/dgraph-io/badger/skl/README.md
deleted file mode 100644
index e22e4590..00000000
--- a/vendor/github.com/dgraph-io/badger/skl/README.md
+++ /dev/null
@@ -1,113 +0,0 @@
-This is much better than `skiplist` and `slist`.
-
-```
-BenchmarkReadWrite/frac_0-8 3000000 537 ns/op
-BenchmarkReadWrite/frac_1-8 3000000 503 ns/op
-BenchmarkReadWrite/frac_2-8 3000000 492 ns/op
-BenchmarkReadWrite/frac_3-8 3000000 475 ns/op
-BenchmarkReadWrite/frac_4-8 3000000 440 ns/op
-BenchmarkReadWrite/frac_5-8 5000000 442 ns/op
-BenchmarkReadWrite/frac_6-8 5000000 380 ns/op
-BenchmarkReadWrite/frac_7-8 5000000 338 ns/op
-BenchmarkReadWrite/frac_8-8 5000000 294 ns/op
-BenchmarkReadWrite/frac_9-8 10000000 268 ns/op
-BenchmarkReadWrite/frac_10-8 100000000 26.3 ns/op
-```
-
-And even better than a simple map with read-write lock:
-
-```
-BenchmarkReadWriteMap/frac_0-8 2000000 774 ns/op
-BenchmarkReadWriteMap/frac_1-8 2000000 647 ns/op
-BenchmarkReadWriteMap/frac_2-8 3000000 605 ns/op
-BenchmarkReadWriteMap/frac_3-8 3000000 603 ns/op
-BenchmarkReadWriteMap/frac_4-8 3000000 556 ns/op
-BenchmarkReadWriteMap/frac_5-8 3000000 472 ns/op
-BenchmarkReadWriteMap/frac_6-8 3000000 476 ns/op
-BenchmarkReadWriteMap/frac_7-8 3000000 457 ns/op
-BenchmarkReadWriteMap/frac_8-8 5000000 444 ns/op
-BenchmarkReadWriteMap/frac_9-8 5000000 361 ns/op
-BenchmarkReadWriteMap/frac_10-8 10000000 212 ns/op
-```
-
-# Node Pooling
-
-Command used
-
-```
-rm -Rf tmp && /usr/bin/time -l ./populate -keys_mil 10
-```
-
-For pprof results, we run without using /usr/bin/time. There are two runs below.
-
-Results seem to vary quite a bit between runs.
-
-## Before node pooling
-
-```
-1311.53MB of 1338.69MB total (97.97%)
-Dropped 30 nodes (cum <= 6.69MB)
-Showing top 10 nodes out of 37 (cum >= 12.50MB)
- flat flat% sum% cum cum%
- 523.04MB 39.07% 39.07% 523.04MB 39.07% github.com/dgraph-io/badger/skl.(*Skiplist).Put
- 184.51MB 13.78% 52.85% 184.51MB 13.78% runtime.stringtoslicebyte
- 166.01MB 12.40% 65.25% 689.04MB 51.47% github.com/dgraph-io/badger/mem.(*Table).Put
- 165MB 12.33% 77.58% 165MB 12.33% runtime.convT2E
- 116.92MB 8.73% 86.31% 116.92MB 8.73% bytes.makeSlice
- 62.50MB 4.67% 90.98% 62.50MB 4.67% main.newValue
- 34.50MB 2.58% 93.56% 34.50MB 2.58% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV
- 25.50MB 1.90% 95.46% 100.06MB 7.47% github.com/dgraph-io/badger/y.(*MergeIterator).Next
- 21.06MB 1.57% 97.04% 21.06MB 1.57% github.com/dgraph-io/badger/table.(*Table).read
- 12.50MB 0.93% 97.97% 12.50MB 0.93% github.com/dgraph-io/badger/table.header.Encode
-
- 128.31 real 329.37 user 17.11 sys
-3355660288 maximum resident set size
- 0 average shared memory size
- 0 average unshared data size
- 0 average unshared stack size
- 2203080 page reclaims
- 764 page faults
- 0 swaps
- 275 block input operations
- 76 block output operations
- 0 messages sent
- 0 messages received
- 0 signals received
- 49173 voluntary context switches
- 599922 involuntary context switches
-```
-
-## After node pooling
-
-```
-1963.13MB of 2026.09MB total (96.89%)
-Dropped 29 nodes (cum <= 10.13MB)
-Showing top 10 nodes out of 41 (cum >= 185.62MB)
- flat flat% sum% cum cum%
- 658.05MB 32.48% 32.48% 658.05MB 32.48% github.com/dgraph-io/badger/skl.glob..func1
- 297.51MB 14.68% 47.16% 297.51MB 14.68% runtime.convT2E
- 257.51MB 12.71% 59.87% 257.51MB 12.71% runtime.stringtoslicebyte
- 249.01MB 12.29% 72.16% 1007.06MB 49.70% github.com/dgraph-io/badger/mem.(*Table).Put
- 142.43MB 7.03% 79.19% 142.43MB 7.03% bytes.makeSlice
- 100MB 4.94% 84.13% 758.05MB 37.41% github.com/dgraph-io/badger/skl.newNode
- 99.50MB 4.91% 89.04% 99.50MB 4.91% main.newValue
- 75MB 3.70% 92.74% 75MB 3.70% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV
- 44.62MB 2.20% 94.94% 44.62MB 2.20% github.com/dgraph-io/badger/table.(*Table).read
- 39.50MB 1.95% 96.89% 185.62MB 9.16% github.com/dgraph-io/badger/y.(*MergeIterator).Next
-
- 135.58 real 374.29 user 17.65 sys
-3740614656 maximum resident set size
- 0 average shared memory size
- 0 average unshared data size
- 0 average unshared stack size
- 2276566 page reclaims
- 770 page faults
- 0 swaps
- 128 block input operations
- 90 block output operations
- 0 messages sent
- 0 messages received
- 0 signals received
- 46434 voluntary context switches
- 597049 involuntary context switches
-```
diff --git a/vendor/github.com/dgraph-io/badger/skl/arena.go b/vendor/github.com/dgraph-io/badger/skl/arena.go
deleted file mode 100644
index def55071..00000000
--- a/vendor/github.com/dgraph-io/badger/skl/arena.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package skl
-
-import (
- "sync/atomic"
- "unsafe"
-
- "github.com/dgraph-io/badger/y"
-)
-
-const (
- offsetSize = int(unsafe.Sizeof(uint32(0)))
-
- // Always align nodes on 64-bit boundaries, even on 32-bit architectures,
- // so that the node.value field is 64-bit aligned. This is necessary because
- // node.getValueOffset uses atomic.LoadUint64, which expects its input
- // pointer to be 64-bit aligned.
- nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1
-)
-
-// Arena should be lock-free.
-type Arena struct {
- n uint32
- buf []byte
-}
-
-// newArena returns a new arena.
-func newArena(n int64) *Arena {
- // Don't store data at position 0 in order to reserve offset=0 as a kind
- // of nil pointer.
- out := &Arena{
- n: 1,
- buf: make([]byte, n),
- }
- return out
-}
-
-func (s *Arena) size() int64 {
- return int64(atomic.LoadUint32(&s.n))
-}
-
-func (s *Arena) reset() {
- atomic.StoreUint32(&s.n, 0)
-}
-
-// putNode allocates a node in the arena. The node is aligned on a pointer-sized
-// boundary. The arena offset of the node is returned.
-func (s *Arena) putNode(height int) uint32 {
- // Compute the amount of the tower that will never be used, since the height
- // is less than maxHeight.
- unusedSize := (maxHeight - height) * offsetSize
-
- // Pad the allocation with enough bytes to ensure pointer alignment.
- l := uint32(MaxNodeSize - unusedSize + nodeAlign)
- n := atomic.AddUint32(&s.n, l)
- y.AssertTruef(int(n) <= len(s.buf),
- "Arena too small, toWrite:%d newTotal:%d limit:%d",
- l, n, len(s.buf))
-
- // Return the aligned offset.
- m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign)
- return m
-}
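
The arithmetic in putNode is easy to miss: the allocation is padded with nodeAlign extra bytes, so the start offset (n - l) can always be rounded up to the next 8-byte boundary with `(start + nodeAlign) &^ nodeAlign`. A small standalone check of that rounding:

```go
package main

import "fmt"

func main() {
	const nodeAlign = 7 // unsafe.Sizeof(uint64(0)) - 1, as in arena.go above
	for _, start := range []uint32{13, 16, 21} {
		aligned := (start + nodeAlign) &^ nodeAlign
		fmt.Printf("start=%2d -> aligned=%d\n", start, aligned)
	}
	// Output:
	// start=13 -> aligned=16
	// start=16 -> aligned=16
	// start=21 -> aligned=24
}
```
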
-
-// putVal will *copy* val into the arena. To make better use of this, reuse your input
-// val buffer. Returns an offset into buf. The user is responsible for remembering the
-// size of val. We could also store this size inside the arena, but the encoding and
-// decoding would incur some overhead.
-func (s *Arena) putVal(v y.ValueStruct) uint32 {
- l := uint32(v.EncodedSize())
- n := atomic.AddUint32(&s.n, l)
- y.AssertTruef(int(n) <= len(s.buf),
- "Arena too small, toWrite:%d newTotal:%d limit:%d",
- l, n, len(s.buf))
- m := n - l
- v.Encode(s.buf[m:])
- return m
-}
-
-func (s *Arena) putKey(key []byte) uint32 {
- l := uint32(len(key))
- n := atomic.AddUint32(&s.n, l)
- y.AssertTruef(int(n) <= len(s.buf),
- "Arena too small, toWrite:%d newTotal:%d limit:%d",
- l, n, len(s.buf))
- m := n - l
- y.AssertTrue(len(key) == copy(s.buf[m:n], key))
- return m
-}
-
-// getNode returns a pointer to the node located at offset. If the offset is
-// zero, then the nil node pointer is returned.
-func (s *Arena) getNode(offset uint32) *node {
- if offset == 0 {
- return nil
- }
-
- return (*node)(unsafe.Pointer(&s.buf[offset]))
-}
-
-// getKey returns byte slice at offset.
-func (s *Arena) getKey(offset uint32, size uint16) []byte {
- return s.buf[offset : offset+uint32(size)]
-}
-
-// getVal returns byte slice at offset. The given size should be just the value
-// size and should NOT include the meta bytes.
-func (s *Arena) getVal(offset uint32, size uint16) (ret y.ValueStruct) {
- ret.Decode(s.buf[offset : offset+uint32(size)])
- return
-}
-
-// getNodeOffset returns the offset of node in the arena. If the node pointer is
-// nil, then the zero offset is returned.
-func (s *Arena) getNodeOffset(nd *node) uint32 {
- if nd == nil {
- return 0
- }
-
- return uint32(uintptr(unsafe.Pointer(nd)) - uintptr(unsafe.Pointer(&s.buf[0])))
-}
diff --git a/vendor/github.com/dgraph-io/badger/skl/skl.go b/vendor/github.com/dgraph-io/badger/skl/skl.go
deleted file mode 100644
index 65647ff5..00000000
--- a/vendor/github.com/dgraph-io/badger/skl/skl.go
+++ /dev/null
@@ -1,517 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-Adapted from RocksDB inline skiplist.
-
-Key differences:
-- No optimization for sequential inserts (no "prev").
-- No custom comparator.
-- Support overwrites. This requires care when we see the same key when inserting.
- For RocksDB or LevelDB, overwrites are implemented as a newer sequence number in the key, so
- there is no need for values. We don't intend to support versioning. In-place updates of values
- would be more efficient.
-- We discard all non-concurrent code.
-- We do not support Splices. This simplifies the code a lot.
-- No AllocateNode or other pointer arithmetic.
-- We combine the findLessThan, findGreaterOrEqual, etc into one function.
-*/
-
-package skl
-
-import (
- "math"
- "sync/atomic"
- "unsafe"
-
- "github.com/dgraph-io/badger/y"
- "github.com/dgraph-io/ristretto/z"
-)
-
-const (
- maxHeight = 20
- heightIncrease = math.MaxUint32 / 3
-)
-
-// MaxNodeSize is the memory footprint of a node of maximum height.
-const MaxNodeSize = int(unsafe.Sizeof(node{}))
-
-type node struct {
- // Multiple parts of the value are encoded as a single uint64 so that it
- // can be atomically loaded and stored:
- // value offset: uint32 (bits 0-31)
- // value size : uint16 (bits 32-47)
- value uint64
-
- // A byte slice is 24 bytes. We are trying to save space here.
- keyOffset uint32 // Immutable. No need to lock to access key.
- keySize uint16 // Immutable. No need to lock to access key.
-
- // Height of the tower.
- height uint16
-
- // Most nodes do not need to use the full height of the tower, since the
- // probability of each successive level decreases exponentially. Because
- // these elements are never accessed, they do not need to be allocated.
- // Therefore, when a node is allocated in the arena, its memory footprint
- // is deliberately truncated to not include unneeded tower elements.
- //
- // All accesses to elements should use CAS operations, with no need to lock.
- tower [maxHeight]uint32
-}
-
-// Skiplist maps keys to values (in memory)
-type Skiplist struct {
- height int32 // Current height. 1 <= height <= kMaxHeight. CAS.
- head *node
- ref int32
- arena *Arena
-}
-
-// IncrRef increases the refcount
-func (s *Skiplist) IncrRef() {
- atomic.AddInt32(&s.ref, 1)
-}
-
-// DecrRef decrements the refcount, deallocating the Skiplist when done using it
-func (s *Skiplist) DecrRef() {
- newRef := atomic.AddInt32(&s.ref, -1)
- if newRef > 0 {
- return
- }
-
- s.arena.reset()
- // Indicate we are closed. Good for testing. Also, lets GC reclaim memory. Race condition
- // here would suggest we are accessing skiplist when we are supposed to have no reference!
- s.arena = nil
- // Since the head references the arena's buf, as long as the head is kept around
- // GC can't release the buf.
- s.head = nil
-}
-
-func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node {
- // The base level is already allocated in the node struct.
- offset := arena.putNode(height)
- node := arena.getNode(offset)
- node.keyOffset = arena.putKey(key)
- node.keySize = uint16(len(key))
- node.height = uint16(height)
- node.value = encodeValue(arena.putVal(v), v.EncodedSize())
- return node
-}
-
-func encodeValue(valOffset uint32, valSize uint16) uint64 {
- return uint64(valSize)<<32 | uint64(valOffset)
-}
-
-func decodeValue(value uint64) (valOffset uint32, valSize uint16) {
- valOffset = uint32(value)
- valSize = uint16(value >> 32)
- return
-}
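
Packing the value offset and size into one uint64 is what lets node.value be read and replaced with a single atomic.LoadUint64/StoreUint64, with no lock. A standalone round-trip of the same bit layout:

```go
package main

import "fmt"

// Same layout as encodeValue/decodeValue above:
// size in bits 32-47, offset in bits 0-31.
func main() {
	offset, size := uint32(0xDEADBEEF), uint16(42)
	packed := uint64(size)<<32 | uint64(offset)
	fmt.Printf("offset=%#x size=%d\n", uint32(packed), uint16(packed>>32))
	// offset=0xdeadbeef size=42
}
```
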
-
-// NewSkiplist makes a new empty skiplist, with a given arena size
-func NewSkiplist(arenaSize int64) *Skiplist {
- arena := newArena(arenaSize)
- head := newNode(arena, nil, y.ValueStruct{}, maxHeight)
- return &Skiplist{
- height: 1,
- head: head,
- arena: arena,
- ref: 1,
- }
-}
-
-func (s *node) getValueOffset() (uint32, uint16) {
- value := atomic.LoadUint64(&s.value)
- return decodeValue(value)
-}
-
-func (s *node) key(arena *Arena) []byte {
- return arena.getKey(s.keyOffset, s.keySize)
-}
-
-func (s *node) setValue(arena *Arena, v y.ValueStruct) {
- valOffset := arena.putVal(v)
- value := encodeValue(valOffset, v.EncodedSize())
- atomic.StoreUint64(&s.value, value)
-}
-
-func (s *node) getNextOffset(h int) uint32 {
- return atomic.LoadUint32(&s.tower[h])
-}
-
-func (s *node) casNextOffset(h int, old, val uint32) bool {
- return atomic.CompareAndSwapUint32(&s.tower[h], old, val)
-}
-
-// Returns true if key is strictly > n.key.
-// If n is nil, this is an "end" marker and we return false.
-//func (s *Skiplist) keyIsAfterNode(key []byte, n *node) bool {
-// y.AssertTrue(n != s.head)
-// return n != nil && y.CompareKeys(key, n.key) > 0
-//}
-
-func (s *Skiplist) randomHeight() int {
- h := 1
- for h < maxHeight && z.FastRand() <= heightIncrease {
- h++
- }
- return h
-}
-
-func (s *Skiplist) getNext(nd *node, height int) *node {
- return s.arena.getNode(nd.getNextOffset(height))
-}
-
-// findNear finds the node near to key.
-// If less=true, it finds rightmost node such that node.key < key (if allowEqual=false) or
-// node.key <= key (if allowEqual=true).
-// If less=false, it finds leftmost node such that node.key > key (if allowEqual=false) or
-// node.key >= key (if allowEqual=true).
-// Returns the node found. The bool returned is true if the node has key equal to given key.
-func (s *Skiplist) findNear(key []byte, less bool, allowEqual bool) (*node, bool) {
- x := s.head
- level := int(s.getHeight() - 1)
- for {
- // Assume x.key < key.
- next := s.getNext(x, level)
- if next == nil {
- // x.key < key < END OF LIST
- if level > 0 {
- // Can descend further to iterate closer to the end.
- level--
- continue
- }
- // Level=0. Cannot descend further. Let's return something that makes sense.
- if !less {
- return nil, false
- }
- // Try to return x. Make sure it is not a head node.
- if x == s.head {
- return nil, false
- }
- return x, false
- }
-
- nextKey := next.key(s.arena)
- cmp := y.CompareKeys(key, nextKey)
- if cmp > 0 {
- // x.key < next.key < key. We can continue to move right.
- x = next
- continue
- }
- if cmp == 0 {
- // x.key < key == next.key.
- if allowEqual {
- return next, true
- }
- if !less {
- // We want >, so go to base level to grab the next bigger node.
- return s.getNext(next, 0), false
- }
- // We want <. If not base level, we should go closer in the next level.
- if level > 0 {
- level--
- continue
- }
- // On base level. Return x.
- if x == s.head {
- return nil, false
- }
- return x, false
- }
- // cmp < 0. In other words, x.key < key < next.
- if level > 0 {
- level--
- continue
- }
- // At base level. Need to return something.
- if !less {
- return next, false
- }
- // Try to return x. Make sure it is not a head node.
- if x == s.head {
- return nil, false
- }
- return x, false
- }
-}
-
-// findSpliceForLevel returns (outBefore, outAfter) with outBefore.key <= key <= outAfter.key.
-// The input "before" tells us where to start looking.
-// If we found a node with the same key, then we return outBefore = outAfter.
-// Otherwise, outBefore.key < key < outAfter.key.
-func (s *Skiplist) findSpliceForLevel(key []byte, before *node, level int) (*node, *node) {
- for {
- // Assume before.key < key.
- next := s.getNext(before, level)
- if next == nil {
- return before, next
- }
- nextKey := next.key(s.arena)
- cmp := y.CompareKeys(key, nextKey)
- if cmp == 0 {
- // Equality case.
- return next, next
- }
- if cmp < 0 {
- // before.key < key < next.key. We are done for this level.
- return before, next
- }
- before = next // Keep moving right on this level.
- }
-}
-
-func (s *Skiplist) getHeight() int32 {
- return atomic.LoadInt32(&s.height)
-}
-
-// Put inserts the key-value pair.
-func (s *Skiplist) Put(key []byte, v y.ValueStruct) {
- // Since we allow overwrite, we may not need to create a new node. We might not even need to
- // increase the height. Let's defer these actions.
-
- listHeight := s.getHeight()
- var prev [maxHeight + 1]*node
- var next [maxHeight + 1]*node
- prev[listHeight] = s.head
- next[listHeight] = nil
- for i := int(listHeight) - 1; i >= 0; i-- {
- // Use higher level to speed up for current level.
- prev[i], next[i] = s.findSpliceForLevel(key, prev[i+1], i)
- if prev[i] == next[i] {
- prev[i].setValue(s.arena, v)
- return
- }
- }
-
- // We do need to create a new node.
- height := s.randomHeight()
- x := newNode(s.arena, key, v, height)
-
- // Try to increase s.height via CAS.
- listHeight = s.getHeight()
- for height > int(listHeight) {
- if atomic.CompareAndSwapInt32(&s.height, listHeight, int32(height)) {
- // Successfully increased skiplist.height.
- break
- }
- listHeight = s.getHeight()
- }
-
- // We always insert from the base level and up. After a node is added at the base level, we
- // cannot create a node at a level above it, because the search would have discovered the node
- // at the base level.
- for i := 0; i < height; i++ {
- for {
- if prev[i] == nil {
- y.AssertTrue(i > 1) // This cannot happen in base level.
- // We haven't computed prev, next for this level because height exceeds old listHeight.
- // For these levels, we expect the lists to be sparse, so we can just search from head.
- prev[i], next[i] = s.findSpliceForLevel(key, s.head, i)
- // Someone added the exact same key before we could. This can only happen on
- // the base level. But we know we are not on the base level.
- y.AssertTrue(prev[i] != next[i])
- }
- nextOffset := s.arena.getNodeOffset(next[i])
- x.tower[i] = nextOffset
- if prev[i].casNextOffset(i, nextOffset, s.arena.getNodeOffset(x)) {
- // Managed to insert x between prev[i] and next[i]. Go to the next level.
- break
- }
- // CAS failed. We need to recompute prev and next.
- // It is unlikely to be helpful to try to use a different level as we redo the search,
- // because it is unlikely that lots of nodes are inserted between prev[i] and next[i].
- prev[i], next[i] = s.findSpliceForLevel(key, prev[i], i)
- if prev[i] == next[i] {
- y.AssertTruef(i == 0, "Equality can happen only on base level: %d", i)
- prev[i].setValue(s.arena, v)
- return
- }
- }
- }
-}
-
-// Empty reports whether the Skiplist is empty.
-func (s *Skiplist) Empty() bool {
- return s.findLast() == nil
-}
-
-// findLast returns the last element. If head (empty list), we return nil. All the find functions
-// will NEVER return the head nodes.
-func (s *Skiplist) findLast() *node {
- n := s.head
- level := int(s.getHeight()) - 1
- for {
- next := s.getNext(n, level)
- if next != nil {
- n = next
- continue
- }
- if level == 0 {
- if n == s.head {
- return nil
- }
- return n
- }
- level--
- }
-}
-
-// Get gets the value associated with the key. It returns a valid value if it finds an equal or
-// earlier version of the same key.
-func (s *Skiplist) Get(key []byte) y.ValueStruct {
- n, _ := s.findNear(key, false, true) // findGreaterOrEqual.
- if n == nil {
- return y.ValueStruct{}
- }
-
- nextKey := s.arena.getKey(n.keyOffset, n.keySize)
- if !y.SameKey(key, nextKey) {
- return y.ValueStruct{}
- }
-
- valOffset, valSize := n.getValueOffset()
- vs := s.arena.getVal(valOffset, valSize)
- vs.Version = y.ParseTs(nextKey)
- return vs
-}
-
-// NewIterator returns a skiplist iterator. You have to Close() the iterator.
-func (s *Skiplist) NewIterator() *Iterator {
- s.IncrRef()
- return &Iterator{list: s}
-}
-
-// MemSize returns the size of the Skiplist in terms of how much memory is used within its internal
-// arena.
-func (s *Skiplist) MemSize() int64 { return s.arena.size() }
-
-// Iterator is an iterator over a skiplist object. For new objects, you just
-// need to initialize Iterator.list.
-type Iterator struct {
- list *Skiplist
- n *node
-}
-
-// Close frees the resources held by the iterator
-func (s *Iterator) Close() error {
- s.list.DecrRef()
- return nil
-}
-
-// Valid returns true iff the iterator is positioned at a valid node.
-func (s *Iterator) Valid() bool { return s.n != nil }
-
-// Key returns the key at the current position.
-func (s *Iterator) Key() []byte {
- return s.list.arena.getKey(s.n.keyOffset, s.n.keySize)
-}
-
-// Value returns value.
-func (s *Iterator) Value() y.ValueStruct {
- valOffset, valSize := s.n.getValueOffset()
- return s.list.arena.getVal(valOffset, valSize)
-}
-
-// Next advances to the next position.
-func (s *Iterator) Next() {
- y.AssertTrue(s.Valid())
- s.n = s.list.getNext(s.n, 0)
-}
-
-// Prev advances to the previous position.
-func (s *Iterator) Prev() {
- y.AssertTrue(s.Valid())
- s.n, _ = s.list.findNear(s.Key(), true, false) // find <. No equality allowed.
-}
-
-// Seek advances to the first entry with a key >= target.
-func (s *Iterator) Seek(target []byte) {
- s.n, _ = s.list.findNear(target, false, true) // find >=.
-}
-
-// SeekForPrev finds an entry with key <= target.
-func (s *Iterator) SeekForPrev(target []byte) {
- s.n, _ = s.list.findNear(target, true, true) // find <=.
-}
-
-// SeekToFirst seeks position at the first entry in list.
-// Final state of iterator is Valid() iff list is not empty.
-func (s *Iterator) SeekToFirst() {
- s.n = s.list.getNext(s.list.head, 0)
-}
-
-// SeekToLast seeks position at the last entry in list.
-// Final state of iterator is Valid() iff list is not empty.
-func (s *Iterator) SeekToLast() {
- s.n = s.list.findLast()
-}
-
-// UniIterator is a unidirectional memtable iterator. It is a thin wrapper around
-// Iterator. We like to keep Iterator as before, because it is more powerful and
-// we might support bidirectional iterators in the future.
-type UniIterator struct {
- iter *Iterator
- reversed bool
-}
-
-// NewUniIterator returns a UniIterator.
-func (s *Skiplist) NewUniIterator(reversed bool) *UniIterator {
- return &UniIterator{
- iter: s.NewIterator(),
- reversed: reversed,
- }
-}
-
-// Next implements y.Interface
-func (s *UniIterator) Next() {
- if !s.reversed {
- s.iter.Next()
- } else {
- s.iter.Prev()
- }
-}
-
-// Rewind implements y.Interface
-func (s *UniIterator) Rewind() {
- if !s.reversed {
- s.iter.SeekToFirst()
- } else {
- s.iter.SeekToLast()
- }
-}
-
-// Seek implements y.Interface
-func (s *UniIterator) Seek(key []byte) {
- if !s.reversed {
- s.iter.Seek(key)
- } else {
- s.iter.SeekForPrev(key)
- }
-}
-
-// Key implements y.Interface
-func (s *UniIterator) Key() []byte { return s.iter.Key() }
-
-// Value implements y.Interface
-func (s *UniIterator) Value() y.ValueStruct { return s.iter.Value() }
-
-// Valid implements y.Interface
-func (s *UniIterator) Valid() bool { return s.iter.Valid() }
-
-// Close implements y.Interface (and frees up the iter's resources)
-func (s *UniIterator) Close() error { return s.iter.Close() }
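
End-to-end, the deleted skiplist is used with timestamped keys: y.KeyWithTs appends the version timestamp that CompareKeys and ParseTs expect. A hedged usage sketch against the API above; the key, value, version, and the 1 MB arena size are arbitrary choices for illustration:

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/badger/skl"
	"github.com/dgraph-io/badger/y"
)

func main() {
	list := skl.NewSkiplist(1 << 20) // 1 MB arena; must be large enough for all inserts
	defer list.DecrRef()

	key := y.KeyWithTs([]byte("answer"), 1)
	list.Put(key, y.ValueStruct{Value: []byte("42")})

	vs := list.Get(key)
	fmt.Printf("%s (version %d)\n", vs.Value, vs.Version) // 42 (version 1)

	it := list.NewIterator()
	defer it.Close()
	for it.SeekToFirst(); it.Valid(); it.Next() {
		fmt.Printf("key=%s\n", y.ParseKey(it.Key()))
	}
}
```
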
diff --git a/vendor/github.com/dgraph-io/badger/stream.go b/vendor/github.com/dgraph-io/badger/stream.go
deleted file mode 100644
index d89a4af8..00000000
--- a/vendor/github.com/dgraph-io/badger/stream.go
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Copyright 2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "context"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/y"
- humanize "github.com/dustin/go-humanize"
- "github.com/golang/protobuf/proto"
-)
-
-const pageSize = 4 << 20 // 4MB
-
-// Stream provides a framework to concurrently iterate over a snapshot of Badger, pick up
-// key-values, batch them up and call Send. Stream does concurrent iteration over many smaller key
-// ranges. It does NOT send keys in lexicographically sorted order. To get keys in sorted
-// order, use Iterator.
-type Stream struct {
- // Prefix to only iterate over certain range of keys. If set to nil (default), Stream would
- // iterate over the entire DB.
- Prefix []byte
-
- // Number of goroutines to use for iterating over key ranges. Defaults to 16.
- NumGo int
-
- // Badger would produce log entries in Infof to indicate the progress of Stream. LogPrefix can
- // be used to help differentiate them from other activities. Default is "Badger.Stream".
- LogPrefix string
-
- // ChooseKey is invoked each time a new key is encountered. Note that this is not called
- // on every version of the value, only the first encountered version (i.e. the highest version
- // of the value a key has). ChooseKey can be left nil to select all keys.
- //
- // Note: Calls to ChooseKey are concurrent.
- ChooseKey func(item *Item) bool
-
- // KeyToList, similar to ChooseKey, is only invoked on the highest version of the value. It
-// is up to the caller to iterate over the versions and generate zero, one or more KVs. It
- // is expected that the user would advance the iterator to go through the versions of the
- // values. However, the user MUST immediately return from this function on the first encounter
- // with a mismatching key. See example usage in ToList function. Can be left nil to use ToList
- // function by default.
- //
- // Note: Calls to KeyToList are concurrent.
- KeyToList func(key []byte, itr *Iterator) (*pb.KVList, error)
-
- // This is the method where Stream sends the final output. All calls to Send are done by a
-// single goroutine, i.e. logic within the Send method can expect single-threaded execution.
- Send func(*pb.KVList) error
-
- readTs uint64
- db *DB
- rangeCh chan keyRange
- kvChan chan *pb.KVList
- nextStreamId uint32
-}
-
-// ToList is a default implementation of KeyToList. It picks up all valid versions of the key,
-// skipping over deleted or expired keys.
-func (st *Stream) ToList(key []byte, itr *Iterator) (*pb.KVList, error) {
- list := &pb.KVList{}
- for ; itr.Valid(); itr.Next() {
- item := itr.Item()
- if item.IsDeletedOrExpired() {
- break
- }
- if !bytes.Equal(key, item.Key()) {
- // Break out on the first encounter with another key.
- break
- }
-
- valCopy, err := item.ValueCopy(nil)
- if err != nil {
- return nil, err
- }
- kv := &pb.KV{
- Key: item.KeyCopy(nil),
- Value: valCopy,
- UserMeta: []byte{item.UserMeta()},
- Version: item.Version(),
- ExpiresAt: item.ExpiresAt(),
- }
- list.Kv = append(list.Kv, kv)
- if st.db.opt.NumVersionsToKeep == 1 {
- break
- }
-
- if item.DiscardEarlierVersions() {
- break
- }
- }
- return list, nil
-}
-
-// keyRange is [start, end), including start, excluding end. Do ensure that the start,
-// end byte slices are owned by keyRange struct.
-func (st *Stream) produceRanges(ctx context.Context) {
- splits := st.db.KeySplits(st.Prefix)
-
- // We don't need to create more key ranges than NumGo goroutines. This way, we will have a
- // limited number of "streams" coming out, which then helps limit the memory used by SSWriter.
- {
- pickEvery := int(math.Floor(float64(len(splits)) / float64(st.NumGo)))
- if pickEvery < 1 {
- pickEvery = 1
- }
- filtered := splits[:0]
- for i, split := range splits {
- if (i+1)%pickEvery == 0 {
- filtered = append(filtered, split)
- }
- }
- splits = filtered
- }
-
- start := y.SafeCopy(nil, st.Prefix)
- for _, key := range splits {
- st.rangeCh <- keyRange{left: start, right: y.SafeCopy(nil, []byte(key))}
- start = y.SafeCopy(nil, []byte(key))
- }
- // Edge case: prefix is empty and no splits exist. In that case, we should have at least one
- // keyRange output.
- st.rangeCh <- keyRange{left: start}
- close(st.rangeCh)
-}
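
The pickEvery block above thins the split list so that at most roughly NumGo key ranges come out; more ranges than goroutines would only add overhead. A quick standalone check of that arithmetic:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	splits, numGo := 100, 16
	pickEvery := int(math.Floor(float64(splits) / float64(numGo))) // 6
	if pickEvery < 1 {
		pickEvery = 1
	}
	kept := 0
	for i := 0; i < splits; i++ {
		if (i+1)%pickEvery == 0 { // same filter as produceRanges
			kept++
		}
	}
	fmt.Println(pickEvery, kept) // 6 16: 100 splits thinned to 16 ranges
}
```
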
-
-// produceKVs picks up ranges from rangeCh, generates KV lists and sends them to kvChan.
-func (st *Stream) produceKVs(ctx context.Context) error {
- var size int
- var txn *Txn
- if st.readTs > 0 {
- txn = st.db.NewTransactionAt(st.readTs, false)
- } else {
- txn = st.db.NewTransaction(false)
- }
- defer txn.Discard()
-
- iterate := func(kr keyRange) error {
- iterOpts := DefaultIteratorOptions
- iterOpts.AllVersions = true
- iterOpts.Prefix = st.Prefix
- iterOpts.PrefetchValues = false
- itr := txn.NewIterator(iterOpts)
- defer itr.Close()
-
- // This unique stream id is used to identify all the keys from this iteration.
- streamId := atomic.AddUint32(&st.nextStreamId, 1)
-
- outList := new(pb.KVList)
- var prevKey []byte
- for itr.Seek(kr.left); itr.Valid(); {
- // itr.Valid would only return true for keys with the provided Prefix in iterOpts.
- item := itr.Item()
- if bytes.Equal(item.Key(), prevKey) {
- itr.Next()
- continue
- }
- prevKey = append(prevKey[:0], item.Key()...)
-
- // Check if we reached the end of the key range.
- if len(kr.right) > 0 && bytes.Compare(item.Key(), kr.right) >= 0 {
- break
- }
- // Check if we should pick this key.
- if st.ChooseKey != nil && !st.ChooseKey(item) {
- continue
- }
-
- // Now convert to key value.
- list, err := st.KeyToList(item.KeyCopy(nil), itr)
- if err != nil {
- return err
- }
- if list == nil || len(list.Kv) == 0 {
- continue
- }
- outList.Kv = append(outList.Kv, list.Kv...)
- size += proto.Size(list)
- if size >= pageSize {
- for _, kv := range outList.Kv {
- kv.StreamId = streamId
- }
- select {
- case st.kvChan <- outList:
- case <-ctx.Done():
- return ctx.Err()
- }
- outList = new(pb.KVList)
- size = 0
- }
- }
- if len(outList.Kv) > 0 {
- for _, kv := range outList.Kv {
- kv.StreamId = streamId
- }
- // TODO: Think of a way to indicate that a stream is over.
- select {
- case st.kvChan <- outList:
- case <-ctx.Done():
- return ctx.Err()
- }
- }
- return nil
- }
-
- for {
- select {
- case kr, ok := <-st.rangeCh:
- if !ok {
- // Done with the keys.
- return nil
- }
- if err := iterate(kr); err != nil {
- return err
- }
- case <-ctx.Done():
- return ctx.Err()
- }
- }
-}
-
-func (st *Stream) streamKVs(ctx context.Context) error {
- var count int
- var bytesSent uint64
- t := time.NewTicker(time.Second)
- defer t.Stop()
- now := time.Now()
-
- slurp := func(batch *pb.KVList) error {
- loop:
- for {
- select {
- case kvs, ok := <-st.kvChan:
- if !ok {
- break loop
- }
- y.AssertTrue(kvs != nil)
- batch.Kv = append(batch.Kv, kvs.Kv...)
- default:
- break loop
- }
- }
- sz := uint64(proto.Size(batch))
- bytesSent += sz
- count += len(batch.Kv)
- t := time.Now()
- if err := st.Send(batch); err != nil {
- return err
- }
- st.db.opt.Infof("%s Created batch of size: %s in %s.\n",
- st.LogPrefix, humanize.Bytes(sz), time.Since(t))
- return nil
- }
-
-outer:
- for {
- var batch *pb.KVList
- select {
- case <-ctx.Done():
- return ctx.Err()
-
- case <-t.C:
- dur := time.Since(now)
- durSec := uint64(dur.Seconds())
- if durSec == 0 {
- continue
- }
- speed := bytesSent / durSec
- st.db.opt.Infof("%s Time elapsed: %s, bytes sent: %s, speed: %s/sec\n", st.LogPrefix,
- y.FixedDuration(dur), humanize.Bytes(bytesSent), humanize.Bytes(speed))
-
- case kvs, ok := <-st.kvChan:
- if !ok {
- break outer
- }
- y.AssertTrue(kvs != nil)
- batch = kvs
- if err := slurp(batch); err != nil {
- return err
- }
- }
- }
-
- st.db.opt.Infof("%s Sent %d keys\n", st.LogPrefix, count)
- return nil
-}
-
-// Orchestrate runs Stream. It picks up ranges from the SSTables, then runs NumGo
-// goroutines to iterate over these ranges and batch up KVs in lists. It concurrently runs a single
-// goroutine to pick up these lists, batch them up further and send them to Output.Send. Orchestrate
-// also logs progress via Infof, using the provided LogPrefix. Note that all calls to Output.Send
-// are serial. If any of these steps encounters an error, Orchestrate stops execution and
-// returns that error. Orchestrate can be called multiple times, but in serial order.
-func (st *Stream) Orchestrate(ctx context.Context) error {
- st.rangeCh = make(chan keyRange, 3) // Contains keys for posting lists.
-
- // kvChan should only have a small capacity to ensure that we don't buffer up too much data if
- // sending is slow. Page size is set to 4MB, which is used to lazily cap the size of each
- // KVList. To get 128MB buffer, we can set the channel size to 32.
- st.kvChan = make(chan *pb.KVList, 32)
-
- if st.KeyToList == nil {
- st.KeyToList = st.ToList
- }
-
- // Picks up ranges from Badger, and sends them to rangeCh.
- go st.produceRanges(ctx)
-
- errCh := make(chan error, 1) // Stores the first error from produceKVs.
- var wg sync.WaitGroup
- for i := 0; i < st.NumGo; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- // Picks up ranges from rangeCh, generates KV lists, and sends them to kvChan.
- if err := st.produceKVs(ctx); err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }()
- }
-
- // Pick up key-values from kvChan and send to stream.
- kvErr := make(chan error, 1)
- go func() {
- // Picks up KV lists from kvChan, and sends them to Output.
- kvErr <- st.streamKVs(ctx)
- }()
- wg.Wait() // Wait for produceKVs to be over.
- close(st.kvChan) // Now we can close kvChan.
-
- select {
- case err := <-errCh: // Check error from produceKVs.
- return err
- default:
- }
-
- // Wait for key streaming to be over.
- err := <-kvErr
- return err
-}
-
-func (db *DB) newStream() *Stream {
- return &Stream{db: db, NumGo: 16, LogPrefix: "Badger.Stream"}
-}
-
-// NewStream creates a new Stream.
-func (db *DB) NewStream() *Stream {
- if db.opt.managedTxns {
- panic("This API can not be called in managed mode.")
- }
- return db.newStream()
-}
-
-// NewStreamAt creates a new Stream at a particular timestamp. Should only be used with managed DB.
-func (db *DB) NewStreamAt(readTs uint64) *Stream {
- if !db.opt.managedTxns {
- panic("This API can only be called in managed mode.")
- }
- stream := db.newStream()
- stream.readTs = readTs
- return stream
-}
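
Putting the pieces together, here is a hedged sketch of driving the Stream deleted above, assuming db is an already-opened *badger.DB; countKeys and the prefix are illustrative names. Because all calls to Send are serial, the counter needs no locking:

```go
package example

import (
	"context"

	badger "github.com/dgraph-io/badger"
	"github.com/dgraph-io/badger/pb"
)

func countKeys(ctx context.Context, db *badger.DB, prefix []byte) (int, error) {
	count := 0
	st := db.NewStream() // NumGo defaults to 16, LogPrefix to "Badger.Stream"
	st.Prefix = prefix
	st.Send = func(list *pb.KVList) error {
		count += len(list.Kv) // Send runs on a single goroutine, so no lock needed
		return nil
	}
	if err := st.Orchestrate(ctx); err != nil {
		return 0, err
	}
	return count, nil
}
```
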
diff --git a/vendor/github.com/dgraph-io/badger/stream_writer.go b/vendor/github.com/dgraph-io/badger/stream_writer.go
deleted file mode 100644
index 46dd3805..00000000
--- a/vendor/github.com/dgraph-io/badger/stream_writer.go
+++ /dev/null
@@ -1,439 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "math"
- "sync"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
- humanize "github.com/dustin/go-humanize"
- "github.com/pkg/errors"
-)
-
-const headStreamId uint32 = math.MaxUint32
-
-// StreamWriter is used to write data coming from multiple streams. The streams must not have any
-// overlapping key ranges. Within each stream, the keys must be sorted. The Badger Stream framework
-// is capable of generating such output. So, this StreamWriter can be used at the other end to build
-// BadgerDB at a much faster pace by writing SSTables (and value logs) directly to LSM tree levels
-// without causing any compactions at all. This is much faster than using a batched writer or
-// transactions, but it is only applicable when the keys are pre-sorted and the DB is being
-// bootstrapped. Existing data would get deleted when using this writer. So, this is only useful
-// when restoring from a backup or replicating a DB across servers.
-//
-// StreamWriter should not be called on in-use DB instances. It is designed only to bootstrap new
-// DBs.
-type StreamWriter struct {
- writeLock sync.Mutex
- db *DB
- done func()
- throttle *y.Throttle
- maxVersion uint64
- writers map[uint32]*sortedWriter
- maxHead valuePointer
-}
-
-// NewStreamWriter creates a StreamWriter. Right after creating StreamWriter, Prepare must be
-// called. The memory usage of a StreamWriter is directly proportional to the number of streams
-// possible. So, efforts must be made to keep the number of streams low. The Stream framework
-// typically uses 16 goroutines and hence creates 16 streams.
-func (db *DB) NewStreamWriter() *StreamWriter {
- return &StreamWriter{
- db: db,
- // throttle shouldn't make much difference. Memory consumption is based on the number of
- // concurrent streams being processed.
- throttle: y.NewThrottle(16),
- writers: make(map[uint32]*sortedWriter),
- }
-}
-
-// Prepare should be called before writing any entry to StreamWriter. It deletes all data present in
-// the existing DB, stops compactions and any writes being done by other means. Be very careful when
-// calling Prepare, because it could result in permanent data loss. Not calling Prepare would result
-// in a corrupt Badger instance.
-func (sw *StreamWriter) Prepare() error {
- sw.writeLock.Lock()
- defer sw.writeLock.Unlock()
-
- var err error
- sw.done, err = sw.db.dropAll()
- return err
-}
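
A minimal sketch of the Prepare → Write → Flush contract described above, assuming db is a freshly opened *badger.DB that is being bootstrapped (Prepare drops any existing data). Keys within each stream must arrive sorted, and each KV needs its Version and StreamId set; the function name is illustrative:

```go
package example

import (
	badger "github.com/dgraph-io/badger"
	"github.com/dgraph-io/badger/pb"
)

func bootstrap(db *badger.DB, kvs *pb.KVList) error {
	sw := db.NewStreamWriter()
	if err := sw.Prepare(); err != nil { // must be called first; deletes existing data
		return err
	}
	if err := sw.Write(kvs); err != nil { // demuxed internally by kv.StreamId
		return err
	}
	return sw.Flush() // syncs directories and finalizes the LSM levels
}
```
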
-
-// Write writes KVList to DB. Each KV within the list contains the stream id which StreamWriter
-// would use to demux the writes. Write is thread safe and can be called concurrently by multiple
-// goroutines.
-func (sw *StreamWriter) Write(kvs *pb.KVList) error {
- if len(kvs.GetKv()) == 0 {
- return nil
- }
-
- // closedStreams keeps track of all streams which are going to be marked as done. We are
- // keeping track of all streams so that we can close them at the end, after inserting all
- // the valid kvs.
- closedStreams := make(map[uint32]struct{})
- streamReqs := make(map[uint32]*request)
- for _, kv := range kvs.Kv {
- if kv.StreamDone {
- closedStreams[kv.StreamId] = struct{}{}
- continue
- }
-
- // Panic if some kv comes after stream has been marked as closed.
- if _, ok := closedStreams[kv.StreamId]; ok {
- panic(fmt.Sprintf("write performed on closed stream: %d", kv.StreamId))
- }
-
- var meta, userMeta byte
- if len(kv.Meta) > 0 {
- meta = kv.Meta[0]
- }
- if len(kv.UserMeta) > 0 {
- userMeta = kv.UserMeta[0]
- }
- if sw.maxVersion < kv.Version {
- sw.maxVersion = kv.Version
- }
- e := &Entry{
- Key: y.KeyWithTs(kv.Key, kv.Version),
- Value: kv.Value,
- UserMeta: userMeta,
- ExpiresAt: kv.ExpiresAt,
- meta: meta,
- }
- // If the value can be collocated with the key in LSM tree, we can skip
- // writing the value to value log.
- e.skipVlog = sw.db.shouldWriteValueToLSM(*e)
- req := streamReqs[kv.StreamId]
- if req == nil {
- req = &request{}
- streamReqs[kv.StreamId] = req
- }
- req.Entries = append(req.Entries, e)
- }
- all := make([]*request, 0, len(streamReqs))
- for _, req := range streamReqs {
- all = append(all, req)
- }
-
- sw.writeLock.Lock()
- defer sw.writeLock.Unlock()
-
- // We write all requests to the vlog even if some requests belong to already closed streams.
- // This is safe because writes to a sorted writer, which will be nil for a closed stream,
- // panic. At restart, the stream writer will drop all the data in the Prepare function.
- if err := sw.db.vlog.write(all); err != nil {
- return err
- }
-
- for streamId, req := range streamReqs {
- writer, ok := sw.writers[streamId]
- if !ok {
- writer = sw.newWriter(streamId)
- sw.writers[streamId] = writer
- }
-
- if writer == nil {
- panic(fmt.Sprintf("write performed on closed stream: %d", streamId))
- }
-
- writer.reqCh <- req
- }
-
- // Now we can close any streams if required. We set the writers for
- // the closed streams to nil.
- for streamId := range closedStreams {
- writer, ok := sw.writers[streamId]
- if !ok {
- sw.db.opt.Logger.Warningf("Trying to close stream: %d, but no sorted "+
- "writer found for it", streamId)
- continue
- }
-
- writer.closer.SignalAndWait()
- if err := writer.Done(); err != nil {
- return err
- }
-
- if sw.maxHead.Less(writer.head) {
- sw.maxHead = writer.head
- }
-
- sw.writers[streamId] = nil
- }
- return nil
-}
-
-// Flush is called once we are done writing all the entries. It syncs DB directories. It also
-// updates Oracle with maxVersion found in all entries (if DB is not managed).
-func (sw *StreamWriter) Flush() error {
- sw.writeLock.Lock()
- defer sw.writeLock.Unlock()
-
- defer sw.done()
-
- for _, writer := range sw.writers {
- if writer != nil {
- writer.closer.SignalAndWait()
- }
- }
-
- for _, writer := range sw.writers {
- if writer == nil {
- continue
- }
- if err := writer.Done(); err != nil {
- return err
- }
- if sw.maxHead.Less(writer.head) {
- sw.maxHead = writer.head
- }
- }
-
- // Encode and write the value log head into a new table.
- data := make([]byte, vptrSize)
- data = sw.maxHead.Encode(data)
- headWriter := sw.newWriter(headStreamId)
- if err := headWriter.Add(
- y.KeyWithTs(head, sw.maxVersion),
- y.ValueStruct{Value: data}); err != nil {
- return err
- }
- if err := headWriter.Done(); err != nil {
- return err
- }
-
- if !sw.db.opt.managedTxns {
- if sw.db.orc != nil {
- sw.db.orc.Stop()
- }
- sw.db.orc = newOracle(sw.db.opt)
- sw.db.orc.nextTxnTs = sw.maxVersion
- sw.db.orc.txnMark.Done(sw.maxVersion)
- sw.db.orc.readMark.Done(sw.maxVersion)
- sw.db.orc.incrementNextTs()
- }
-
- // Wait for all files to be written.
- if err := sw.throttle.Finish(); err != nil {
- return err
- }
-
- // Sort tables at the end.
- for _, l := range sw.db.lc.levels {
- l.sortTables()
- }
-
- // Now sync the directories, so all the files are registered.
- if sw.db.opt.ValueDir != sw.db.opt.Dir {
- if err := syncDir(sw.db.opt.ValueDir); err != nil {
- return err
- }
- }
- if err := syncDir(sw.db.opt.Dir); err != nil {
- return err
- }
- return sw.db.lc.validate()
-}
-
-type sortedWriter struct {
- db *DB
- throttle *y.Throttle
-
- builder *table.Builder
- lastKey []byte
- streamId uint32
- reqCh chan *request
- head valuePointer
- // Have separate closer for each writer, as it can be closed at any time.
- closer *y.Closer
-}
-
-func (sw *StreamWriter) newWriter(streamId uint32) *sortedWriter {
- w := &sortedWriter{
- db: sw.db,
- streamId: streamId,
- throttle: sw.throttle,
- builder: table.NewTableBuilder(),
- reqCh: make(chan *request, 3),
- closer: y.NewCloser(1),
- }
-
- go w.handleRequests()
- return w
-}
-
-// ErrUnsortedKey is returned when an out-of-order key arrives at sortedWriter during a call to Add.
-var ErrUnsortedKey = errors.New("Keys not in sorted order")
-
-func (w *sortedWriter) handleRequests() {
- defer w.closer.Done()
-
- process := func(req *request) {
- for i, e := range req.Entries {
- vptr := req.Ptrs[i]
- if !vptr.IsZero() {
- y.AssertTrue(w.head.Less(vptr))
- w.head = vptr
- }
-
- var vs y.ValueStruct
- if e.skipVlog {
- vs = y.ValueStruct{
- Value: e.Value,
- Meta: e.meta,
- UserMeta: e.UserMeta,
- ExpiresAt: e.ExpiresAt,
- }
- } else {
- vbuf := make([]byte, vptrSize)
- vs = y.ValueStruct{
- Value: vptr.Encode(vbuf),
- Meta: e.meta | bitValuePointer,
- UserMeta: e.UserMeta,
- ExpiresAt: e.ExpiresAt,
- }
- }
- if err := w.Add(e.Key, vs); err != nil {
- panic(err)
- }
- }
- }
-
- for {
- select {
- case req := <-w.reqCh:
- process(req)
- case <-w.closer.HasBeenClosed():
- close(w.reqCh)
- for req := range w.reqCh {
- process(req)
- }
- return
- }
- }
-}
-
-// Add adds key and vs to sortedWriter.
-func (w *sortedWriter) Add(key []byte, vs y.ValueStruct) error {
- if len(w.lastKey) > 0 && y.CompareKeys(key, w.lastKey) <= 0 {
- return ErrUnsortedKey
- }
-
- sameKey := y.SameKey(key, w.lastKey)
- // Same keys should go into the same SSTable.
- if !sameKey && w.builder.ReachedCapacity(w.db.opt.MaxTableSize) {
- if err := w.send(false); err != nil {
- return err
- }
- }
-
- w.lastKey = y.SafeCopy(w.lastKey, key)
- w.builder.Add(key, vs)
- return nil
-}
-
-func (w *sortedWriter) send(done bool) error {
- if err := w.throttle.Do(); err != nil {
- return err
- }
- go func(builder *table.Builder) {
- data := builder.Finish()
- err := w.createTable(data)
- w.throttle.Done(err)
- }(w.builder)
- w.builder = table.NewTableBuilder()
- return nil
-}
-
-// Done is called once we are done writing all keys and valueStructs
-// to sortedWriter. It completes writing the current SST to disk.
-func (w *sortedWriter) Done() error {
- if w.builder.Empty() {
- // Assign builder as nil, so that underlying memory can be garbage collected.
- w.builder = nil
- return nil
- }
-
- return w.send(true)
-}
-
-func (w *sortedWriter) createTable(data []byte) error {
- if len(data) == 0 {
- return nil
- }
- fileID := w.db.lc.reserveFileID()
- fd, err := y.CreateSyncedFile(table.NewFilename(fileID, w.db.opt.Dir), true)
- if err != nil {
- return err
- }
- if _, err := fd.Write(data); err != nil {
- return err
- }
- tbl, err := table.OpenTable(fd, w.db.opt.TableLoadingMode, nil)
- if err != nil {
- return err
- }
- lc := w.db.lc
-
- var lhandler *levelHandler
- // We should start the levels from 1, because we need level 0 to set the !badger!head key. We
- // cannot mix up this key with other keys from the DB, otherwise we would introduce a range
- // overlap violation.
- y.AssertTrue(len(lc.levels) > 1)
- for _, l := range lc.levels[1:] {
- ratio := float64(l.getTotalSize()) / float64(l.maxTotalSize)
- if ratio < 1.0 {
- lhandler = l
- break
- }
- }
- if lhandler == nil {
- // If we're exceeding the size of the lowest level, shove it in the lowest level. Can't do
- // better than that.
- lhandler = lc.levels[len(lc.levels)-1]
- }
- if w.streamId == headStreamId {
- // This is a special !badger!head key. We should store it at level 0, separate from all the
- // other keys to avoid an overlap.
- lhandler = lc.levels[0]
- }
- // Now that table can be opened successfully, let's add this to the MANIFEST.
- change := &pb.ManifestChange{
- Id: tbl.ID(),
- Op: pb.ManifestChange_CREATE,
- Level: uint32(lhandler.level),
- Checksum: tbl.Checksum,
- }
- if err := w.db.manifest.addChanges([]*pb.ManifestChange{change}); err != nil {
- return err
- }
-
- // We are not calling lhandler.replaceTables() here, as it sorts tables on every addition.
- // We can sort all tables only once during Flush() call.
- lhandler.addTable(tbl)
-
- // Release the ref held by OpenTable.
- _ = tbl.DecrRef()
- w.db.opt.Infof("Table created: %d at level: %d for stream: %d. Size: %s\n",
- fileID, lhandler.level, w.streamId, humanize.Bytes(uint64(tbl.Size())))
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/structs.go b/vendor/github.com/dgraph-io/badger/structs.go
deleted file mode 100644
index 51d16cdb..00000000
--- a/vendor/github.com/dgraph-io/badger/structs.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package badger
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "hash/crc32"
- "time"
-
- "github.com/dgraph-io/badger/y"
-)
-
-type valuePointer struct {
- Fid uint32
- Len uint32
- Offset uint32
-}
-
-func (p valuePointer) Less(o valuePointer) bool {
- if p.Fid != o.Fid {
- return p.Fid < o.Fid
- }
- if p.Offset != o.Offset {
- return p.Offset < o.Offset
- }
- return p.Len < o.Len
-}
-
-func (p valuePointer) IsZero() bool {
- return p.Fid == 0 && p.Offset == 0 && p.Len == 0
-}
-
-const vptrSize = 12
-
-// Encode encodes Pointer into byte buffer.
-func (p valuePointer) Encode(b []byte) []byte {
- binary.BigEndian.PutUint32(b[:4], p.Fid)
- binary.BigEndian.PutUint32(b[4:8], p.Len)
- binary.BigEndian.PutUint32(b[8:12], p.Offset)
- return b[:vptrSize]
-}
-
-func (p *valuePointer) Decode(b []byte) {
- p.Fid = binary.BigEndian.Uint32(b[:4])
- p.Len = binary.BigEndian.Uint32(b[4:8])
- p.Offset = binary.BigEndian.Uint32(b[8:12])
-}
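
valuePointer is a fixed 12-byte, big-endian frame: Fid, then Len, then Offset. A standalone round-trip of the same layout (the field values are arbitrary examples):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	fid, length, offset := uint32(3), uint32(512), uint32(4096)

	b := make([]byte, 12) // vptrSize
	binary.BigEndian.PutUint32(b[0:4], fid)
	binary.BigEndian.PutUint32(b[4:8], length)
	binary.BigEndian.PutUint32(b[8:12], offset)

	fmt.Println(binary.BigEndian.Uint32(b[0:4]),
		binary.BigEndian.Uint32(b[4:8]),
		binary.BigEndian.Uint32(b[8:12])) // 3 512 4096
}
```
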
-
-// header is used in value log as a header before Entry.
-type header struct {
- klen uint32
- vlen uint32
- expiresAt uint64
- meta byte
- userMeta byte
-}
-
-const (
- headerBufSize = 18
-)
-
-func (h header) Encode(out []byte) {
- y.AssertTrue(len(out) >= headerBufSize)
- binary.BigEndian.PutUint32(out[0:4], h.klen)
- binary.BigEndian.PutUint32(out[4:8], h.vlen)
- binary.BigEndian.PutUint64(out[8:16], h.expiresAt)
- out[16] = h.meta
- out[17] = h.userMeta
-}
-
-// Decodes h from buf.
-func (h *header) Decode(buf []byte) {
- h.klen = binary.BigEndian.Uint32(buf[0:4])
- h.vlen = binary.BigEndian.Uint32(buf[4:8])
- h.expiresAt = binary.BigEndian.Uint64(buf[8:16])
- h.meta = buf[16]
- h.userMeta = buf[17]
-}
-
-// Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by
-// the user to set data.
-type Entry struct {
- Key []byte
- Value []byte
- UserMeta byte
- ExpiresAt uint64 // time.Unix
- meta byte
-
- // Fields maintained internally.
- offset uint32
- skipVlog bool
-}
-
-func (e *Entry) estimateSize(threshold int) int {
- if len(e.Value) < threshold {
- return len(e.Key) + len(e.Value) + 2 // Meta, UserMeta
- }
- return len(e.Key) + 12 + 2 // 12 for ValuePointer, 2 for metas.
-}
-
-// Encodes e to buf. Returns number of bytes written.
-func encodeEntry(e *Entry, buf *bytes.Buffer) (int, error) {
- h := header{
- klen: uint32(len(e.Key)),
- vlen: uint32(len(e.Value)),
- expiresAt: e.ExpiresAt,
- meta: e.meta,
- userMeta: e.UserMeta,
- }
-
- var headerEnc [headerBufSize]byte
- h.Encode(headerEnc[:])
-
- hash := crc32.New(y.CastagnoliCrcTable)
-
- buf.Write(headerEnc[:])
- if _, err := hash.Write(headerEnc[:]); err != nil {
- return 0, err
- }
-
- buf.Write(e.Key)
- if _, err := hash.Write(e.Key); err != nil {
- return 0, err
- }
-
- buf.Write(e.Value)
- if _, err := hash.Write(e.Value); err != nil {
- return 0, err
- }
-
- var crcBuf [crc32.Size]byte
- binary.BigEndian.PutUint32(crcBuf[:], hash.Sum32())
- buf.Write(crcBuf[:])
-
- return len(headerEnc) + len(e.Key) + len(e.Value) + len(crcBuf), nil
-}
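
So a value-log entry is framed as an 18-byte header, the key, the value, and a 4-byte CRC computed over all three. A standalone sketch of the same frame, assuming y.CastagnoliCrcTable is the standard Castagnoli table from hash/crc32:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	key, val := []byte("k"), []byte("hello")

	var hdr [18]byte // headerBufSize
	binary.BigEndian.PutUint32(hdr[0:4], uint32(len(key)))
	binary.BigEndian.PutUint32(hdr[4:8], uint32(len(val)))
	// Bytes 8-15: expiresAt; byte 16: meta; byte 17: userMeta (all zero here).

	var buf bytes.Buffer
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	for _, part := range [][]byte{hdr[:], key, val} {
		buf.Write(part)
		h.Write(part)
	}
	var crc [4]byte
	binary.BigEndian.PutUint32(crc[:], h.Sum32())
	buf.Write(crc[:])

	fmt.Println(buf.Len()) // 18 + 1 + 5 + 4 = 28 bytes on disk
}
```
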
-
-func (e Entry) print(prefix string) {
- fmt.Printf("%s Key: %s Meta: %d UserMeta: %d Offset: %d len(val)=%d",
- prefix, e.Key, e.meta, e.UserMeta, e.offset, len(e.Value))
-}
-
-// NewEntry creates a new entry with key and value passed in args. This newly created entry can be
-// set in a transaction by calling txn.SetEntry(). All other properties of Entry can be set by
-// calling WithMeta, WithDiscard, WithTTL methods on it.
-// This function holds references to the key and value, hence users must
-// not modify them until the end of the transaction.
-func NewEntry(key, value []byte) *Entry {
- return &Entry{
- Key: key,
- Value: value,
- }
-}
-
-// WithMeta adds meta data to Entry e. This byte is stored alongside the key
-// and can be used as an aid to interpret the value or store other contextual
-// bits corresponding to the key-value pair of entry.
-func (e *Entry) WithMeta(meta byte) *Entry {
- e.UserMeta = meta
- return e
-}
-
-// WithDiscard adds a marker to Entry e. This means all the previous versions of the key (of the
-// Entry) will be eligible for garbage collection.
-// This method is only useful if you have set a higher limit for options.NumVersionsToKeep. The
-// default setting is 1, in which case this function doesn't add any benefit. If, however, you
-// have a higher setting for NumVersionsToKeep (in Dgraph, we set it to infinity), you can use this
-// method to indicate that all the older versions can be discarded and removed during compactions.
-func (e *Entry) WithDiscard() *Entry {
- e.meta = bitDiscardEarlierVersions
- return e
-}
-
-// WithTTL adds time to live duration to Entry e. Entry stored with a TTL would automatically expire
-// after the time has elapsed, and will be eligible for garbage collection.
-func (e *Entry) WithTTL(dur time.Duration) *Entry {
- e.ExpiresAt = uint64(time.Now().Add(dur).Unix())
- return e
-}
-
-// withMergeBit sets merge bit in entry's metadata. This
-// function is called by MergeOperator's Add method.
-func (e *Entry) withMergeBit() *Entry {
- e.meta = bitMergeEntry
- return e
-}
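
To make the on-disk value-log entry layout above concrete, here is a minimal standalone sketch (not the vendored code itself) of the encode/decode roundtrip: an 18-byte header, the key, the value, and a trailing CRC32 (Castagnoli) over everything before it. The meta/userMeta bytes are left zero for brevity.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

const headerBufSize = 18 // klen(4) + vlen(4) + expiresAt(8) + meta(1) + userMeta(1)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// encode mirrors the layout of encodeEntry above: header | key | value | crc32.
func encode(key, val []byte, expiresAt uint64) []byte {
	var buf bytes.Buffer
	var hdr [headerBufSize]byte
	binary.BigEndian.PutUint32(hdr[0:4], uint32(len(key)))
	binary.BigEndian.PutUint32(hdr[4:8], uint32(len(val)))
	binary.BigEndian.PutUint64(hdr[8:16], expiresAt)
	buf.Write(hdr[:])
	buf.Write(key)
	buf.Write(val)

	var crc [crc32.Size]byte
	binary.BigEndian.PutUint32(crc[:], crc32.Checksum(buf.Bytes(), castagnoli))
	buf.Write(crc[:])
	return buf.Bytes()
}

// decode reads one entry back, verifying the trailing checksum before
// trusting the lengths, just as a reader of this format would have to.
func decode(b []byte) (key, val []byte, err error) {
	klen := binary.BigEndian.Uint32(b[0:4])
	vlen := binary.BigEndian.Uint32(b[4:8])
	end := headerBufSize + int(klen) + int(vlen)

	want := binary.BigEndian.Uint32(b[end : end+crc32.Size])
	if got := crc32.Checksum(b[:end], castagnoli); got != want {
		return nil, nil, fmt.Errorf("crc mismatch: %08x != %08x", got, want)
	}
	return b[headerBufSize : headerBufSize+int(klen)], b[headerBufSize+int(klen) : end], nil
}

func main() {
	b := encode([]byte("answer"), []byte("42"), 0)
	k, v, err := decode(b)
	fmt.Println(string(k), string(v), err) // answer 42 <nil>
}
```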
diff --git a/vendor/github.com/dgraph-io/badger/table/README.md b/vendor/github.com/dgraph-io/badger/table/README.md
deleted file mode 100644
index a784f126..00000000
--- a/vendor/github.com/dgraph-io/badger/table/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-Size of table is 122,173,606 bytes for all benchmarks.
-
-# BenchmarkRead
-```
-$ go test -bench ^BenchmarkRead$ -run ^$ -count 3
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger/table
-BenchmarkRead-16 10 153281932 ns/op
-BenchmarkRead-16 10 153454443 ns/op
-BenchmarkRead-16 10 155349696 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 23.549s
-```
-
-Size of table is 122,173,606 bytes, which is ~117MB.
-
-The rate is ~750MB/s using LoadToRAM (when table is in RAM).
-
-To read a 64MB table, this would take ~0.0853s, which is negligible.
-
-# BenchmarkReadAndBuild
-```
-$ go test -bench BenchmarkReadAndBuild -run ^$ -count 3
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger/table
-BenchmarkReadAndBuild-16 2 945041628 ns/op
-BenchmarkReadAndBuild-16 2 947120893 ns/op
-BenchmarkReadAndBuild-16 2 954909506 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 26.856s
-```
-
-The rate is ~122MB/s. To build a 64MB table, this would take ~0.52s. Note that this
-does NOT include flushing the table to disk. All we are doing above is
-reading one table (which is in RAM) and writing one table in memory.
-
-The table building takes 0.52-0.0853s ~ 0.4347s.
-
-# BenchmarkReadMerged
-Below, we merge 5 tables. The total size remains unchanged at ~122M.
-
-```
-$ go test -bench ReadMerged -run ^$ -count 3
-BenchmarkReadMerged-16 2 954475788 ns/op
-BenchmarkReadMerged-16 2 955252462 ns/op
-BenchmarkReadMerged-16 2 956857353 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 33.327s
-```
-
-The rate is ~122MB/s. To read a 64MB table using merge iterator, this would take ~0.52s.
-
-# BenchmarkRandomRead
-
-```
-$ go test -bench BenchmarkRandomRead$ -run ^$ -count 3
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger/table
-BenchmarkRandomRead-16 300000 3596 ns/op
-BenchmarkRandomRead-16 300000 3621 ns/op
-BenchmarkRandomRead-16 300000 3596 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 44.727s
-```
-
-For random read benchmarking, we are randomly reading a key and verifying its value.
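
For reference, the quoted throughput figures fall straight out of the numbers above: rate = table size / (ns/op). A quick sketch of the arithmetic:

```go
package main

import "fmt"

func main() {
	const tableBytes = 122173606.0 // table size used by every benchmark above

	rate := func(nsPerOp float64) float64 {
		return tableBytes / (nsPerOp / 1e9) / (1 << 20) // MiB/s
	}
	fmt.Printf("Read:         ~%.0f MB/s\n", rate(153281932)) // ~760, quoted above as ~750MB/s
	fmt.Printf("ReadAndBuild: ~%.0f MB/s\n", rate(945041628)) // ~123, quoted above as ~122MB/s
}
```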
diff --git a/vendor/github.com/dgraph-io/badger/table/builder.go b/vendor/github.com/dgraph-io/badger/table/builder.go
deleted file mode 100644
index f9773bab..00000000
--- a/vendor/github.com/dgraph-io/badger/table/builder.go
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "math"
-
- "github.com/AndreasBriese/bbloom"
- "github.com/dgraph-io/badger/y"
-)
-
-var (
- restartInterval = 100 // Might want to change this to be based on total size instead of numKeys.
-)
-
-func newBuffer(sz int) *bytes.Buffer {
- b := new(bytes.Buffer)
- b.Grow(sz)
- return b
-}
-
-type header struct {
- plen uint16 // Overlap with base key.
- klen uint16 // Length of the diff.
- vlen uint16 // Length of value.
- prev uint32 // Offset for the previous key-value pair. The offset is relative to block base offset.
-}
-
-// Encode encodes the header.
-func (h header) Encode(b []byte) {
- binary.BigEndian.PutUint16(b[0:2], h.plen)
- binary.BigEndian.PutUint16(b[2:4], h.klen)
- binary.BigEndian.PutUint16(b[4:6], h.vlen)
- binary.BigEndian.PutUint32(b[6:10], h.prev)
-}
-
-// Decode decodes the header.
-func (h *header) Decode(buf []byte) int {
- h.plen = binary.BigEndian.Uint16(buf[0:2])
- h.klen = binary.BigEndian.Uint16(buf[2:4])
- h.vlen = binary.BigEndian.Uint16(buf[4:6])
- h.prev = binary.BigEndian.Uint32(buf[6:10])
- return h.Size()
-}
-
-// Size returns size of the header. Currently it's just a constant.
-func (h header) Size() int { return 10 }
-
-// Builder is used in building a table.
-type Builder struct {
- counter int // Number of keys written for the current block.
-
- // Typically tens or hundreds of meg. This is for one single file.
- buf *bytes.Buffer
-
- baseKey []byte // Base key for the current block.
- baseOffset uint32 // Offset for the current block.
-
- restarts []uint32 // Base offsets of every block.
-
- // Tracks offset for the previous key-value pair. Offset is relative to block base offset.
- prevOffset uint32
-
- keyBuf *bytes.Buffer
- keyCount int
-}
-
-// NewTableBuilder makes a new TableBuilder.
-func NewTableBuilder() *Builder {
- return &Builder{
- keyBuf: newBuffer(1 << 20),
- buf: newBuffer(1 << 20),
- prevOffset: math.MaxUint32, // Used for the first element!
- }
-}
-
-// Close closes the TableBuilder.
-func (b *Builder) Close() {}
-
-// Empty returns whether it's empty.
-func (b *Builder) Empty() bool { return b.buf.Len() == 0 }
-
-// keyDiff returns a suffix of newKey that is different from b.baseKey.
-func (b Builder) keyDiff(newKey []byte) []byte {
- var i int
- for i = 0; i < len(newKey) && i < len(b.baseKey); i++ {
- if newKey[i] != b.baseKey[i] {
- break
- }
- }
- return newKey[i:]
-}
-
-func (b *Builder) addHelper(key []byte, v y.ValueStruct) {
- // Add key to bloom filter.
- if len(key) > 0 {
- var klen [2]byte
- keyNoTs := y.ParseKey(key)
- binary.BigEndian.PutUint16(klen[:], uint16(len(keyNoTs)))
- b.keyBuf.Write(klen[:])
- b.keyBuf.Write(keyNoTs)
- b.keyCount++
- }
-
- // diffKey stores the difference of key with baseKey.
- var diffKey []byte
- if len(b.baseKey) == 0 {
- // Make a copy. Builder should not keep references. Otherwise, caller has to be very careful
- // and will have to make copies of keys every time they add to builder, which is even worse.
- b.baseKey = append(b.baseKey[:0], key...)
- diffKey = key
- } else {
- diffKey = b.keyDiff(key)
- }
-
- h := header{
- plen: uint16(len(key) - len(diffKey)),
- klen: uint16(len(diffKey)),
- vlen: uint16(v.EncodedSize()),
- prev: b.prevOffset, // prevOffset is the location of the last key-value added.
- }
- b.prevOffset = uint32(b.buf.Len()) - b.baseOffset // Remember current offset for the next Add call.
-
- // Layout: header, diffKey, value.
- var hbuf [10]byte
- h.Encode(hbuf[:])
- b.buf.Write(hbuf[:])
- b.buf.Write(diffKey) // We only need to store the key difference.
-
- v.EncodeTo(b.buf)
- b.counter++ // Increment number of keys added for this current block.
-}
-
-func (b *Builder) finishBlock() {
- // When we are at the end of the block and Valid=false, and the user wants to do a Prev,
- // we need a dummy header to tell us the offset of the previous key-value pair.
- b.addHelper([]byte{}, y.ValueStruct{})
-}
-
-// Add adds a key-value pair to the builder. Once the current block holds
-// restartInterval entries, it is finished and a new block is started.
-func (b *Builder) Add(key []byte, value y.ValueStruct) {
- if b.counter >= restartInterval {
- b.finishBlock()
- // Start a new block. Initialize the block.
- b.restarts = append(b.restarts, uint32(b.buf.Len()))
- b.counter = 0
- b.baseKey = []byte{}
- b.baseOffset = uint32(b.buf.Len())
- b.prevOffset = math.MaxUint32 // First key-value pair of block has header.prev=MaxInt.
- }
- b.addHelper(key, value)
-}
-
-// ReachedCapacity reports whether the *rough* estimated final size of the table,
-// counting the index and the trailing empty header that are not yet written,
-// exceeds cap.
-// TODO: Look into why the estimate and the final size differ. The likely cause is
-// the dummy addHelper([]byte{}, y.ValueStruct{}) written by finishBlock; the diff
-// can vary.
-func (b *Builder) ReachedCapacity(cap int64) bool {
- estimateSz := b.buf.Len() + 8 /* empty header */ + 4*len(b.restarts) +
- 8 /* 8 = end of buf offset + len(restarts) */
- return int64(estimateSz) > cap
-}
-
-// blockIndex generates the block index for the table.
-// It is mainly a list of all the block base offsets.
-func (b *Builder) blockIndex() []byte {
- // Store the end offset, so we know the length of the final block.
- b.restarts = append(b.restarts, uint32(b.buf.Len()))
-
- // Add 4 because we want to write out number of restarts at the end.
- sz := 4*len(b.restarts) + 4
- out := make([]byte, sz)
- buf := out
- for _, r := range b.restarts {
- binary.BigEndian.PutUint32(buf[:4], r)
- buf = buf[4:]
- }
- binary.BigEndian.PutUint32(buf[:4], uint32(len(b.restarts)))
- return out
-}
-
-// Finish finishes the table by appending the index.
-func (b *Builder) Finish() []byte {
- bf := bbloom.New(float64(b.keyCount), 0.01)
- var klen [2]byte
- key := make([]byte, 1024)
- for {
- if _, err := b.keyBuf.Read(klen[:]); err == io.EOF {
- break
- } else if err != nil {
- y.Check(err)
- }
- kl := int(binary.BigEndian.Uint16(klen[:]))
- if cap(key) < kl {
- key = make([]byte, 2*int(kl)) // Grow using int arithmetic; 2 * uint16 could overflow.
- }
- key = key[:kl]
- y.Check2(b.keyBuf.Read(key))
- bf.Add(key)
- }
-
- b.finishBlock() // This will never start a new block.
- index := b.blockIndex()
- b.buf.Write(index)
-
- // Write bloom filter.
- bdata := bf.JSONMarshal()
- n, err := b.buf.Write(bdata)
- y.Check(err)
- var buf [4]byte
- binary.BigEndian.PutUint32(buf[:], uint32(n))
- b.buf.Write(buf[:])
-
- return b.buf.Bytes()
-}
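
The core trick in this block format is key prefix compression: within a block, each key is stored as a (plen, diff) pair relative to the block's base key, which is why keyDiff here and parseKV (in iterator.go below) mirror each other. A minimal standalone sketch of the idea, ignoring values and prev offsets:

```go
package main

import "fmt"

// compress returns the shared-prefix length with base plus the differing
// suffix, mirroring Builder.keyDiff above.
func compress(base, key []byte) (plen int, diff []byte) {
	for plen < len(base) && plen < len(key) && base[plen] == key[plen] {
		plen++
	}
	return plen, key[plen:]
}

// decompress rebuilds the full key from the base key and the stored pair,
// mirroring blockIterator.parseKV.
func decompress(base []byte, plen int, diff []byte) []byte {
	key := make([]byte, 0, plen+len(diff))
	key = append(key, base[:plen]...)
	return append(key, diff...)
}

func main() {
	base := []byte("user/0001/name")
	for _, k := range []string{"user/0001/age", "user/0002/name"} {
		plen, diff := compress(base, []byte(k))
		fmt.Printf("plen=%2d diff=%-10q -> %q\n",
			plen, diff, decompress(base, plen, diff))
	}
}
```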
diff --git a/vendor/github.com/dgraph-io/badger/table/iterator.go b/vendor/github.com/dgraph-io/badger/table/iterator.go
deleted file mode 100644
index c928540e..00000000
--- a/vendor/github.com/dgraph-io/badger/table/iterator.go
+++ /dev/null
@@ -1,557 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "bytes"
- "io"
- "math"
- "sort"
-
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-type blockIterator struct {
- data []byte
- pos uint32
- err error
- baseKey []byte
-
- key []byte
- val []byte
- init bool
-
- last header // The last header we saw.
-}
-
-func (itr *blockIterator) Reset() {
- itr.pos = 0
- itr.err = nil
- itr.baseKey = []byte{}
- itr.key = []byte{}
- itr.val = []byte{}
- itr.init = false
- itr.last = header{}
-}
-
-func (itr *blockIterator) Init() {
- if !itr.init {
- itr.Next()
- }
-}
-
-func (itr *blockIterator) Valid() bool {
- return itr != nil && itr.err == nil
-}
-
-func (itr *blockIterator) Error() error {
- return itr.err
-}
-
-func (itr *blockIterator) Close() {}
-
-var (
- origin = 0
- current = 1
-)
-
-// Seek brings us to the first block element that is >= input key.
-func (itr *blockIterator) Seek(key []byte, whence int) {
- itr.err = nil
-
- switch whence {
- case origin:
- itr.Reset()
- case current:
- }
-
- var done bool
- for itr.Init(); itr.Valid(); itr.Next() {
- k := itr.Key()
- if y.CompareKeys(k, key) >= 0 {
- // We are done as k is >= key.
- done = true
- break
- }
- }
- if !done {
- itr.err = io.EOF
- }
-}
-
-func (itr *blockIterator) SeekToFirst() {
- itr.err = nil
- itr.Init()
-}
-
-// SeekToLast brings us to the last element. Valid should return true.
-func (itr *blockIterator) SeekToLast() {
- itr.err = nil
- for itr.Init(); itr.Valid(); itr.Next() {
- }
- itr.Prev()
-}
-
-// parseKV would allocate a new byte slice for key and for value.
-func (itr *blockIterator) parseKV(h header) {
- if cap(itr.key) < int(h.plen+h.klen) {
- sz := int(h.plen) + int(h.klen) // Convert to int before adding to avoid uint16 overflow.
- itr.key = make([]byte, 2*sz)
- }
- itr.key = itr.key[:h.plen+h.klen]
- copy(itr.key, itr.baseKey[:h.plen])
- copy(itr.key[h.plen:], itr.data[itr.pos:itr.pos+uint32(h.klen)])
- itr.pos += uint32(h.klen)
-
- if itr.pos+uint32(h.vlen) > uint32(len(itr.data)) {
- itr.err = errors.Errorf("Value exceeded size of block: %d %d %d %d %v",
- itr.pos, h.klen, h.vlen, len(itr.data), h)
- return
- }
- itr.val = y.SafeCopy(itr.val, itr.data[itr.pos:itr.pos+uint32(h.vlen)])
- itr.pos += uint32(h.vlen)
-}
-
-func (itr *blockIterator) Next() {
- itr.init = true
- itr.err = nil
- if itr.pos >= uint32(len(itr.data)) {
- itr.err = io.EOF
- return
- }
-
- var h header
- itr.pos += uint32(h.Decode(itr.data[itr.pos:]))
- itr.last = h // Store the last header.
-
- if h.klen == 0 && h.plen == 0 {
- // Empty dummy header written by finishBlock: end of this block.
- itr.err = io.EOF
- return
- }
-
- // Populate baseKey if it isn't set yet. This would only happen for the first Next.
- if len(itr.baseKey) == 0 {
- // This should be the first Next() for this block. Hence, prefix length should be zero.
- y.AssertTrue(h.plen == 0)
- itr.baseKey = itr.data[itr.pos : itr.pos+uint32(h.klen)]
- }
- itr.parseKV(h)
-}
-
-func (itr *blockIterator) Prev() {
- if !itr.init {
- return
- }
- itr.err = nil
- if itr.last.prev == math.MaxUint32 {
- // This is the first element of the block!
- itr.err = io.EOF
- itr.pos = 0
- return
- }
-
- // Move back using current header's prev.
- itr.pos = itr.last.prev
-
- var h header
- y.AssertTruef(itr.pos < uint32(len(itr.data)), "%d %d", itr.pos, len(itr.data))
- itr.pos += uint32(h.Decode(itr.data[itr.pos:]))
- itr.parseKV(h)
- itr.last = h
-}
-
-func (itr *blockIterator) Key() []byte {
- if itr.err != nil {
- return nil
- }
- return itr.key
-}
-
-func (itr *blockIterator) Value() []byte {
- if itr.err != nil {
- return nil
- }
- return itr.val
-}
-
-// Iterator is an iterator for a Table.
-type Iterator struct {
- t *Table
- bpos int
- bi *blockIterator
- err error
-
- // Internally, Iterator is bidirectional. However, we only expose the
- // unidirectional functionality for now.
- reversed bool
-}
-
-// NewIterator returns a new iterator over the Table.
-func (t *Table) NewIterator(reversed bool) *Iterator {
- t.IncrRef() // Important.
- ti := &Iterator{t: t, reversed: reversed}
- ti.next()
- return ti
-}
-
-// Close closes the iterator (and it must be called).
-func (itr *Iterator) Close() error {
- return itr.t.DecrRef()
-}
-
-func (itr *Iterator) reset() {
- itr.bpos = 0
- itr.err = nil
-}
-
-// Valid follows the y.Iterator interface
-func (itr *Iterator) Valid() bool {
- return itr.err == nil
-}
-
-func (itr *Iterator) seekToFirst() {
- numBlocks := len(itr.t.blockIndex)
- if numBlocks == 0 {
- itr.err = io.EOF
- return
- }
- itr.bpos = 0
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi = block.NewIterator()
- itr.bi.SeekToFirst()
- itr.err = itr.bi.Error()
-}
-
-func (itr *Iterator) seekToLast() {
- numBlocks := len(itr.t.blockIndex)
- if numBlocks == 0 {
- itr.err = io.EOF
- return
- }
- itr.bpos = numBlocks - 1
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi = block.NewIterator()
- itr.bi.SeekToLast()
- itr.err = itr.bi.Error()
-}
-
-func (itr *Iterator) seekHelper(blockIdx int, key []byte) {
- itr.bpos = blockIdx
- block, err := itr.t.block(blockIdx)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi = block.NewIterator()
- itr.bi.Seek(key, origin)
- itr.err = itr.bi.Error()
-}
-
-// seekFrom brings us to a key that is >= input key.
-func (itr *Iterator) seekFrom(key []byte, whence int) {
- itr.err = nil
- switch whence {
- case origin:
- itr.reset()
- case current:
- }
-
- idx := sort.Search(len(itr.t.blockIndex), func(idx int) bool {
- ko := itr.t.blockIndex[idx]
- return y.CompareKeys(ko.key, key) > 0
- })
- if idx == 0 {
- // The smallest key in our table is already strictly > key. We can return that.
- // This is like a SeekToFirst.
- itr.seekHelper(0, key)
- return
- }
-
- // block[idx].smallest is > key.
- // Since idx>0, we know block[idx-1].smallest is <= key.
- // There are two cases.
- // 1) Everything in block[idx-1] is strictly < key. In this case, we should go to the first
- // element of block[idx].
- // 2) Some element in block[idx-1] is >= key. We should go to that element.
- itr.seekHelper(idx-1, key)
- if itr.err == io.EOF {
- // Case 1. Need to visit block[idx].
- if idx == len(itr.t.blockIndex) {
- // If idx == len(itr.t.blockIndex), then input key is greater than ANY element of table.
- // There's nothing we can do. Valid() should return false as we seek to end of table.
- return
- }
- // Since block[idx].smallest is > key. This is essentially a block[idx].SeekToFirst.
- itr.seekHelper(idx, key)
- }
- // Case 2: No need to do anything. We already did the seek in block[idx-1].
-}
-
-// seek will reset iterator and seek to >= key.
-func (itr *Iterator) seek(key []byte) {
- itr.seekFrom(key, origin)
-}
-
-// seekForPrev will reset iterator and seek to <= key.
-func (itr *Iterator) seekForPrev(key []byte) {
- // TODO: Optimize this. We shouldn't have to take a Prev step.
- itr.seekFrom(key, origin)
- if !bytes.Equal(itr.Key(), key) {
- itr.prev()
- }
-}
-
-func (itr *Iterator) next() {
- itr.err = nil
-
- if itr.bpos >= len(itr.t.blockIndex) {
- itr.err = io.EOF
- return
- }
-
- if itr.bi == nil {
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi = block.NewIterator()
- itr.bi.SeekToFirst()
- itr.err = itr.bi.Error()
- return
- }
-
- itr.bi.Next()
- if !itr.bi.Valid() {
- itr.bpos++
- itr.bi = nil
- itr.next()
- return
- }
-}
-
-func (itr *Iterator) prev() {
- itr.err = nil
- if itr.bpos < 0 {
- itr.err = io.EOF
- return
- }
-
- if itr.bi == nil {
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi = block.NewIterator()
- itr.bi.SeekToLast()
- itr.err = itr.bi.Error()
- return
- }
-
- itr.bi.Prev()
- if !itr.bi.Valid() {
- itr.bpos--
- itr.bi = nil
- itr.prev()
- return
- }
-}
-
-// Key follows the y.Iterator interface.
-// Returns the key with timestamp.
-func (itr *Iterator) Key() []byte {
- return itr.bi.Key()
-}
-
-// Value follows the y.Iterator interface
-func (itr *Iterator) Value() (ret y.ValueStruct) {
- ret.Decode(itr.bi.Value())
- return
-}
-
-// Next follows the y.Iterator interface
-func (itr *Iterator) Next() {
- if !itr.reversed {
- itr.next()
- } else {
- itr.prev()
- }
-}
-
-// Rewind follows the y.Iterator interface
-func (itr *Iterator) Rewind() {
- if !itr.reversed {
- itr.seekToFirst()
- } else {
- itr.seekToLast()
- }
-}
-
-// Seek follows the y.Iterator interface
-func (itr *Iterator) Seek(key []byte) {
- if !itr.reversed {
- itr.seek(key)
- } else {
- itr.seekForPrev(key)
- }
-}
-
-// ConcatIterator concatenates the sequences defined by several iterators. (It only works with
-// TableIterators, probably just because it's faster to not be so generic.)
-type ConcatIterator struct {
- idx int // Which iterator is active now.
- cur *Iterator
- iters []*Iterator // Corresponds to tables.
- tables []*Table // Disregarding reversed, this is in ascending order.
- reversed bool
-}
-
-// NewConcatIterator creates a new concatenated iterator
-func NewConcatIterator(tbls []*Table, reversed bool) *ConcatIterator {
- iters := make([]*Iterator, len(tbls))
- for i := 0; i < len(tbls); i++ {
- // Increment the reference count now, even though the iterators are created lazily.
- // We hold a reference to every table for the lifetime of this iterator.
- tbls[i].IncrRef()
-
- // Save cycles by not initializing the iterators until needed.
- // iters[i] = tbls[i].NewIterator(reversed)
- }
- return &ConcatIterator{
- reversed: reversed,
- iters: iters,
- tables: tbls,
- idx: -1, // Not really necessary because s.it.Valid()=false, but good to have.
- }
-}
-
-func (s *ConcatIterator) setIdx(idx int) {
- s.idx = idx
- if idx < 0 || idx >= len(s.iters) {
- s.cur = nil
- return
- }
- if s.iters[idx] == nil {
- s.iters[idx] = s.tables[idx].NewIterator(s.reversed)
- }
- s.cur = s.iters[s.idx]
-}
-
-// Rewind implements y.Interface
-func (s *ConcatIterator) Rewind() {
- if len(s.iters) == 0 {
- return
- }
- if !s.reversed {
- s.setIdx(0)
- } else {
- s.setIdx(len(s.iters) - 1)
- }
- s.cur.Rewind()
-}
-
-// Valid implements y.Interface
-func (s *ConcatIterator) Valid() bool {
- return s.cur != nil && s.cur.Valid()
-}
-
-// Key implements y.Interface
-func (s *ConcatIterator) Key() []byte {
- return s.cur.Key()
-}
-
-// Value implements y.Interface
-func (s *ConcatIterator) Value() y.ValueStruct {
- return s.cur.Value()
-}
-
-// Seek brings us to element >= key if reversed is false. Otherwise, <= key.
-func (s *ConcatIterator) Seek(key []byte) {
- var idx int
- if !s.reversed {
- idx = sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(s.tables[i].Biggest(), key) >= 0
- })
- } else {
- n := len(s.tables)
- idx = n - 1 - sort.Search(n, func(i int) bool {
- return y.CompareKeys(s.tables[n-1-i].Smallest(), key) <= 0
- })
- }
- if idx >= len(s.tables) || idx < 0 {
- s.setIdx(-1)
- return
- }
- // For reversed=false, we know s.tables[idx-1].Biggest() < key. Thus, the
- // previous table cannot possibly contain key.
- s.setIdx(idx)
- s.cur.Seek(key)
-}
-
-// Next advances our concat iterator.
-func (s *ConcatIterator) Next() {
- s.cur.Next()
- if s.cur.Valid() {
- // Nothing to do. Just stay with the current table.
- return
- }
- for { // In case there are empty tables.
- if !s.reversed {
- s.setIdx(s.idx + 1)
- } else {
- s.setIdx(s.idx - 1)
- }
- if s.cur == nil {
- // End of list. Valid will become false.
- return
- }
- s.cur.Rewind()
- if s.cur.Valid() {
- break
- }
- }
-}
-
-// Close implements y.Interface.
-func (s *ConcatIterator) Close() error {
- for _, t := range s.tables {
- // DeReference the tables while closing the iterator.
- if err := t.DecrRef(); err != nil {
- return err
- }
- }
- for _, it := range s.iters {
- if it == nil {
- continue
- }
- if err := it.Close(); err != nil {
- return errors.Wrap(err, "ConcatIterator")
- }
- }
- return nil
-}
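
ConcatIterator.Seek leans on the tables being sorted and non-overlapping: sort.Search picks the single table whose biggest key can cover the target, and the per-table seek is delegated there. A toy model of that dispatch (forward direction only), with tables reduced to their key ranges:

```go
package main

import (
	"fmt"
	"sort"
)

// A table reduced to its key range; the real Table also carries the data.
type table struct{ smallest, biggest string }

// seekTable returns the index of the only table that can contain a key >= target,
// or -1 if the target is past the end, mirroring ConcatIterator.Seek above.
func seekTable(tables []table, target string) int {
	idx := sort.Search(len(tables), func(i int) bool {
		return tables[i].biggest >= target
	})
	if idx == len(tables) {
		return -1 // target is greater than every key in every table
	}
	return idx
}

func main() {
	tables := []table{{"a", "f"}, {"g", "m"}, {"n", "z"}}
	for _, k := range []string{"c", "g", "zz"} {
		fmt.Printf("seek %q -> table %d\n", k, seekTable(tables, k))
	}
	// seek "c" -> table 0, seek "g" -> table 1, seek "zz" -> table -1
}
```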
diff --git a/vendor/github.com/dgraph-io/badger/table/merge_iterator.go b/vendor/github.com/dgraph-io/badger/table/merge_iterator.go
deleted file mode 100644
index cbecd849..00000000
--- a/vendor/github.com/dgraph-io/badger/table/merge_iterator.go
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "bytes"
-
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-// MergeIterator merges multiple iterators.
-// NOTE: MergeIterator owns the array of iterators and is responsible for closing them.
-type MergeIterator struct {
- left node
- right node
- small *node
-
- curKey []byte
- reverse bool
-}
-
-type node struct {
- valid bool
- key []byte
- iter y.Iterator
-
- // The two iterators are type asserted from `y.Iterator` so that more calls can
- // be inlined. Calling methods on the concrete types is much faster (about
- // 25-30%) than calling through the interface.
- merge *MergeIterator
- concat *ConcatIterator
-}
-
-func (n *node) setIterator(iter y.Iterator) {
- n.iter = iter
- // It's okay if the type assertion below fails and n.merge/n.concat are set to nil.
- // We handle the nil values of merge and concat in all the methods.
- n.merge, _ = iter.(*MergeIterator)
- n.concat, _ = iter.(*ConcatIterator)
-}
-
-func (n *node) setKey() {
- if n.merge != nil {
- n.valid = n.merge.small.valid
- if n.valid {
- n.key = n.merge.small.key
- }
- } else if n.concat != nil {
- n.valid = n.concat.Valid()
- if n.valid {
- n.key = n.concat.Key()
- }
- } else {
- n.valid = n.iter.Valid()
- if n.valid {
- n.key = n.iter.Key()
- }
- }
-}
-
-func (n *node) next() {
- if n.merge != nil {
- n.merge.Next()
- } else if n.concat != nil {
- n.concat.Next()
- } else {
- n.iter.Next()
- }
- n.setKey()
-}
-
-func (n *node) rewind() {
- n.iter.Rewind()
- n.setKey()
-}
-
-func (n *node) seek(key []byte) {
- n.iter.Seek(key)
- n.setKey()
-}
-
-func (mi *MergeIterator) fix() {
- if !mi.bigger().valid {
- return
- }
- if !mi.small.valid {
- mi.swapSmall()
- return
- }
- cmp := y.CompareKeys(mi.small.key, mi.bigger().key)
- // Both the keys are equal.
- if cmp == 0 {
- // In case of same keys, move the right iterator ahead.
- mi.right.next()
- if &mi.right == mi.small {
- mi.swapSmall()
- }
- return
- } else if cmp < 0 { // Small is less than bigger().
- if mi.reverse {
- mi.swapSmall()
- } else {
- // we don't need to do anything. Small already points to the smallest.
- }
- return
- } else { // bigger() is less than small.
- if mi.reverse {
- // Do nothing since we're iterating in reverse. Small currently points to
- // the bigger key and that's okay in reverse iteration.
- } else {
- mi.swapSmall()
- }
- return
- }
-}
-
-func (mi *MergeIterator) bigger() *node {
- if mi.small == &mi.left {
- return &mi.right
- }
- return &mi.left
-}
-
-func (mi *MergeIterator) swapSmall() {
- if mi.small == &mi.left {
- mi.small = &mi.right
- return
- }
- if mi.small == &mi.right {
- mi.small = &mi.left
- return
- }
-}
-
-// Next advances to the next element, skipping any entries whose key equals the current key.
-func (mi *MergeIterator) Next() {
- for mi.Valid() {
- if !bytes.Equal(mi.small.key, mi.curKey) {
- break
- }
- mi.small.next()
- mi.fix()
- }
- mi.setCurrent()
-}
-
-func (mi *MergeIterator) setCurrent() {
- mi.curKey = append(mi.curKey[:0], mi.small.key...)
-}
-
-// Rewind seeks to first element (or last element for reverse iterator).
-func (mi *MergeIterator) Rewind() {
- mi.left.rewind()
- mi.right.rewind()
- mi.fix()
- mi.setCurrent()
-}
-
-// Seek brings us to element with key >= given key.
-func (mi *MergeIterator) Seek(key []byte) {
- mi.left.seek(key)
- mi.right.seek(key)
- mi.fix()
- mi.setCurrent()
-}
-
-// Valid returns whether the MergeIterator is at a valid element.
-func (mi *MergeIterator) Valid() bool {
- return mi.small.valid
-}
-
-// Key returns the key associated with the current iterator.
-func (mi *MergeIterator) Key() []byte {
- return mi.small.key
-}
-
-// Value returns the value associated with the iterator.
-func (mi *MergeIterator) Value() y.ValueStruct {
- return mi.small.iter.Value()
-}
-
-// Close implements y.Iterator.
-func (mi *MergeIterator) Close() error {
- err1 := mi.left.iter.Close()
- err2 := mi.right.iter.Close()
- if err1 != nil {
- return errors.Wrap(err1, "MergeIterator")
- }
- return errors.Wrap(err2, "MergeIterator")
-}
-
-// NewMergeIterator creates a merge iterator.
-func NewMergeIterator(iters []y.Iterator, reverse bool) y.Iterator {
- if len(iters) == 0 {
- return nil
- } else if len(iters) == 1 {
- return iters[0]
- } else if len(iters) == 2 {
- mi := &MergeIterator{
- reverse: reverse,
- }
- mi.left.setIterator(iters[0])
- mi.right.setIterator(iters[1])
- // Point small at the left iterator arbitrarily; fix() corrects it on the first rewind/seek.
- mi.small = &mi.left
- return mi
- }
- mid := len(iters) / 2
- return NewMergeIterator(
- []y.Iterator{
- NewMergeIterator(iters[:mid], reverse),
- NewMergeIterator(iters[mid:], reverse),
- }, reverse)
-}
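
NewMergeIterator arranges k source iterators into a balanced binary tree of two-way merges, so advancing costs O(log k) comparisons rather than O(k). A small sketch of the same idea over plain sorted slices, including the left-wins-on-ties rule that MergeIterator applies by advancing the right iterator on equal keys:

```go
package main

import "fmt"

// merge2 merges two sorted slices, keeping the left element on ties, the
// way MergeIterator advances the right iterator when keys are equal.
func merge2(a, b []string) []string {
	out := make([]string, 0, len(a)+len(b))
	for len(a) > 0 && len(b) > 0 {
		switch {
		case a[0] < b[0]:
			out, a = append(out, a[0]), a[1:]
		case a[0] > b[0]:
			out, b = append(out, b[0]), b[1:]
		default: // equal keys: left wins, the right copy is skipped
			out, a, b = append(out, a[0]), a[1:], b[1:]
		}
	}
	return append(append(out, a...), b...)
}

// mergeAll pairs the inputs recursively, mirroring the balanced binary
// tree that NewMergeIterator builds over its iterator list.
func mergeAll(lists [][]string) []string {
	switch len(lists) {
	case 0:
		return nil
	case 1:
		return lists[0]
	}
	mid := len(lists) / 2
	return merge2(mergeAll(lists[:mid]), mergeAll(lists[mid:]))
}

func main() {
	fmt.Println(mergeAll([][]string{
		{"a", "c", "e"},
		{"b", "c"},
		{"a", "z"},
	})) // [a b c e z]
}
```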
diff --git a/vendor/github.com/dgraph-io/badger/table/table.go b/vendor/github.com/dgraph-io/badger/table/table.go
deleted file mode 100644
index 9bc41787..00000000
--- a/vendor/github.com/dgraph-io/badger/table/table.go
+++ /dev/null
@@ -1,362 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "bytes"
- "crypto/sha256"
- "encoding/binary"
- "fmt"
- "io"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
-
- "github.com/AndreasBriese/bbloom"
- "github.com/dgraph-io/badger/options"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-const fileSuffix = ".sst"
-
-type keyOffset struct {
- key []byte
- offset int
- len int
-}
-
-// TableInterface is useful for testing.
-type TableInterface interface {
- Smallest() []byte
- Biggest() []byte
- DoesNotHave(key []byte) bool
-}
-
-// Table represents a loaded table file with the info we have about it
-type Table struct {
- sync.Mutex
-
- fd *os.File // Own fd.
- tableSize int // Initialized in OpenTable, using fd.Stat().
-
- blockIndex []keyOffset
- ref int32 // For file garbage collection. Atomic.
-
- loadingMode options.FileLoadingMode
- mmap []byte // Memory mapped.
-
- // The following are initialized once and const.
- smallest, biggest []byte // Smallest and largest keys (with timestamps).
- id uint64 // file id, part of filename
-
- bf bbloom.Bloom
-
- Checksum []byte
-}
-
-// IncrRef increments the refcount (having to do with whether the file should be deleted)
-func (t *Table) IncrRef() {
- atomic.AddInt32(&t.ref, 1)
-}
-
-// DecrRef decrements the refcount and possibly deletes the table
-func (t *Table) DecrRef() error {
- newRef := atomic.AddInt32(&t.ref, -1)
- if newRef == 0 {
- // We can safely delete this file, because for all the current files, we always have
- // at least one reference pointing to them.
-
- // Unmapping first is necessary on Windows, where a mapped file cannot be deleted.
- if t.loadingMode == options.MemoryMap {
- if err := y.Munmap(t.mmap); err != nil {
- return err
- }
- t.mmap = nil
- }
- if err := t.fd.Truncate(0); err != nil {
- // Truncating first lets the FS reclaim the space even if another fd lingers.
- return err
- }
- filename := t.fd.Name()
- if err := t.fd.Close(); err != nil {
- return err
- }
- if err := os.Remove(filename); err != nil {
- return err
- }
- }
- return nil
-}
-
-type block struct {
- offset int
- data []byte
-}
-
-func (b block) NewIterator() *blockIterator {
- return &blockIterator{data: b.data}
-}
-
-// OpenTable assumes file has only one table and opens it. Takes ownership of fd upon function
-// entry. Returns a table with one reference count on it (decrementing which may delete the file!
-// -- consider t.Close() instead). The fd has to be writable because we call Truncate on it before
-// deleting.
-func OpenTable(fd *os.File, mode options.FileLoadingMode, cksum []byte) (*Table, error) {
- fileInfo, err := fd.Stat()
- if err != nil {
- // It's OK to ignore fd.Close() errs in this function because we have only read
- // from the file.
- _ = fd.Close()
- return nil, y.Wrap(err)
- }
-
- filename := fileInfo.Name()
- id, ok := ParseFileID(filename)
- if !ok {
- _ = fd.Close()
- return nil, errors.Errorf("Invalid filename: %s", filename)
- }
- t := &Table{
- fd: fd,
- ref: 1, // Caller is given one reference.
- id: id,
- loadingMode: mode,
- }
-
- t.tableSize = int(fileInfo.Size())
-
- // We first load to RAM, so we can read the index and do checksum.
- if err := t.loadToRAM(); err != nil {
- return nil, err
- }
- // Enforce checksum before we read index. Otherwise, if the file was
- // truncated, we'd end up with panics in readIndex.
- if len(cksum) > 0 && !bytes.Equal(t.Checksum, cksum) {
- return nil, fmt.Errorf(
- "CHECKSUM_MISMATCH: Table checksum does not match checksum in MANIFEST."+
- " NOT including table %s. This would lead to missing data."+
- "\n sha256 %x Expected\n sha256 %x Found\n", filename, cksum, t.Checksum)
- }
- if err := t.readIndex(); err != nil {
- return nil, y.Wrap(err)
- }
-
- it := t.NewIterator(false)
- defer it.Close()
- it.Rewind()
- if it.Valid() {
- t.smallest = it.Key()
- }
-
- it2 := t.NewIterator(true)
- defer it2.Close()
- it2.Rewind()
- if it2.Valid() {
- t.biggest = it2.Key()
- }
-
- switch mode {
- case options.LoadToRAM:
- // No need to do anything. t.mmap is already filled.
- case options.MemoryMap:
- t.mmap, err = y.Mmap(fd, false, fileInfo.Size())
- if err != nil {
- _ = fd.Close()
- return nil, y.Wrapf(err, "Unable to map file: %q", fileInfo.Name())
- }
- case options.FileIO:
- t.mmap = nil
- default:
- panic(fmt.Sprintf("Invalid loading mode: %v", mode))
- }
- return t, nil
-}
-
-// Close closes the open table. (Releases resources back to the OS.)
-func (t *Table) Close() error {
- if t.loadingMode == options.MemoryMap {
- if err := y.Munmap(t.mmap); err != nil {
- return err
- }
- t.mmap = nil
- }
-
- return t.fd.Close()
-}
-
-func (t *Table) read(off, sz int) ([]byte, error) {
- if len(t.mmap) > 0 {
- if len(t.mmap[off:]) < sz {
- return nil, y.ErrEOF
- }
- return t.mmap[off : off+sz], nil
- }
-
- res := make([]byte, sz)
- nbr, err := t.fd.ReadAt(res, int64(off))
- y.NumReads.Add(1)
- y.NumBytesRead.Add(int64(nbr))
- return res, err
-}
-
-func (t *Table) readNoFail(off, sz int) []byte {
- res, err := t.read(off, sz)
- y.Check(err)
- return res
-}
-
-func (t *Table) readIndex() error {
- if len(t.mmap) != t.tableSize {
- panic("Table size does not match the read bytes")
- }
- readPos := t.tableSize
-
- // Read bloom filter.
- readPos -= 4
- buf := t.readNoFail(readPos, 4)
- bloomLen := int(binary.BigEndian.Uint32(buf))
- readPos -= bloomLen
- data := t.readNoFail(readPos, bloomLen)
- t.bf = bbloom.JSONUnmarshal(data)
-
- readPos -= 4
- buf = t.readNoFail(readPos, 4)
- restartsLen := int(binary.BigEndian.Uint32(buf))
-
- readPos -= 4 * restartsLen
- buf = t.readNoFail(readPos, 4*restartsLen)
-
- offsets := make([]int, restartsLen)
- for i := 0; i < restartsLen; i++ {
- offsets[i] = int(binary.BigEndian.Uint32(buf[:4]))
- buf = buf[4:]
- }
-
- // The last offset stores the end of the last block.
- for i := 0; i < len(offsets); i++ {
- var o int
- if i == 0 {
- o = 0
- } else {
- o = offsets[i-1]
- }
-
- ko := keyOffset{
- offset: o,
- len: offsets[i] - o,
- }
- t.blockIndex = append(t.blockIndex, ko)
- }
-
- // Execute this index read serially, because we already have table data in memory.
- var h header
- for idx := range t.blockIndex {
- ko := &t.blockIndex[idx]
-
- hbuf := t.readNoFail(ko.offset, h.Size())
- h.Decode(hbuf)
- y.AssertTrue(h.plen == 0)
-
- key := t.readNoFail(ko.offset+len(hbuf), int(h.klen))
- ko.key = append([]byte{}, key...)
- }
-
- return nil
-}
-
-func (t *Table) block(idx int) (block, error) {
- y.AssertTruef(idx >= 0, "idx=%d", idx)
- if idx >= len(t.blockIndex) {
- return block{}, errors.New("block out of index")
- }
-
- ko := t.blockIndex[idx]
- blk := block{
- offset: ko.offset,
- }
- var err error
- blk.data, err = t.read(blk.offset, ko.len)
- return blk, err
-}
-
-// Size is its file size in bytes
-func (t *Table) Size() int64 { return int64(t.tableSize) }
-
-// Smallest is its smallest key, or nil if there are none
-func (t *Table) Smallest() []byte { return t.smallest }
-
-// Biggest is its biggest key, or nil if there are none
-func (t *Table) Biggest() []byte { return t.biggest }
-
-// Filename is NOT the file name. Just kidding, it is.
-func (t *Table) Filename() string { return t.fd.Name() }
-
-// ID is the table's ID number (used to make the file name).
-func (t *Table) ID() uint64 { return t.id }
-
-// DoesNotHave returns true if (but not "only if") the table does not have the key. It does a
-// bloom filter lookup.
-func (t *Table) DoesNotHave(key []byte) bool { return !t.bf.Has(key) }
-
-// ParseFileID reads the file id out of a filename.
-func ParseFileID(name string) (uint64, bool) {
- name = path.Base(name)
- if !strings.HasSuffix(name, fileSuffix) {
- return 0, false
- }
- // suffix := name[len(fileSuffix):]
- name = strings.TrimSuffix(name, fileSuffix)
- id, err := strconv.Atoi(name)
- if err != nil {
- return 0, false
- }
- y.AssertTrue(id >= 0)
- return uint64(id), true
-}
-
-// IDToFilename does the inverse of ParseFileID
-func IDToFilename(id uint64) string {
- return fmt.Sprintf("%06d", id) + fileSuffix
-}
-
-// NewFilename should be named TableFilepath -- it combines the dir with the ID to make a table
-// filepath.
-func NewFilename(id uint64, dir string) string {
- return filepath.Join(dir, IDToFilename(id))
-}
-
-func (t *Table) loadToRAM() error {
- if _, err := t.fd.Seek(0, io.SeekStart); err != nil {
- return err
- }
- t.mmap = make([]byte, t.tableSize)
- sum := sha256.New()
- tee := io.TeeReader(t.fd, sum)
- // Read the whole table with io.ReadFull: a single Read may legitimately return
- // short with a nil error, which would otherwise slip through as success.
- read, err := io.ReadFull(tee, t.mmap)
- if err != nil {
- return y.Wrapf(err, "Unable to load file in memory. Table file: %s", t.Filename())
- }
- t.Checksum = sum.Sum(nil)
- y.NumReads.Add(1)
- y.NumBytesRead.Add(int64(read))
- return nil
-}
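
Combining Finish (builder.go above) with readIndex here, the .sst image ends with: the restart offsets, a 4-byte restart count, the bloom filter bytes, and a trailing 4-byte bloom length. A hedged sketch of walking that footer backwards over an in-memory table (toy data, not a real .sst):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// parseFooter walks an in-memory table image from the end, mirroring readIndex:
// ... | restart offsets | uint32(len(restarts)) | bloom bytes | uint32(bloomLen).
func parseFooter(data []byte) (bloom []byte, restarts []uint32) {
	pos := len(data)

	pos -= 4
	bloomLen := int(binary.BigEndian.Uint32(data[pos:]))
	pos -= bloomLen
	bloom = data[pos : pos+bloomLen]

	pos -= 4
	n := int(binary.BigEndian.Uint32(data[pos:]))

	pos -= 4 * n
	restarts = make([]uint32, n)
	for i := range restarts {
		restarts[i] = binary.BigEndian.Uint32(data[pos+4*i:])
	}
	return bloom, restarts
}

func main() {
	// Build a fake footer: two restart offsets (0, 100), the count, then the bloom.
	var buf []byte
	for _, v := range []uint32{0, 100, 2} {
		buf = binary.BigEndian.AppendUint32(buf, v)
	}
	bloom := []byte(`{"fake":"bloom"}`)
	buf = append(buf, bloom...)
	buf = binary.BigEndian.AppendUint32(buf, uint32(len(bloom)))

	b, r := parseFooter(buf)
	fmt.Printf("bloom=%s restarts=%v\n", b, r) // bloom={"fake":"bloom"} restarts=[0 100]
}
```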
diff --git a/vendor/github.com/dgraph-io/badger/test.sh b/vendor/github.com/dgraph-io/badger/test.sh
deleted file mode 100644
index 6a68553e..00000000
--- a/vendor/github.com/dgraph-io/badger/test.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-set -e
-
-go version
-
-packages=$(go list ./... | grep github.com/dgraph-io/badger/)
-
-if [[ ! -z "$TEAMCITY_VERSION" ]]; then
- export GOFLAGS="-json"
-fi
-
-# Ensure that we can compile the binary.
-pushd badger
-go build -v .
-popd
-
-# Run the memory intensive tests first.
-go test -v -run='TestBigKeyValuePairs$' --manual=true
-go test -v -run='TestPushValueLogLimit' --manual=true
-
-# Run the special Truncate test.
-rm -rf p
-go test -v -run='TestTruncateVlogNoClose$' --manual=true
-truncate --size=4096 p/000000.vlog
-go test -v -run='TestTruncateVlogNoClose2$' --manual=true
-go test -v -run='TestTruncateVlogNoClose3$' --manual=true
-rm -rf p
-
-# Then the normal tests.
-echo
-echo "==> Starting test for table, skl and y package"
-go test -v -race github.com/dgraph-io/badger/skl
-# Run test for all package except the top level package. The top level package support the
-# `vlog_mmap` flag which rest of the packages don't support.
-go test -v -race $packages
-
-echo
-echo "==> Starting tests with value log mmapped..."
-# Run top level package tests with mmap flag.
-go test -v -race github.com/dgraph-io/badger --vlog_mmap=true
-
-echo
-echo "==> Starting tests with value log not mmapped..."
-go test -v -race github.com/dgraph-io/badger --vlog_mmap=false
-
diff --git a/vendor/github.com/dgraph-io/badger/trie/trie.go b/vendor/github.com/dgraph-io/badger/trie/trie.go
deleted file mode 100644
index 98e4a9dc..00000000
--- a/vendor/github.com/dgraph-io/badger/trie/trie.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package trie
-
-type node struct {
- children map[byte]*node
- ids []uint64
-}
-
-func newNode() *node {
- return &node{
- children: make(map[byte]*node),
- ids: []uint64{},
- }
-}
-
-// Trie datastructure.
-type Trie struct {
- root *node
-}
-
-// NewTrie returns Trie.
-func NewTrie() *Trie {
- return &Trie{
- root: newNode(),
- }
-}
-
-// Add adds the id in the trie for the given prefix path.
-func (t *Trie) Add(prefix []byte, id uint64) {
- node := t.root
- for _, val := range prefix {
- child, ok := node.children[val]
- if !ok {
- child = newNode()
- node.children[val] = child
- }
- node = child
- }
- // We only need to add the id to the last node of the given prefix.
- node.ids = append(node.ids, id)
-}
-
-// Get returns prefix matched ids for the given key.
-func (t *Trie) Get(key []byte) map[uint64]struct{} {
- out := make(map[uint64]struct{})
- node := t.root
- // If the root has ids, there are subscribers for the nil/empty
- // prefix. Add them to the list.
- if len(node.ids) > 0 {
- for _, i := range node.ids {
- out[i] = struct{}{}
- }
- }
- for _, val := range key {
- child, ok := node.children[val]
- if !ok {
- break
- }
- // We need the ids of all the nodes along the matching key path.
- for _, id := range child.ids {
- out[id] = struct{}{}
- }
- node = child
- }
- return out
-}
-
-// Delete removes the id if it exists under the given index path.
-func (t *Trie) Delete(index []byte, id uint64) {
- node := t.root
- for _, val := range index {
- child, ok := node.children[val]
- if !ok {
- return
- }
- node = child
- }
- // We're just removing the id, not the hanging path.
- out := node.ids[:0]
- for _, val := range node.ids {
- if val != id {
- out = append(out, val)
- }
- }
- for i := len(out); i < len(node.ids); i++ {
- node.ids[i] = 0 // garbage collecting
- }
- node.ids = out
-}
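
A quick usage sketch of this Trie as Badger uses it for prefix subscriptions (import path per the vendored location above): Add registers an id under a prefix, Get collects the ids of every registered prefix matching a key, and Delete unregisters.

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/badger/trie"
)

func main() {
	t := trie.NewTrie()
	t.Add([]byte("user/"), 1)  // subscriber 1 wants everything under user/
	t.Add([]byte("user/a"), 2) // subscriber 2 only wants user/a*
	t.Add(nil, 3)              // subscriber 3 wants all keys

	// Get returns the ids of every registered prefix that matches the key.
	fmt.Println(t.Get([]byte("user/ab"))) // map[1:{} 2:{} 3:{}]
	fmt.Println(t.Get([]byte("other")))   // map[3:{}]

	t.Delete([]byte("user/"), 1)
	fmt.Println(t.Get([]byte("user/ab"))) // map[2:{} 3:{}]
}
```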
diff --git a/vendor/github.com/dgraph-io/badger/txn.go b/vendor/github.com/dgraph-io/badger/txn.go
deleted file mode 100644
index 1c635386..00000000
--- a/vendor/github.com/dgraph-io/badger/txn.go
+++ /dev/null
@@ -1,701 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "context"
- "encoding/hex"
- "math"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
-
- "github.com/dgraph-io/badger/y"
- "github.com/dgraph-io/ristretto/z"
- "github.com/pkg/errors"
-)
-
-type oracle struct {
- // A 64-bit integer must be at the top for memory alignment. See issue #311.
- refCount int64
- isManaged bool // Does not change value, so no locking required.
-
- sync.Mutex // For nextTxnTs and commits.
- // writeChLock ensures that transactions go to the write
- // channel in the same order as their commit timestamps.
- writeChLock sync.Mutex
- nextTxnTs uint64
-
- // Used to block NewTransaction, so all previous commits are visible to a new read.
- txnMark *y.WaterMark
-
- // Either of these is used to determine which versions can be permanently
- // discarded during compaction.
- discardTs uint64 // Used by ManagedDB.
- readMark *y.WaterMark // Used by DB.
-
- // commits stores a key fingerprint and latest commit counter for it.
- // refCount is used to clear out commits map to avoid a memory blowup.
- commits map[uint64]uint64
-
- // closer is used to stop watermarks.
- closer *y.Closer
-}
-
-func newOracle(opt Options) *oracle {
- orc := &oracle{
- isManaged: opt.managedTxns,
- commits: make(map[uint64]uint64),
- // nextTxnTs and readOnlyTs are not initialized here; that happens after replay in Open.
- //
- // WaterMarks must be 64-bit aligned for atomic package, hence we must use pointers here.
- // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- readMark: &y.WaterMark{Name: "badger.PendingReads"},
- txnMark: &y.WaterMark{Name: "badger.TxnTimestamp"},
- closer: y.NewCloser(2),
- }
- orc.readMark.Init(orc.closer, opt.EventLogging)
- orc.txnMark.Init(orc.closer, opt.EventLogging)
- return orc
-}
-
-func (o *oracle) Stop() {
- o.closer.SignalAndWait()
-}
-
-func (o *oracle) addRef() {
- atomic.AddInt64(&o.refCount, 1)
-}
-
-func (o *oracle) decrRef() {
- if atomic.AddInt64(&o.refCount, -1) != 0 {
- return
- }
-
- // Clear out commits maps to release memory.
- o.Lock()
- defer o.Unlock()
- // Avoids the race where something new is added to commitsMap
- // after we check refCount and before we take Lock.
- if atomic.LoadInt64(&o.refCount) != 0 {
- return
- }
- if len(o.commits) >= 1000 { // If the map is still small, let it slide.
- o.commits = make(map[uint64]uint64)
- }
-}
-
-func (o *oracle) readTs() uint64 {
- if o.isManaged {
- panic("ReadTs should not be retrieved for managed DB")
- }
-
- var readTs uint64
- o.Lock()
- readTs = o.nextTxnTs - 1
- o.readMark.Begin(readTs)
- o.Unlock()
-
- // Wait for all txns which have no conflicts, have been assigned a commit
- // timestamp and are going through the write to value log and LSM tree
- // process. Not waiting here could mean that some txns which have been
- // committed would not be read.
- y.Check(o.txnMark.WaitForMark(context.Background(), readTs))
- return readTs
-}
-
-func (o *oracle) nextTs() uint64 {
- o.Lock()
- defer o.Unlock()
- return o.nextTxnTs
-}
-
-func (o *oracle) incrementNextTs() {
- o.Lock()
- defer o.Unlock()
- o.nextTxnTs++
-}
-
-// Any deleted or invalid versions at or below ts would be discarded during
-// compaction to reclaim disk space in LSM tree and thence value log.
-func (o *oracle) setDiscardTs(ts uint64) {
- o.Lock()
- defer o.Unlock()
- o.discardTs = ts
-}
-
-func (o *oracle) discardAtOrBelow() uint64 {
- if o.isManaged {
- o.Lock()
- defer o.Unlock()
- return o.discardTs
- }
- return o.readMark.DoneUntil()
-}
-
-// hasConflict must be called while having a lock.
-func (o *oracle) hasConflict(txn *Txn) bool {
- if len(txn.reads) == 0 {
- return false
- }
- for _, ro := range txn.reads {
- // A commit at the read timestamp is expected.
- // But, any commit after the read timestamp should cause a conflict.
- if ts, has := o.commits[ro]; has && ts > txn.readTs {
- return true
- }
- }
- return false
-}
-
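
hasConflict is the heart of the SSI check: a transaction must abort iff some key it read was committed again, by another transaction, after this transaction's read timestamp. A toy model of the rule, with key fingerprints reduced to strings:

```go
package main

import "fmt"

// hasConflict reports whether txn must abort: some key it read was committed
// again after the transaction's read timestamp.
func hasConflict(commits map[string]uint64, reads []string, readTs uint64) bool {
	for _, k := range reads {
		if ts, ok := commits[k]; ok && ts > readTs {
			return true
		}
	}
	return false
}

func main() {
	commits := map[string]uint64{"x": 5, "y": 9}

	// A txn that started at ts=7 and read x and y: y was re-committed at ts=9 > 7, so abort.
	fmt.Println(hasConflict(commits, []string{"x", "y"}, 7)) // true

	// A txn that only read x (last committed at ts=5 <= 7) commits fine.
	fmt.Println(hasConflict(commits, []string{"x"}, 7)) // false
}
```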
-func (o *oracle) newCommitTs(txn *Txn) uint64 {
- o.Lock()
- defer o.Unlock()
-
- if o.hasConflict(txn) {
- return 0
- }
-
- var ts uint64
- if !o.isManaged {
- // This is the general case, when user doesn't specify the read and commit ts.
- ts = o.nextTxnTs
- o.nextTxnTs++
- o.txnMark.Begin(ts)
-
- } else {
- // If commitTs is set, use it instead.
- ts = txn.commitTs
- }
-
- for _, w := range txn.writes {
- o.commits[w] = ts // Update the commitTs.
- }
- return ts
-}
-
-func (o *oracle) doneCommit(cts uint64) {
- if o.isManaged {
- // No need to update anything.
- return
- }
- o.txnMark.Done(cts)
-}
-
-// Txn represents a Badger transaction.
-type Txn struct {
- readTs uint64
- commitTs uint64
-
- update bool // update is used to conditionally keep track of reads.
- reads []uint64 // contains fingerprints of keys read.
- writes []uint64 // contains fingerprints of keys written.
-
- pendingWrites map[string]*Entry // cache stores any writes done by txn.
-
- db *DB
- discarded bool
-
- size int64
- count int64
- numIterators int32
-}
-
-type pendingWritesIterator struct {
- entries []*Entry
- nextIdx int
- readTs uint64
- reversed bool
-}
-
-func (pi *pendingWritesIterator) Next() {
- pi.nextIdx++
-}
-
-func (pi *pendingWritesIterator) Rewind() {
- pi.nextIdx = 0
-}
-
-func (pi *pendingWritesIterator) Seek(key []byte) {
- key = y.ParseKey(key)
- pi.nextIdx = sort.Search(len(pi.entries), func(idx int) bool {
- cmp := bytes.Compare(pi.entries[idx].Key, key)
- if !pi.reversed {
- return cmp >= 0
- }
- return cmp <= 0
- })
-}
-
-func (pi *pendingWritesIterator) Key() []byte {
- y.AssertTrue(pi.Valid())
- entry := pi.entries[pi.nextIdx]
- return y.KeyWithTs(entry.Key, pi.readTs)
-}
-
-func (pi *pendingWritesIterator) Value() y.ValueStruct {
- y.AssertTrue(pi.Valid())
- entry := pi.entries[pi.nextIdx]
- return y.ValueStruct{
- Value: entry.Value,
- Meta: entry.meta,
- UserMeta: entry.UserMeta,
- ExpiresAt: entry.ExpiresAt,
- Version: pi.readTs,
- }
-}
-
-func (pi *pendingWritesIterator) Valid() bool {
- return pi.nextIdx < len(pi.entries)
-}
-
-func (pi *pendingWritesIterator) Close() error {
- return nil
-}
-
-func (txn *Txn) newPendingWritesIterator(reversed bool) *pendingWritesIterator {
- if !txn.update || len(txn.pendingWrites) == 0 {
- return nil
- }
- entries := make([]*Entry, 0, len(txn.pendingWrites))
- for _, e := range txn.pendingWrites {
- entries = append(entries, e)
- }
- // Number of pending writes per transaction shouldn't be too big in general.
- sort.Slice(entries, func(i, j int) bool {
- cmp := bytes.Compare(entries[i].Key, entries[j].Key)
- if !reversed {
- return cmp < 0
- }
- return cmp > 0
- })
- return &pendingWritesIterator{
- readTs: txn.readTs,
- entries: entries,
- reversed: reversed,
- }
-}
-
-func (txn *Txn) checkSize(e *Entry) error {
- count := txn.count + 1
- // Extra bytes for version in key.
- size := txn.size + int64(e.estimateSize(txn.db.opt.ValueThreshold)) + 10
- if count >= txn.db.opt.maxBatchCount || size >= txn.db.opt.maxBatchSize {
- return ErrTxnTooBig
- }
- txn.count, txn.size = count, size
- return nil
-}
-
-func exceedsSize(prefix string, max int64, key []byte) error {
- n := len(key)
- if n > 1<<10 {
- n = 1 << 10 // Dump at most 1KB; slicing a shorter key at 1<<10 would panic.
- }
- return errors.Errorf("%s with size %d exceeded %d limit. %s:\n%s",
- prefix, len(key), max, prefix, hex.Dump(key[:n]))
-}
-
-func (txn *Txn) modify(e *Entry) error {
- const maxKeySize = 65000
-
- switch {
- case !txn.update:
- return ErrReadOnlyTxn
- case txn.discarded:
- return ErrDiscardedTxn
- case len(e.Key) == 0:
- return ErrEmptyKey
- case bytes.HasPrefix(e.Key, badgerPrefix):
- return ErrInvalidKey
- case len(e.Key) > maxKeySize:
- // Key length can't exceed uint16, as determined by table::header. To
- // keep things safe and leave room for Badger's move prefix and a timestamp
- // suffix, cut it down to 65000 instead of using 65536.
- return exceedsSize("Key", maxKeySize, e.Key)
- case int64(len(e.Value)) > txn.db.opt.ValueLogFileSize:
- return exceedsSize("Value", txn.db.opt.ValueLogFileSize, e.Value)
- }
-
- if err := txn.checkSize(e); err != nil {
- return err
- }
- fp := z.MemHash(e.Key) // Avoid dealing with byte arrays.
- txn.writes = append(txn.writes, fp)
- txn.pendingWrites[string(e.Key)] = e
- return nil
-}
-
-// Set adds a key-value pair to the database.
-// It will return ErrReadOnlyTxn if update flag was set to false when creating the transaction.
-//
-// The current transaction keeps a reference to the key and val byte slice
-// arguments. Users must not modify key and val until the end of the transaction.
-func (txn *Txn) Set(key, val []byte) error {
- return txn.SetEntry(NewEntry(key, val))
-}
-
-// SetEntry takes an Entry struct and adds the key-value pair in the struct,
-// along with other metadata to the database.
-//
-// The current transaction keeps a reference to the entry passed in argument.
-// Users must not modify the entry until the end of the transaction.
-func (txn *Txn) SetEntry(e *Entry) error {
- return txn.modify(e)
-}
-
-// Delete deletes a key.
-//
-// This is done by adding a delete marker for the key at commit timestamp. Any
-// reads happening before this timestamp would be unaffected. Any reads after
-// this commit would see the deletion.
-//
-// The current transaction keeps a reference to the key byte slice argument.
-// Users must not modify the key until the end of the transaction.
-func (txn *Txn) Delete(key []byte) error {
- e := &Entry{
- Key: key,
- meta: bitDelete,
- }
- return txn.modify(e)
-}
-
-// Get looks for key and returns corresponding Item.
-// If key is not found, ErrKeyNotFound is returned.
-func (txn *Txn) Get(key []byte) (item *Item, rerr error) {
- if len(key) == 0 {
- return nil, ErrEmptyKey
- } else if txn.discarded {
- return nil, ErrDiscardedTxn
- }
-
- item = new(Item)
- if txn.update {
- if e, has := txn.pendingWrites[string(key)]; has && bytes.Equal(key, e.Key) {
- if isDeletedOrExpired(e.meta, e.ExpiresAt) {
- return nil, ErrKeyNotFound
- }
- // Fulfill from cache.
- item.meta = e.meta
- item.val = e.Value
- item.userMeta = e.UserMeta
- item.key = key
- item.status = prefetched
- item.version = txn.readTs
- item.expiresAt = e.ExpiresAt
- // We probably don't need to set db on item here.
- return item, nil
- }
- // Only track reads if this is an update txn. There is no need to track the
- // read if the txn serviced it internally.
- txn.addReadKey(key)
- }
-
- seek := y.KeyWithTs(key, txn.readTs)
- vs, err := txn.db.get(seek)
- if err != nil {
- return nil, errors.Wrapf(err, "DB::Get key: %q", key)
- }
- if vs.Value == nil && vs.Meta == 0 {
- return nil, ErrKeyNotFound
- }
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
- return nil, ErrKeyNotFound
- }
-
- item.key = key
- item.version = vs.Version
- item.meta = vs.Meta
- item.userMeta = vs.UserMeta
- item.db = txn.db
- item.vptr = vs.Value // TODO: Do we need to copy this over?
- item.txn = txn
- item.expiresAt = vs.ExpiresAt
- return item, nil
-}
-
-func (txn *Txn) addReadKey(key []byte) {
- if txn.update {
- fp := z.MemHash(key)
- txn.reads = append(txn.reads, fp)
- }
-}
-
-// Discard discards a created transaction. This method is very important and must be called. Commit
-// method calls this internally, however, calling this multiple times doesn't cause any issues. So,
-// this can safely be called via a defer right when transaction is created.
-//
-// NOTE: If any operations are run on a discarded transaction, ErrDiscardedTxn is returned.
-func (txn *Txn) Discard() {
- if txn.discarded { // Avoid a re-run.
- return
- }
- if atomic.LoadInt32(&txn.numIterators) > 0 {
- panic("Unclosed iterator at time of Txn.Discard.")
- }
- txn.discarded = true
- if !txn.db.orc.isManaged {
- txn.db.orc.readMark.Done(txn.readTs)
- }
- if txn.update {
- txn.db.orc.decrRef()
- }
-}
-
-func (txn *Txn) commitAndSend() (func() error, error) {
- orc := txn.db.orc
- // Ensure that the order in which we get the commit timestamp is the same as
- // the order in which we push these updates to the write channel. So, we
- // acquire a writeChLock before getting a commit timestamp, and only release
- // it after pushing the entries to it.
- orc.writeChLock.Lock()
- defer orc.writeChLock.Unlock()
-
- commitTs := orc.newCommitTs(txn)
- if commitTs == 0 {
- return nil, ErrConflict
- }
-
- // The following debug information is what led to determining the cause of
- // bank txn violation bug, and it took a whole bunch of effort to narrow it
- // down to here. So, keep this around for at least a couple of months.
- // var b strings.Builder
- // fmt.Fprintf(&b, "Read: %d. Commit: %d. reads: %v. writes: %v. Keys: ",
- // txn.readTs, commitTs, txn.reads, txn.writes)
- entries := make([]*Entry, 0, len(txn.pendingWrites)+1)
- for _, e := range txn.pendingWrites {
- // fmt.Fprintf(&b, "[%q : %q], ", e.Key, e.Value)
-
- // Suffix the keys with commit ts, so the key versions are sorted in
- // descending order of commit timestamp.
- e.Key = y.KeyWithTs(e.Key, commitTs)
- e.meta |= bitTxn
- entries = append(entries, e)
- }
- // log.Printf("%s\n", b.String())
- e := &Entry{
- Key: y.KeyWithTs(txnKey, commitTs),
- Value: []byte(strconv.FormatUint(commitTs, 10)),
- meta: bitFinTxn,
- }
- entries = append(entries, e)
-
- req, err := txn.db.sendToWriteCh(entries)
- if err != nil {
- orc.doneCommit(commitTs)
- return nil, err
- }
- ret := func() error {
- err := req.Wait()
- // Wait before marking commitTs as done.
- // We can't defer doneCommit above, because it is being called from a
- // callback here.
- orc.doneCommit(commitTs)
- return err
- }
- return ret, nil
-}
-
-func (txn *Txn) commitPrecheck() {
- if txn.commitTs == 0 && txn.db.opt.managedTxns {
- panic("Commit cannot be called with managedDB=true. Use CommitAt.")
- }
- if txn.discarded {
- panic("Trying to commit a discarded txn")
- }
-}
-
-// Commit commits the transaction, following these steps:
-//
-// 1. If there are no writes, return immediately.
-//
-// 2. Check if read rows were updated since txn started. If so, return ErrConflict.
-//
-// 3. If no conflict, generate a commit timestamp and update written rows' commit ts.
-//
-// 4. Batch up all writes, write them to value log and LSM tree.
-//
-// 5. If a callback is provided (via CommitWith), Badger will return immediately
-// after checking for conflicts. Writes to the database will happen in the
-// background. If there is a conflict, an error will be returned and the callback
-// will not run. If there are no conflicts, the callback will be called in the
-// background upon successful completion of writes or any error during write.
-//
-// If error is nil, the transaction is successfully committed. In case of a non-nil error, the LSM
-// tree won't be updated, so there's no need for any rollback.
-func (txn *Txn) Commit() error {
- txn.commitPrecheck() // Precheck before discarding txn.
- defer txn.Discard()
-
- if len(txn.writes) == 0 {
- return nil // Nothing to do.
- }
-
- txnCb, err := txn.commitAndSend()
- if err != nil {
- return err
- }
- // If batchSet failed, LSM would not have been updated. So, no need to rollback anything.
-
- // TODO: What if some of the txns successfully make it to value log, but others fail.
- // Nothing gets updated to LSM, until a restart happens.
- return txnCb()
-}
-
-type txnCb struct {
- commit func() error
- user func(error)
- err error
-}
-
-func runTxnCallback(cb *txnCb) {
- switch {
- case cb == nil:
- panic("txn callback is nil")
- case cb.user == nil:
- panic("Must have caught a nil callback for txn.CommitWith")
- case cb.err != nil:
- cb.user(cb.err)
- case cb.commit != nil:
- err := cb.commit()
- cb.user(err)
- default:
- cb.user(nil)
- }
-}
-
-// CommitWith acts like Commit, but takes a callback, which gets run via a
-// goroutine to avoid blocking this function. The callback is guaranteed to run,
-// so it is safe to increment a sync.WaitGroup before calling CommitWith, and
-// decrement it in the callback, in order to block until all callbacks are run.
-func (txn *Txn) CommitWith(cb func(error)) {
- txn.commitPrecheck() // Precheck before discarding txn.
- defer txn.Discard()
-
- if cb == nil {
- panic("Nil callback provided to CommitWith")
- }
-
- if len(txn.writes) == 0 {
- // Do not run these callbacks from here, because the CommitWith and the
- // callback might be acquiring the same locks. Instead run the callback
- // from another goroutine.
- go runTxnCallback(&txnCb{user: cb, err: nil})
- return
- }
-
- commitCb, err := txn.commitAndSend()
- if err != nil {
- go runTxnCallback(&txnCb{user: cb, err: err})
- return
- }
-
- go runTxnCallback(&txnCb{user: cb, commit: commitCb})
-}
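The doc comment above describes incrementing a `sync.WaitGroup` before `CommitWith` and decrementing it in the callback. A minimal sketch of that pattern, assuming `db` is an open `*badger.DB`; the key names, value, and batch size are illustrative:

```go
// Fire off several asynchronous commits, then block until every callback has run.
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
	txn := db.NewTransaction(true)
	if err := txn.Set([]byte(fmt.Sprintf("key-%d", i)), []byte("value")); err != nil {
		txn.Discard()
		continue
	}
	wg.Add(1) // increment BEFORE CommitWith, as the doc comment advises
	txn.CommitWith(func(err error) {
		defer wg.Done() // safe: the callback is guaranteed to run
		if err != nil {
			log.Printf("async commit failed: %v", err)
		}
	})
}
wg.Wait() // returns once all callbacks have fired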
-
-// ReadTs returns the read timestamp of the transaction.
-func (txn *Txn) ReadTs() uint64 {
- return txn.readTs
-}
-
-// NewTransaction creates a new transaction. Badger supports concurrent execution of transactions,
-// providing serializable snapshot isolation, avoiding write skews. Badger achieves this by tracking
-// the keys read and, at Commit time, ensuring that these read keys weren't concurrently modified by
-// another transaction.
-//
-// For read-only transactions, set update to false. In this mode, we don't track the rows read for
-// any changes. Thus, any long-running iterations done in this mode wouldn't pay this overhead.
-//
-// Running transactions concurrently is OK. However, a transaction itself isn't thread safe, and
-// should only be run serially. It doesn't matter if a transaction is created by one goroutine and
-// passed down to another, as long as the Txn APIs are called serially.
-//
-// When you create a new transaction, it is absolutely essential to call
-// Discard(). This should be done irrespective of what the update param is set
-// to. Commit API internally runs Discard, but running it twice wouldn't cause
-// any issues.
-//
-// txn := db.NewTransaction(false)
-// defer txn.Discard()
-// // Call various APIs.
-func (db *DB) NewTransaction(update bool) *Txn {
- return db.newTransaction(update, false)
-}
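To make the conflict detection concrete: if two read-write transactions read the same key and both commit a write to it, the second `Commit` returns `ErrConflict`. A sketch from the client's perspective, assuming `db` is an open, non-managed `*badger.DB`; the key `"counter"` is illustrative and most error handling is elided:

```go
txn1 := db.NewTransaction(true)
txn2 := db.NewTransaction(true)
defer txn1.Discard()
defer txn2.Discard()

// Both transactions read the key, so it is tracked via addReadKey.
_, _ = txn1.Get([]byte("counter"))
_, _ = txn2.Get([]byte("counter"))

_ = txn1.Set([]byte("counter"), []byte("1"))
_ = txn2.Set([]byte("counter"), []byte("2"))

if err := txn1.Commit(); err != nil {
	return err // no overlapping writer yet, so this commit succeeds
}
if err := txn2.Commit(); err == badger.ErrConflict {
	// txn1 wrote a key that txn2 had read after txn2's readTs was issued;
	// the caller should retry the whole transaction.
}
```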
-
-func (db *DB) newTransaction(update, isManaged bool) *Txn {
- if db.opt.ReadOnly && update {
- // DB is read-only, force read-only transaction.
- update = false
- }
-
- txn := &Txn{
- update: update,
- db: db,
- count: 1, // One extra entry for BitFin.
- size: int64(len(txnKey) + 10), // Some buffer for the extra entry.
- }
- if update {
- txn.pendingWrites = make(map[string]*Entry)
- txn.db.orc.addRef()
- }
- // It is important that the oracle addRef happens BEFORE we retrieve a read
- // timestamp. Otherwise, it is possible that the oracle commit map would
- // become nil after we get the read timestamp.
- // The sequence of events can be:
- // 1. This txn gets a read timestamp.
- // 2. Another txn working on the same keyset commits them, and decrements
- // the reference to oracle.
- // 3. Oracle ref reaches zero, resetting commit map.
- // 4. This txn increments the oracle reference.
- // 5. Now this txn would go on to commit the keyset, and no conflicts
- // would be detected.
- // See issue: https://github.com/dgraph-io/badger/issues/574
- if !isManaged {
- txn.readTs = db.orc.readTs()
- }
- return txn
-}
-
-// View executes a function creating and managing a read-only transaction for the user. Error
-// returned by the function is relayed by the View method.
-// If View is used with managed transactions, it would assume a read timestamp of MaxUint64.
-func (db *DB) View(fn func(txn *Txn) error) error {
- var txn *Txn
- if db.opt.managedTxns {
- txn = db.NewTransactionAt(math.MaxUint64, false)
- } else {
- txn = db.NewTransaction(false)
- }
- defer txn.Discard()
-
- return fn(txn)
-}
-
-// Update executes a function, creating and managing a read-write transaction
-// for the user. Error returned by the function is relayed by the Update method.
-// Update cannot be used with managed transactions.
-func (db *DB) Update(fn func(txn *Txn) error) error {
- if db.opt.managedTxns {
- panic("Update can only be used with managedDB=false.")
- }
- txn := db.NewTransaction(true)
- defer txn.Discard()
-
- if err := fn(txn); err != nil {
- return err
- }
-
- return txn.Commit()
-}
diff --git a/vendor/github.com/dgraph-io/badger/util.go b/vendor/github.com/dgraph-io/badger/util.go
deleted file mode 100644
index 2726b7ad..00000000
--- a/vendor/github.com/dgraph-io/badger/util.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "encoding/hex"
- "io/ioutil"
- "math/rand"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-func (s *levelsController) validate() error {
- for _, l := range s.levels {
- if err := l.validate(); err != nil {
- return errors.Wrap(err, "Levels Controller")
- }
- }
- return nil
-}
-
-// Check does some sanity check on one level of data or in-memory index.
-func (s *levelHandler) validate() error {
- if s.level == 0 {
- return nil
- }
-
- s.RLock()
- defer s.RUnlock()
- numTables := len(s.tables)
- for j := 1; j < numTables; j++ {
- if j >= len(s.tables) {
- return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables)
- }
-
- if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 {
- return errors.Errorf(
- "Inter: Biggest(j-1) \n%s\n vs Smallest(j): \n%s\n: level=%d j=%d numTables=%d",
- hex.Dump(s.tables[j-1].Biggest()), hex.Dump(s.tables[j].Smallest()),
- s.level, j, numTables)
- }
-
- if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 {
- return errors.Errorf(
- "Intra: \n%s\n vs \n%s\n: level=%d j=%d numTables=%d",
- hex.Dump(s.tables[j].Smallest()), hex.Dump(s.tables[j].Biggest()), s.level, j, numTables)
- }
- }
- return nil
-}
-
-// func (s *KV) debugPrintMore() { s.lc.debugPrintMore() }
-
-// // debugPrintMore shows key ranges of each level.
-// func (s *levelsController) debugPrintMore() {
-// s.Lock()
-// defer s.Unlock()
-// for i := 0; i < s.kv.opt.MaxLevels; i++ {
-// s.levels[i].debugPrintMore()
-// }
-// }
-
-// func (s *levelHandler) debugPrintMore() {
-// s.RLock()
-// defer s.RUnlock()
-// s.elog.Printf("Level %d:", s.level)
-// for _, t := range s.tables {
-// y.Printf(" [%s, %s]", t.Smallest(), t.Biggest())
-// }
-// y.Printf("\n")
-// }
-
-// reserveFileID reserves a unique file id.
-func (s *levelsController) reserveFileID() uint64 {
- id := atomic.AddUint64(&s.nextFileID, 1)
- return id - 1
-}
-
-func getIDMap(dir string) map[uint64]struct{} {
- fileInfos, err := ioutil.ReadDir(dir)
- y.Check(err)
- idMap := make(map[uint64]struct{})
- for _, info := range fileInfos {
- if info.IsDir() {
- continue
- }
- fileID, ok := table.ParseFileID(info.Name())
- if !ok {
- continue
- }
- idMap[fileID] = struct{}{}
- }
- return idMap
-}
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-}
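Elsewhere in this diff the first-party code migrates from `io/ioutil` to `os` (for example `ioutil.ReadDir` → `os.ReadDir`). The vendored `getIDMap` above still uses `ioutil.ReadDir`; a hypothetical `os.ReadDir`-based equivalent, shown only for comparison and not part of the vendored code, would be:

```go
// Hypothetical modernization of getIDMap using os.ReadDir (Go 1.16+).
// table.ParseFileID and y.Check are the vendored helpers used above.
func getIDMap(dir string) map[uint64]struct{} {
	entries, err := os.ReadDir(dir)
	y.Check(err)
	idMap := make(map[uint64]struct{})
	for _, entry := range entries {
		if entry.IsDir() { // skip directories, same as the FileInfo version
			continue
		}
		fileID, ok := table.ParseFileID(entry.Name())
		if !ok {
			continue
		}
		idMap[fileID] = struct{}{}
	}
	return idMap
}
```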
diff --git a/vendor/github.com/dgraph-io/badger/v2/.deepsource.toml b/vendor/github.com/dgraph-io/badger/v2/.deepsource.toml
deleted file mode 100644
index 266045f0..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/.deepsource.toml
+++ /dev/null
@@ -1,18 +0,0 @@
-version = 1
-
-test_patterns = [
- 'integration/testgc/**',
- '**/*_test.go'
-]
-
-exclude_patterns = [
-
-]
-
-[[analyzers]]
-name = 'go'
-enabled = true
-
-
- [analyzers.meta]
- import_path = 'github.com/dgraph-io/badger'
diff --git a/vendor/github.com/dgraph-io/badger/v2/.gitignore b/vendor/github.com/dgraph-io/badger/v2/.gitignore
deleted file mode 100644
index e3efdf58..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-p/
-badger-test*/
diff --git a/vendor/github.com/dgraph-io/badger/v2/.golangci.yml b/vendor/github.com/dgraph-io/badger/v2/.golangci.yml
deleted file mode 100644
index fecb8644..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/.golangci.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-run:
- tests: false
-
-linters-settings:
- lll:
- line-length: 100
-
-linters:
- disable-all: true
- enable:
- - errcheck
- - ineffassign
- - gas
- - gofmt
- - golint
- - gosimple
- - govet
- - lll
- - varcheck
- - unused
-
-issues:
- exclude-rules:
- - linters:
- - gosec
- text: "G404: "
-
\ No newline at end of file
diff --git a/vendor/github.com/dgraph-io/badger/v2/.travis.yml b/vendor/github.com/dgraph-io/badger/v2/.travis.yml
deleted file mode 100644
index fbcefbae..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/.travis.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-language: go
-
-go:
- - "1.12"
- - "1.13"
- - tip
-os:
- - osx
-env:
- jobs:
- - GOARCH=386
- - GOARCH=amd64
- global:
- - secure: CRkV2+/jlO0gXzzS50XGxfMS117FNwiVjxNY/LeWq06RKD+dDCPxTJl3JCNe3l0cYEPAglV2uMMYukDiTqJ7e+HI4nh4N4mv6lwx39N8dAvJe1x5ITS2T4qk4kTjuQb1Q1vw/ZOxoQqmvNKj2uRmBdJ/HHmysbRJ1OzCWML3OXdUwJf0AYlJzTjpMfkOKr7sTtE4rwyyQtd4tKH1fGdurgI9ZuFd9qvYxK2qcJhsQ6CNqMXt+7FkVkN1rIPmofjjBTNryzUr4COFXuWH95aDAif19DeBW4lbNgo1+FpDsrgmqtuhl6NAuptI8q/imow2KXBYJ8JPXsxW8DVFj0IIp0RCd3GjaEnwBEbxAyiIHLfW7AudyTS/dJOvZffPqXnuJ8xj3OPIdNe4xY0hWl8Ju2HhKfLOAHq7VadHZWd3IHLil70EiL4/JLD1rNbMImUZisFaA8pyrcIvYYebjOnk4TscwKFLedClRSX1XsMjWWd0oykQtrdkHM2IxknnBpaLu7mFnfE07f6dkG0nlpyu4SCLey7hr5FdcEmljA0nIxTSYDg6035fQkBEAbe7hlESOekkVNT9IZPwG+lmt3vU4ofi6NqNbJecOuSB+h36IiZ9s4YQtxYNnLgW14zjuFGGyT5smc3IjBT7qngDjKIgyrSVoRkY/8udy9qbUgvBeW8=
-
-
-jobs:
- allow_failures:
- - go: tip
- exclude:
- # Exclude builds for 386 architecture on go 1.12 and tip
- # Since we don't want it to run for 32 bit
- - go: "1.12"
- env: GOARCH=386
- - go: tip
- env: GOARCH=386
- include:
- # Define one extra linux build, which we use to run cross
- # compiled 32 bit tests
- - os: linux
- arch: arm64
- go: "1.14"
- env: go_32=yes
-
-notifications:
- email: false
- slack:
- secure: X7uBLWYbuUhf8QFE16CoS5z7WvFR8EN9j6cEectMW6mKZ3vwXGwVXRIPsgUq/606DsQdCCx34MR8MRWYGlu6TBolbSe9y0EP0i46yipPz22YtuT7umcVUbGEyx8MZKgG0v1u/zA0O4aCsOBpGAA3gxz8h3JlEHDt+hv6U8xRsSllVLzLSNb5lwxDtcfEDxVVqP47GMEgjLPM28Pyt5qwjk7o5a4YSVzkfdxBXxd3gWzFUWzJ5E3cTacli50dK4GVfiLcQY2aQYoYO7AAvDnvP+TPfjDkBlUEE4MUz5CDIN51Xb+WW33sX7g+r3Bj7V5IRcF973RiYkpEh+3eoiPnyWyxhDZBYilty3b+Hysp6d4Ov/3I3ll7Bcny5+cYjakjkMH3l9w3gs6Y82GlpSLSJshKWS8vPRsxFe0Pstj6QSJXTd9EBaFr+l1ScXjJv/Sya9j8N9FfTuOTESWuaL1auX4Y7zEEVHlA8SCNOO8K0eTfxGZnC/YcIHsR8rePEAcFxfOYQppkyLF/XvAtnb/LMUuu0g4y2qNdme6Oelvyar1tFEMRtbl4mRCdu/krXBFtkrsfUaVY6WTPdvXAGotsFJ0wuA53zGVhlcd3+xAlSlR3c1QX95HIMeivJKb5L4nTjP+xnrmQNtnVk+tG4LSH2ltuwcZSSczModtcBmRefrk=
-
-script: >-
- if [ $TRAVIS_OS_NAME = "linux" ] && [ $go_32 ]; then
- uname -a
- GOOS=linux GOARCH=arm go test -v ./...
- # Another round of tests after turning off mmap.
- GOOS=linux GOARCH=arm go test -v -vlog_mmap=false github.com/dgraph-io/badger
- else
- go test -v ./...
- # Another round of tests after turning off mmap.
- go test -v -vlog_mmap=false github.com/dgraph-io/badger
- # Cross-compile for Plan 9
- GOOS=plan9 go build ./...
- fi
diff --git a/vendor/github.com/dgraph-io/badger/v2/CHANGELOG.md b/vendor/github.com/dgraph-io/badger/v2/CHANGELOG.md
deleted file mode 100644
index 2cc490e4..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/CHANGELOG.md
+++ /dev/null
@@ -1,482 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
-
-## [2.2007.4] - 2021-08-25
-
-### Fixed
- - Fix build on Plan 9 (#1451) (#1508) (#1738)
-
-### Features
- - feat(zstd): backport replacement of DataDog's zstd with Klauspost's zstd (#1736)
-
-## [2.2007.3] - 2021-07-21
-
-### Fixed
- - fix(maxVersion): Use choosekey instead of KeyToList (#1532) #1533
- - fix(flatten): Add --num_versions flag (#1518) #1520
- - fix(build): Fix integer overflow on 32-bit architectures #1558
- - fix(pb): avoid protobuf warning due to common filename (#1519)
-
-### Features
- - Add command to stream contents of DB into another DB. (#1486)
-
-### New APIs
- - DB.StreamDB
- - DB.MaxVersion
-
-## [2.2007.2] - 2020-08-31
-
-### Fixed
- - Compaction: Use separate compactors for L0, L1 (#1466)
- - Rework Block and Index cache (#1473)
- - Add IsClosed method (#1478)
- - Cleanup: Avoid truncating in vlog.Open on error (#1465)
- - Cleanup: Do not close cache before compactions (#1464)
-
-### New APIs
-- Badger.DB
- - BlockCacheMetrics (#1473)
- - IndexCacheMetrics (#1473)
-- Badger.Option
- - WithBlockCacheSize (#1473)
- - WithIndexCacheSize (#1473)
-
-### Removed APIs [Breaking Changes]
-- Badger.DB
- - DataCacheMetrics (#1473)
- - BfCacheMetrics (#1473)
-- Badger.Option
- - WithMaxCacheSize (#1473)
- - WithMaxBfCacheSize (#1473)
- - WithKeepBlockIndicesInCache (#1473)
- - WithKeepBlocksInCache (#1473)
-
-## [2.2007.1] - 2020-08-19
-
-### Fixed
- - Remove vlog file if bootstrap, syncDir or mmap fails (#1434)
- - levels: Compaction incorrectly drops some delete markers (#1422)
- - Replay: Update head for LSM entries also (#1456)
-
-## [2.2007.0] - 2020-08-10
-
-### Fixed
- - Add a limit to the size of the batches sent over a stream. (#1412)
- - Fix Sequence generates duplicate values (#1281)
- - Fix race condition in DoesNotHave (#1287)
- - Fail fast if cgo is disabled and compression is ZSTD (#1284)
- - Proto: make badger/v2 compatible with v1 (#1293)
- - Proto: Rename dgraph.badger.v2.pb to badgerpb2 (#1314)
- - Handle duplicates in ManagedWriteBatch (#1315)
- - Ensure `bitValuePointer` flag is cleared for LSM entry values written to LSM (#1313)
- - DropPrefix: Return error on blocked writes (#1329)
- - Confirm `badgerMove` entry required before rewrite (#1302)
- - Drop move keys when its key prefix is dropped (#1331)
- - Iterator: Always add key to txn.reads (#1328)
- - Restore: Account for value size as well (#1358)
- - Compaction: Expired keys and delete markers are never purged (#1354)
- - GC: Consider size of value while rewriting (#1357)
- - Force KeepL0InMemory to be true when InMemory is true (#1375)
- - Rework DB.DropPrefix (#1381)
- - Update head while replaying value log (#1372)
- - Avoid panic on multiple closer.Signal calls (#1401)
- - Return error if the vlog writes exceeds more than 4GB (#1400)
-
-### Performance
- - Clean up transaction oracle as we go (#1275)
- - Use cache for storing block offsets (#1336)
-
-### Features
- - Support disabling conflict detection (#1344)
- - Add leveled logging (#1249)
- - Support entry version in Write batch (#1310)
- - Add Write method to batch write (#1321)
- - Support multiple iterators in read-write transactions (#1286)
-
-### New APIs
-- Badger.DB
- - NewManagedWriteBatch (#1310)
- - DropPrefix (#1381)
-- Badger.Option
- - WithDetectConflicts (#1344)
- - WithKeepBlockIndicesInCache (#1336)
- - WithKeepBlocksInCache (#1336)
-- Badger.WriteBatch
- - DeleteAt (#1310)
- - SetEntryAt (#1310)
- - Write (#1321)
-
-### Changes to Default Options
- - DefaultOptions: Set KeepL0InMemory to false (#1345)
- - Increase default valueThreshold from 32B to 1KB (#1346)
-
-### Deprecated
-- Badger.Option
- - WithEventLogging (#1203)
-
-### Reverts
-This section lists the changes which were reverted because of non-reproducible crashes.
-- Compress/Encrypt Blocks in the background (#1227)
-
-
-## [2.0.3] - 2020-03-24
-
-### Fixed
-
-- Add support for watching nil prefix in subscribe API (#1246)
-
-### Performance
-
-- Compress/Encrypt Blocks in the background (#1227)
-- Disable cache by default (#1257)
-
-### Features
-
-- Add BypassDirLock option (#1243)
-- Add separate cache for bloomfilters (#1260)
-
-### New APIs
-- badger.DB
- - BfCacheMetrics (#1260)
- - DataCacheMetrics (#1260)
-- badger.Options
- - WithBypassLockGuard (#1243)
- - WithLoadBloomsOnOpen (#1260)
- - WithMaxBfCacheSize (#1260)
-
-## [2.0.2] - 2020-03-02
-
-### Fixed
-
-- Cast sz to uint32 to fix compilation on 32 bit. (#1175)
-- Fix checkOverlap in compaction. (#1166)
-- Avoid sync in inmemory mode. (#1190)
-- Support disabling the cache completely. (#1185)
-- Add support for caching bloomfilters. (#1204)
-- Fix int overflow for 32bit. (#1216)
-- Remove the 'this entry should've caught' log from value.go. (#1170)
-- Rework concurrency semantics of valueLog.maxFid. (#1187)
-
-### Performance
-
-- Use fastRand instead of locked-rand in skiplist. (#1173)
-- Improve write stalling on level 0 and 1. (#1186)
-- Disable compression and set ZSTD Compression Level to 1. (#1191)
-
-## [2.0.1] - 2020-01-02
-
-### New APIs
-
-- badger.Options
- - WithInMemory (f5b6321)
- - WithZSTDCompressionLevel (3eb4e72)
-
-- Badger.TableInfo
- - EstimatedSz (f46f8ea)
-
-### Features
-
-- Introduce in-memory mode in badger. (#1113)
-
-### Fixed
-
-- Limit manifest's change set size. (#1119)
-- Cast idx to uint32 to fix compilation on i386. (#1118)
-- Fix request increment ref bug. (#1121)
-- Fix windows dataloss issue. (#1134)
-- Fix VerifyValueChecksum checks. (#1138)
-- Fix encryption in stream writer. (#1146)
-- Fix segmentation fault in vlog.Read. (header.Decode) (#1150)
-- Fix merge iterator duplicates issue. (#1157)
-
-### Performance
-
-- Set level 15 as default compression level in Zstd. (#1111)
-- Optimize createTable in stream_writer.go. (#1132)
-
-## [2.0.0] - 2019-11-12
-
-### New APIs
-
-- badger.DB
- - NewWriteBatchAt (7f43769)
- - CacheMetrics (b9056f1)
-
-- badger.Options
- - WithMaxCacheSize (b9056f1)
- - WithEventLogging (75c6a44)
- - WithBlockSize (1439463)
- - WithBloomFalsePositive (1439463)
- - WithKeepL0InMemory (ee70ff2)
- - WithVerifyValueChecksum (ee70ff2)
- - WithCompression (5f3b061)
- - WithEncryptionKey (a425b0e)
- - WithEncryptionKeyRotationDuration (a425b0e)
- - WithChecksumVerificationMode (7b4083d)
-
-### Features
-
-- Data cache to speed up lookups and iterations. (#1066)
-- Data compression. (#1013)
-- Data encryption-at-rest. (#1042)
-
-### Fixed
-
-- Fix deadlock when flushing discard stats. (#976)
-- Set move key's expiresAt for keys with TTL. (#1006)
-- Fix unsafe usage in Decode. (#1097)
-- Fix race condition on db.orc.nextTxnTs. (#1101)
-- Fix level 0 GC dataloss bug. (#1090)
-- Fix deadlock in discard stats. (#1070)
-- Support checksum verification for values read from vlog. (#1052)
-- Store entire L0 in memory. (#963)
-- Fix table.Smallest/Biggest and iterator Prefix bug. (#997)
-- Use standard proto functions for Marshal/Unmarshal and Size. (#994)
-- Fix boundaries on GC batch size. (#987)
-- VlogSize to store correct directory name to expvar.Map. (#956)
-- Fix transaction too big issue in restore. (#957)
-- Fix race condition in updateDiscardStats. (#973)
-- Cast results of len to uint32 to fix compilation in i386 arch. (#961)
-- Making the stream writer APIs goroutine-safe. (#959)
-- Fix prefix bug in key iterator and allow all versions. (#950)
-- Drop discard stats if we can't unmarshal it. (#936)
-- Fix race condition in flushDiscardStats function. (#921)
-- Ensure rewrite in vlog is within transactional limits. (#911)
-- Fix discard stats moved by GC bug. (#929)
-- Fix busy-wait loop in Watermark. (#920)
-
-### Performance
-
-- Introduce fast merge iterator. (#1080)
-- Binary search based table picker. (#983)
-- Flush vlog buffer if it grows beyond threshold. (#1067)
-- Introduce StreamDone in Stream Writer. (#1061)
-- Performance Improvements to block iterator. (#977)
-- Prevent unnecessary safecopy in iterator parseKV. (#971)
-- Use pointers instead of binary encoding. (#965)
-- Reuse block iterator inside table iterator. (#972)
-- [breaking/format] Remove vlen from entry header. (#945)
-- Replace FarmHash with AESHash for Oracle conflicts. (#952)
-- [breaking/format] Optimize Bloom filters. (#940)
-- [breaking/format] Use varint for header encoding (without header length). (#935)
-- Change file picking strategy in compaction. (#894)
-- [breaking/format] Block level changes. (#880)
-- [breaking/format] Add key-offset index to the end of SST table. (#881)
-
-
-## [1.6.0] - 2019-07-01
-
-This is a release including almost 200 commits, so expect many changes - some of them
-not backward compatible.
-
-Regarding backward compatibility in Badger versions, you might be interested in reading
-[VERSIONING.md](VERSIONING.md).
-
-_Note_: The hashes in parentheses correspond to the commits that impacted the given feature.
-
-### New APIs
-
-- badger.DB
- - DropPrefix (291295e)
- - Flatten (7e41bba)
- - KeySplits (4751ef1)
- - MaxBatchCount (b65e2a3)
- - MaxBatchSize (b65e2a3)
- - PrintKeyValueHistogram (fd59907)
- - Subscribe (26128a7)
- - Sync (851e462)
-
-- badger.DefaultOptions() and badger.LSMOnlyOptions() (91ce687)
- - badger.Options.WithX methods
-
-- badger.Entry (e9447c9)
- - NewEntry
- - WithMeta
- - WithDiscard
- - WithTTL
-
-- badger.Item
- - KeySize (fd59907)
- - ValueSize (5242a99)
-
-- badger.IteratorOptions
- - PickTable (7d46029, 49a49e3)
- - Prefix (7d46029)
-
-- badger.Logger (fbb2778)
-
-- badger.Options
- - CompactL0OnClose (7e41bba)
- - Logger (3f66663)
- - LogRotatesToFlush (2237832)
-
-- badger.Stream (14cbd89, 3258067)
-- badger.StreamWriter (7116e16)
-- badger.TableInfo.KeyCount (fd59907)
-- badger.TableManifest (2017987)
-- badger.Tx.NewKeyIterator (49a49e3)
-- badger.WriteBatch (6daccf9, 7e78e80)
-
-### Modified APIs
-
-#### Breaking changes:
-
-- badger.DefaultOptions and badger.LSMOnlyOptions are now functions rather than variables (91ce687)
-- badger.Item.Value now receives a function that returns an error (439fd46)
-- badger.Txn.Commit doesn't receive any params now (6daccf9)
-- badger.DB.Tables now receives a boolean (76b5341)
-
-#### Not breaking changes:
-
-- badger.LSMOptions changed values (799c33f)
-- badger.DB.NewIterator now allows multiple iterators per RO txn (41d9656)
-- badger.Options.TableLoadingMode's new default is options.MemoryMap (6b97bac)
-
-### Removed APIs
-
-- badger.ManagedDB (d22c0e8)
-- badger.Options.DoNotCompact (7e41bba)
-- badger.Txn.SetWithX (e9447c9)
-
-### Tools:
-
-- badger bank disect (13db058)
-- badger bank test (13db058) --mmap (03870e3)
-- badger fill (7e41bba)
-- badger flatten (7e41bba)
-- badger info --histogram (fd59907) --history --lookup --show-keys --show-meta --with-prefix (09e9b63) --show-internal (fb2eed9)
-- badger benchmark read (239041e)
-- badger benchmark write (6d3b67d)
-
-## [1.5.5] - 2019-06-20
-
-* Introduce support for Go Modules
-
-## [1.5.3] - 2018-07-11
-Bug Fixes:
-* Fix a panic caused by item.vptr not copying over vs.Value, when looking
- for a move key.
-
-## [1.5.2] - 2018-06-19
-Bug Fixes:
-* Fix the way move key gets generated.
-* If a transaction has an unclosed iterator, or multiple iterators running
- simultaneously, throw a panic. Every iterator must be properly closed. At any
- point in time, only one iterator per transaction can be running. This is to
- avoid bugs in a transaction data structure which is not thread-safe.
-
-* *Warning: This change might cause panics in user code. Fix is to properly
- close your iterators, and only have one running at a time per transaction.*
-
-## [1.5.1] - 2018-06-04
-Bug Fixes:
-* Fix for infinite yieldItemValue recursion. #503
-* Fix recursive addition of `badgerMove` prefix. https://github.com/dgraph-io/badger/commit/2e3a32f0ccac3066fb4206b28deb39c210c5266f
-* Use file size based window size for sampling, instead of fixing it to 10MB. #501
-
-Cleanup:
-* Clarify comments and documentation.
-* Move badger tool one directory level up.
-
-## [1.5.0] - 2018-05-08
-* Introduce `NumVersionsToKeep` option. This option is used to discard many
- versions of the same key, which saves space.
-* Add a new `SetWithDiscard` method, which would indicate that all the older
- versions of the key are now invalid. Those versions would be discarded during
- compactions.
-* Value log GC moves are now bound to another keyspace to ensure latest versions
- of data are always at the top in LSM tree.
-* Introduce `ValueLogMaxEntries` to restrict the number of key-value pairs per
- value log file. This helps bound the time it takes to garbage collect one
- file.
-
-## [1.4.0] - 2018-05-04
-* Make mmap-ing of value log optional.
-* Run GC multiple times, based on recorded discard statistics.
-* Add MergeOperator.
-* Force compact L0 on close (#439).
-* Add truncate option to warn about data loss (#452).
-* Discard key versions during compaction (#464).
-* Introduce new `LSMOnlyOptions`, to make Badger act like a typical LSM based DB.
-
-Bug fix:
-* (Temporary) Check max version across all tables in Get (removed in next
- release).
-* Update commit and read ts while loading from backup.
-* Ensure all transaction entries are part of the same value log file.
-* On commit, run unlock callbacks before doing writes (#413).
-* Wait for goroutines to finish before closing iterators (#421).
-
-## [1.3.0] - 2017-12-12
-* Add `DB.NextSequence()` method to generate monotonically increasing integer
- sequences.
-* Add `DB.Size()` method to return the size of LSM and value log files.
-* Tweaked mmap code to make Windows 32-bit builds work.
-* Tweaked build tags on some files to make iOS builds work.
-* Fix `DB.PurgeOlderVersions()` to not violate some constraints.
-
-## [1.2.0] - 2017-11-30
-* Expose a `Txn.SetEntry()` method to allow setting the key-value pair
- and all the metadata at the same time.
-
-## [1.1.1] - 2017-11-28
-* Fix bug where txn.Get was returning a key deleted in the same transaction.
-* Fix race condition while decrementing reference in oracle.
-* Update doneCommit in the callback for CommitAsync.
-* Iterators see writes of the current txn.
-
-## [1.1.0] - 2017-11-13
-* Create Badger directory if it does not exist when `badger.Open` is called.
-* Added `Item.ValueCopy()` to avoid deadlocks in long-running iterations
-* Fixed 64-bit alignment issues to make Badger run on Arm v7
-
-## [1.0.1] - 2017-11-06
-* Fix an uint16 overflow when resizing key slice
-
-[Unreleased]: https://github.com/dgraph-io/badger/compare/v2.2007.4...HEAD
-[2.2007.4]: https://github.com/dgraph-io/badger/compare/v2.2007.3...v2.2007.4
-[2.2007.3]: https://github.com/dgraph-io/badger/compare/v2.2007.2...v2.2007.3
-[2.2007.2]: https://github.com/dgraph-io/badger/compare/v2.2007.1...v2.2007.2
-[2.2007.1]: https://github.com/dgraph-io/badger/compare/v2.2007.0...v2.2007.1
-[2.2007.0]: https://github.com/dgraph-io/badger/compare/v2.0.3...v2.2007.0
-[2.0.3]: https://github.com/dgraph-io/badger/compare/v2.0.2...v2.0.3
-[2.0.2]: https://github.com/dgraph-io/badger/compare/v2.0.1...v2.0.2
-[2.0.1]: https://github.com/dgraph-io/badger/compare/v2.0.0...v2.0.1
-[2.0.0]: https://github.com/dgraph-io/badger/compare/v1.6.0...v2.0.0
-[1.6.0]: https://github.com/dgraph-io/badger/compare/v1.5.5...v1.6.0
-[1.5.5]: https://github.com/dgraph-io/badger/compare/v1.5.3...v1.5.5
-[1.5.3]: https://github.com/dgraph-io/badger/compare/v1.5.2...v1.5.3
-[1.5.2]: https://github.com/dgraph-io/badger/compare/v1.5.1...v1.5.2
-[1.5.1]: https://github.com/dgraph-io/badger/compare/v1.5.0...v1.5.1
-[1.5.0]: https://github.com/dgraph-io/badger/compare/v1.4.0...v1.5.0
-[1.4.0]: https://github.com/dgraph-io/badger/compare/v1.3.0...v1.4.0
-[1.3.0]: https://github.com/dgraph-io/badger/compare/v1.2.0...v1.3.0
-[1.2.0]: https://github.com/dgraph-io/badger/compare/v1.1.1...v1.2.0
-[1.1.1]: https://github.com/dgraph-io/badger/compare/v1.1.0...v1.1.1
-[1.1.0]: https://github.com/dgraph-io/badger/compare/v1.0.1...v1.1.0
-[1.0.1]: https://github.com/dgraph-io/badger/compare/v1.0.0...v1.0.1
diff --git a/vendor/github.com/dgraph-io/badger/v2/CODE_OF_CONDUCT.md b/vendor/github.com/dgraph-io/badger/v2/CODE_OF_CONDUCT.md
deleted file mode 100644
index bf7bbc29..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Code of Conduct
-
-Our Code of Conduct can be found here:
-
-https://dgraph.io/conduct
diff --git a/vendor/github.com/dgraph-io/badger/v2/CONTRIBUTING.md b/vendor/github.com/dgraph-io/badger/v2/CONTRIBUTING.md
deleted file mode 100644
index 30512e9d..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/CONTRIBUTING.md
+++ /dev/null
@@ -1,107 +0,0 @@
-# Contribution Guide
-
-* [Before you get started](#before-you-get-started)
- * [Code of Conduct](#code-of-conduct)
-* [Your First Contribution](#your-first-contribution)
- * [Find a good first topic](#find-a-good-first-topic)
-* [Setting up your development environment](#setting-up-your-development-environment)
- * [Fork the project](#fork-the-project)
- * [Clone the project](#clone-the-project)
- * [New branch for a new code](#new-branch-for-a-new-code)
- * [Test](#test)
- * [Commit and push](#commit-and-push)
- * [Create a Pull Request](#create-a-pull-request)
- * [Sign the CLA](#sign-the-cla)
- * [Get a code review](#get-a-code-review)
-
-## Before you get started
-
-### Code of Conduct
-
-Please make sure to read and observe our [Code of Conduct](./CODE_OF_CONDUCT.md).
-
-## Your First Contribution
-
-### Find a good first topic
-
-You can start by finding an existing issue with the
-[good first issue](https://github.com/dgraph-io/badger/labels/good%20first%20issue) or [help wanted](https://github.com/dgraph-io/badger/labels/help%20wanted) labels. These issues are well suited for new contributors.
-
-
-## Setting up your development environment
-
-Badger uses [`Go Modules`](https://github.com/golang/go/wiki/Modules)
-to manage dependencies. The version of Go should be **1.12** or above.
-
-### Fork the project
-
-- Visit https://github.com/dgraph-io/badger
-- Click the `Fork` button (top right) to create a fork of the repository
-
-### Clone the project
-
-```sh
-$ git clone https://github.com/$GITHUB_USER/badger
-$ cd badger
-$ git remote add upstream git@github.com:dgraph-io/badger.git
-
-# Never push to the upstream master
-$ git remote set-url --push upstream no_push
-```
-
-### New branch for a new code
-
-Get your local master up to date:
-
-```sh
-$ git fetch upstream
-$ git checkout master
-$ git rebase upstream/master
-```
-
-Create a new branch from the master:
-
-```sh
-$ git checkout -b my_new_feature
-```
-
-And now you can finally add your changes to the project.
-
-### Test
-
-Build and run all tests:
-
-```sh
-$ ./test.sh
-```
-
-### Commit and push
-
-Commit your changes:
-
-```sh
-$ git commit
-```
-
-When the changes are ready to review:
-
-```sh
-$ git push origin my_new_feature
-```
-
-### Create a Pull Request
-
-Just open `https://github.com/$GITHUB_USER/badger/pull/new/my_new_feature` and
-fill in the PR description.
-
-### Sign the CLA
-
-Click the **Sign in with Github to agree** button to sign the CLA. [An example](https://cla-assistant.io/dgraph-io/badger?pullRequest=1377).
-
-### Get a code review
-
-Once your pull request (PR) is opened, it will be assigned to one or more
-reviewers. Those reviewers will do a code review.
-
-To address review comments, you should commit the changes to the same branch of
-the PR on your fork.
diff --git a/vendor/github.com/dgraph-io/badger/v2/LICENSE b/vendor/github.com/dgraph-io/badger/v2/LICENSE
deleted file mode 100644
index d9a10c0d..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/LICENSE
+++ /dev/null
@@ -1,176 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/dgraph-io/badger/v2/README.md b/vendor/github.com/dgraph-io/badger/v2/README.md
deleted file mode 100644
index f92b82a2..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/README.md
+++ /dev/null
@@ -1,928 +0,0 @@
-# BadgerDB [![GoDoc](https://godoc.org/github.com/dgraph-io/badger?status.svg)](https://godoc.org/github.com/dgraph-io/badger) [![Go Report Card](https://goreportcard.com/badge/github.com/dgraph-io/badger)](https://goreportcard.com/report/github.com/dgraph-io/badger) [![Sourcegraph](https://sourcegraph.com/github.com/dgraph-io/badger/-/badge.svg)](https://sourcegraph.com/github.com/dgraph-io/badger?badge) [![Build Status](https://teamcity.dgraph.io/guestAuth/app/rest/builds/buildType:(id:Badger_UnitTests)/statusIcon.svg)](https://teamcity.dgraph.io/viewLog.html?buildTypeId=Badger_UnitTests&buildId=lastFinished&guest=1) ![Appveyor](https://ci.appveyor.com/api/projects/status/github/dgraph-io/badger?branch=master&svg=true) [![Coverage Status](https://coveralls.io/repos/github/dgraph-io/badger/badge.svg?branch=master)](https://coveralls.io/github/dgraph-io/badger?branch=master)
-
-![Badger mascot](images/diggy-shadow.png)
-
-BadgerDB is an embeddable, persistent and fast key-value (KV) database written
-in pure Go. It is the underlying database for [Dgraph](https://dgraph.io), a
-fast, distributed graph database. It's meant to be a performant alternative to
-non-Go-based key-value stores like RocksDB.
-
-## Project Status [March 24, 2020]
-
-Badger is stable and is being used to serve data sets worth hundreds of
-terabytes. Badger supports concurrent ACID transactions with serializable
-snapshot isolation (SSI) guarantees. A Jepsen-style bank test runs nightly for
-8h with the `--race` flag, and ensures that transactional guarantees are maintained.
-Badger has also been tested to work with filesystem-level anomalies, to ensure
-persistence and consistency. Badger is used by a number of projects, which
-include Dgraph, Jaeger Tracing, UsenetExpress, and many more.
-
-The list of projects using Badger can be found [here](#projects-using-badger).
-
-Badger v1.0 was released in Nov 2017, and the latest version that is data-compatible
-with v1.0 is v1.6.0.
-
-Badger v2.0 was released in Nov 2019 with a new storage format which isn't
-compatible with v1.x. Badger v2.0 supports compression and encryption, and uses a cache to speed up lookups.
-
-The [Changelog] is kept fairly up-to-date.
-
-For more details on our version naming schema please read [Choosing a version](#choosing-a-version).
-
-[Changelog]:https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md
-
-## Table of Contents
- * [Getting Started](#getting-started)
- + [Installing](#installing)
- - [Choosing a version](#choosing-a-version)
- + [Opening a database](#opening-a-database)
- + [Transactions](#transactions)
- - [Read-only transactions](#read-only-transactions)
- - [Read-write transactions](#read-write-transactions)
- - [Managing transactions manually](#managing-transactions-manually)
- + [Using key/value pairs](#using-keyvalue-pairs)
- + [Monotonically increasing integers](#monotonically-increasing-integers)
- * [Merge Operations](#merge-operations)
- + [Setting Time To Live (TTL) and User Metadata on Keys](#setting-time-to-live-ttl-and-user-metadata-on-keys)
- + [Iterating over keys](#iterating-over-keys)
- - [Prefix scans](#prefix-scans)
- - [Key-only iteration](#key-only-iteration)
- + [Stream](#stream)
- + [Garbage Collection](#garbage-collection)
- + [Database backup](#database-backup)
- + [Memory usage](#memory-usage)
- + [Statistics](#statistics)
- * [Resources](#resources)
- + [Blog Posts](#blog-posts)
- * [Contact](#contact)
- * [Design](#design)
- + [Comparisons](#comparisons)
- + [Benchmarks](#benchmarks)
- * [Projects Using Badger](#projects-using-badger)
- * [Contributing](#contributing)
- * [Frequently Asked Questions](#frequently-asked-questions)
-
-## Getting Started
-
-### Installing
-To start using Badger, install Go 1.12 or above and run `go get`:
-
-```sh
-$ go get github.com/dgraph-io/badger/v2
-```
-
-This will retrieve the library and install the `badger` command line
-utility into your `$GOBIN` path.
-
-
-#### Choosing a version
-
-BadgerDB is a special package in that the most important changes we can make
-to it are not to its API but rather to how data is stored on disk.
-
-This is why we follow a version naming schema that differs from Semantic Versioning.
-
-- New major versions are released when the data format on disk changes in an incompatible way.
-- New minor versions are released whenever the API changes but data compatibility is maintained.
- Note that changes to the API could be backward-incompatible - unlike Semantic Versioning.
-- New patch versions are released when there are no changes to either the data format or the API.
-
-Following these rules:
-
-- v1.5.0 and v1.6.0 can be used on top of the same files without any concerns, as their major
- version is the same, therefore the data format on disk is compatible.
-- v1.6.0 and v2.0.0 are data incompatible as their major version implies, so files created with
- v1.6.0 will need to be converted into the new format before they can be used by v2.0.0.
-
-For a longer explanation on the reasons behind using a new versioning naming schema, you can read
-[VERSIONING.md](VERSIONING.md).
-
-### Opening a database
-The top-level object in Badger is a `DB`. It represents multiple files on disk
-in specific directories, which contain the data for a single database.
-
-To open your database, use the `badger.Open()` function, with the appropriate
-options. The `Dir` and `ValueDir` options are mandatory and must be
-specified by the client. They can be set to the same value to simplify things.
-
-```go
-package main
-
-import (
- "log"
-
- badger "github.com/dgraph-io/badger/v2"
-)
-
-func main() {
- // Open the Badger database located in the /tmp/badger directory.
- // It will be created if it doesn't exist.
- db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
- if err != nil {
- log.Fatal(err)
- }
- defer db.Close()
-  // Your code here…
-}
-```
-
-Please note that Badger obtains a lock on the directories so multiple processes
-cannot open the same database at the same time.
-
-#### In-Memory Mode/Diskless Mode
-By default, Badger ensures all the data is persisted to the disk. It also supports a pure
-in-memory mode. When Badger is running in in-memory mode, all the data is stored in memory.
-Reads and writes are much faster in in-memory mode, but all the data stored in Badger will be lost
-in case of a crash or close. To open badger in in-memory mode, set the `InMemory` option.
-
-```go
-opt := badger.DefaultOptions("").WithInMemory(true)
-```
-
-### Transactions
-
-#### Read-only transactions
-To start a read-only transaction, you can use the `DB.View()` method:
-
-```go
-err := db.View(func(txn *badger.Txn) error {
-  // Your code here…
-  return nil
-})
-```
-
-You cannot perform any writes or deletes within this transaction. Badger
-ensures that you get a consistent view of the database within this closure. Any
-writes that happen elsewhere after the transaction has started will not be
-seen by calls made within the closure.
-
-#### Read-write transactions
-To start a read-write transaction, you can use the `DB.Update()` method:
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
-  // Your code here…
-  return nil
-})
-```
-
-All database operations are allowed inside a read-write transaction.
-
-Always check the returned error value. If you return an error
-within your closure it will be passed through.
-
-An `ErrConflict` error will be reported in case of a conflict. Depending on the state
-of your application, you have the option to retry the operation if you receive
-this error.
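For instance, a small retry loop around `DB.Update` is one way to handle it (a sketch; the retry limit and the key/value are illustrative):

```go
const maxRetries = 5 // arbitrary, illustrative limit
var err error
for i := 0; i < maxRetries; i++ {
	err = db.Update(func(txn *badger.Txn) error {
		// Read-modify-write logic goes here.
		return txn.Set([]byte("answer"), []byte("42"))
	})
	if err != badger.ErrConflict {
		break // success, or an error retrying won't fix
	}
}
if err != nil {
	return err
}
```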
-
-An `ErrTxnTooBig` will be reported in case the number of pending writes/deletes in
-the transaction exceeds a certain limit. In that case, it is best to commit the
-transaction and start a new transaction immediately. Here is an example (we are
-not checking for errors in some places for simplicity):
-
-```go
-updates := make(map[string]string)
-txn := db.NewTransaction(true)
-for k,v := range updates {
- if err := txn.Set([]byte(k),[]byte(v)); err == badger.ErrTxnTooBig {
- _ = txn.Commit()
- txn = db.NewTransaction(true)
- _ = txn.Set([]byte(k),[]byte(v))
- }
-}
-_ = txn.Commit()
-```
-
-#### Managing transactions manually
-The `DB.View()` and `DB.Update()` methods are wrappers around the
-`DB.NewTransaction()` and `Txn.Commit()` methods (or `Txn.Discard()` in case of
-read-only transactions). These helper methods will start the transaction,
-execute a function, and then safely discard your transaction if an error is
-returned. This is the recommended way to use Badger transactions.
-
-However, sometimes you may want to manually create and commit your
-transactions. You can use the `DB.NewTransaction()` function directly, which
-takes in a boolean argument to specify whether a read-write transaction is
-required. For read-write transactions, it is necessary to call `Txn.Commit()`
-to ensure the transaction is committed. For read-only transactions, calling
-`Txn.Discard()` is sufficient. `Txn.Commit()` also calls `Txn.Discard()`
-internally to clean up the transaction, so just calling `Txn.Commit()` is
-sufficient for read-write transactions. However, if your code doesn’t call
-`Txn.Commit()` for some reason (e.g., it returns prematurely with an error),
-then please make sure you call `Txn.Discard()` in a `defer` block. Refer to the
-code below.
-
-```go
-// Start a writable transaction.
-txn := db.NewTransaction(true)
-defer txn.Discard()
-
-// Use the transaction...
-err := txn.Set([]byte("answer"), []byte("42"))
-if err != nil {
- return err
-}
-
-// Commit the transaction and check for error.
-if err := txn.Commit(); err != nil {
- return err
-}
-```
-
-The first argument to `DB.NewTransaction()` is a boolean stating if the transaction
-should be writable.
-
-Badger also offers an asynchronous commit through the `Txn.CommitWith()` method,
-which takes a callback. `Txn.Commit()` returns only after all the writes have
-succeeded, whereas `Txn.CommitWith()` returns as soon as it has checked for any
-conflicts. The actual writing to the disk happens asynchronously, and the
-callback is invoked once the writing has finished, or an error has occurred.
-This can improve the throughput of the application in some cases. But it also
-means that a transaction is not durable until the callback has been invoked
-with a `nil` error value.
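A minimal sketch of the asynchronous path, reusing the `"answer"` key from the examples above:

```go
txn := db.NewTransaction(true)
defer txn.Discard()

if err := txn.Set([]byte("answer"), []byte("42")); err != nil {
	return err
}

// Returns after the conflict check; the writes finish in the background.
txn.CommitWith(func(err error) {
	if err != nil {
		// The transaction did not become durable.
		log.Printf("async commit failed: %v", err)
	}
})
```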
-
-### Using key/value pairs
-To save a key/value pair, use the `Txn.Set()` method:
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- err := txn.Set([]byte("answer"), []byte("42"))
- return err
-})
-```
-
-A key/value pair can also be saved by first creating an `Entry`, then setting
-it using `Txn.SetEntry()`. `Entry` also exposes methods to set properties
-on it.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := badger.NewEntry([]byte("answer"), []byte("42"))
- err := txn.SetEntry(e)
- return err
-})
-```
-
-This will set the value of the `"answer"` key to `"42"`. To retrieve this
-value, we can use the `Txn.Get()` method:
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- item, err := txn.Get([]byte("answer"))
- handle(err)
-
- var valNot, valCopy []byte
- err = item.Value(func(val []byte) error {
- // This func with val would only be called if item.Value encounters no error.
-
- // Accessing val here is valid.
- fmt.Printf("The answer is: %s\n", val)
-
- // Copying or parsing val is valid.
- valCopy = append([]byte{}, val...)
-
- // Assigning val slice to another variable is NOT OK.
- valNot = val // Do not do this.
- return nil
- })
- handle(err)
-
- // DO NOT access val here. It is the most common cause of bugs.
- fmt.Printf("NEVER do this. %s\n", valNot)
-
- // You must copy it to use it outside item.Value(...).
- fmt.Printf("The answer is: %s\n", valCopy)
-
- // Alternatively, you could also use item.ValueCopy().
- valCopy, err = item.ValueCopy(nil)
- handle(err)
- fmt.Printf("The answer is: %s\n", valCopy)
-
- return nil
-})
-```
-
-`Txn.Get()` returns `ErrKeyNotFound` if the value is not found.
-
-Please note that values returned from `Get()` are only valid while the
-transaction is open. If you need to use a value outside of the transaction
-then you must use `copy()` to copy it to another byte slice.
-
-Use the `Txn.Delete()` method to delete a key.
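For example, deleting the `"answer"` key set earlier:

```go
err := db.Update(func(txn *badger.Txn) error {
	return txn.Delete([]byte("answer"))
})
```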
-
-### Monotonically increasing integers
-
-To get unique monotonically increasing integers with strong durability, you can
-use the `DB.GetSequence` method. This method returns a `Sequence` object, which
-is thread-safe and can be used concurrently via various goroutines.
-
-Badger leases a range of integers to hand out from memory, with the
-bandwidth provided to `DB.GetSequence`. The frequency at which disk writes are
-done is determined by this lease bandwidth and the frequency of `Next`
-invocations. Setting the bandwidth too low causes more disk writes; setting it
-too high results in wasted integers if Badger is closed or crashes.
-To avoid wasted integers, call `Release` before closing Badger.
-
-```go
-seq, err := db.GetSequence(key, 1000)
-if err != nil {
- return err
-}
-defer seq.Release()
-for {
- num, err := seq.Next()
- if err != nil {
- return err
- }
- fmt.Println(num)
-}
-```
-
-### Merge Operations
-Badger provides support for ordered merge operations. You can define a func
-of type `MergeFunc` which takes in an existing value, and a value to be
-_merged_ with it. It returns a new value which is the result of the _merge_
-operation. All values are specified as byte slices. For example, here is a merge
-function (`add`) which appends a `[]byte` value to an existing `[]byte` value.
-
-```Go
-// Merge function to append one byte slice to another
-func add(originalValue, newValue []byte) []byte {
- return append(originalValue, newValue...)
-}
-```
-
-This function can then be passed to the `DB.GetMergeOperator()` method, along
-with a key, and a duration value. The duration specifies how often the merge
-function is run on values that have been added using the `MergeOperator.Add()`
-method.
-
-The `MergeOperator.Get()` method can be used to retrieve the cumulative value of the key
-associated with the merge operation.
-
-```Go
-key := []byte("merge")
-
-m := db.GetMergeOperator(key, add, 200*time.Millisecond)
-defer m.Stop()
-
-m.Add([]byte("A"))
-m.Add([]byte("B"))
-m.Add([]byte("C"))
-
-res, _ := m.Get() // res should have value ABC encoded
-```
-
-Example: Merge operator which increments a counter
-
-```Go
-func uint64ToBytes(i uint64) []byte {
- var buf [8]byte
- binary.BigEndian.PutUint64(buf[:], i)
- return buf[:]
-}
-
-func bytesToUint64(b []byte) uint64 {
- return binary.BigEndian.Uint64(b)
-}
-
-// Merge function to add two uint64 numbers
-func add(existing, new []byte) []byte {
- return uint64ToBytes(bytesToUint64(existing) + bytesToUint64(new))
-}
-```
-It can be used as follows:
-```Go
-key := []byte("merge")
-
-m := db.GetMergeOperator(key, add, 200*time.Millisecond)
-defer m.Stop()
-
-m.Add(uint64ToBytes(1))
-m.Add(uint64ToBytes(2))
-m.Add(uint64ToBytes(3))
-
-res, _ := m.Get() // res should have value 6 encoded
-```
-
-### Setting Time To Live (TTL) and User Metadata on Keys
-Badger allows setting an optional Time to Live (TTL) value on keys. Once the TTL has
-elapsed, the key will no longer be retrievable and will be eligible for garbage
-collection. A TTL can be set as a `time.Duration` value using the `Entry.WithTTL()`
-and `Txn.SetEntry()` API methods.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := badger.NewEntry([]byte("answer"), []byte("42")).WithTTL(time.Hour)
- err := txn.SetEntry(e)
- return err
-})
-```
-
-An optional user metadata value can be set on each key. A user metadata value
-is represented by a single byte. It can be used to set certain bits along
-with the key to aid in interpreting or decoding the key-value pair. User
-metadata can be set using `Entry.WithMeta()` and `Txn.SetEntry()` API methods.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := badger.NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1))
- err := txn.SetEntry(e)
- return err
-})
-```
-
-The `Entry` APIs can be used to add user metadata and a TTL to the same key. This
-`Entry` can then be set using `Txn.SetEntry()`.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := badger.NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1)).WithTTL(time.Hour)
- err := txn.SetEntry(e)
- return err
-})
-```
-
-### Iterating over keys
-To iterate over keys, we can use an `Iterator`, which can be obtained using the
-`Txn.NewIterator()` method. Iteration happens in byte-wise lexicographical sorting
-order.
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- opts := badger.DefaultIteratorOptions
- opts.PrefetchSize = 10
- it := txn.NewIterator(opts)
- defer it.Close()
- for it.Rewind(); it.Valid(); it.Next() {
- item := it.Item()
- k := item.Key()
- err := item.Value(func(v []byte) error {
- fmt.Printf("key=%s, value=%s\n", k, v)
- return nil
- })
- if err != nil {
- return err
- }
- }
- return nil
-})
-```
-
-The iterator allows you to move to a specific point in the list of keys and move
-forward or backward through the keys one at a time.
-
-By default, Badger prefetches the values of the next 100 items. You can adjust
-that with the `IteratorOptions.PrefetchSize` field. However, setting it to
-a value higher than `GOMAXPROCS` (which we recommend to be 128 or higher)
-shouldn’t give any additional benefits. You can also turn off the fetching of
-values altogether. See section below on key-only iteration.
-
-#### Prefix scans
-To iterate over a key prefix, you can combine `Seek()` and `ValidForPrefix()`:
-
-```go
-db.View(func(txn *badger.Txn) error {
- it := txn.NewIterator(badger.DefaultIteratorOptions)
- defer it.Close()
- prefix := []byte("1234")
- for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
- item := it.Item()
- k := item.Key()
- err := item.Value(func(v []byte) error {
- fmt.Printf("key=%s, value=%s\n", k, v)
- return nil
- })
- if err != nil {
- return err
- }
- }
- return nil
-})
-```
-
-#### Key-only iteration
-Badger supports a unique mode of iteration called _key-only_ iteration. It is
-several orders of magnitude faster than regular iteration, because it involves
-access to the LSM-tree only, which is usually resident entirely in RAM. To
-enable key-only iteration, you need to set the `IteratorOptions.PrefetchValues`
-field to `false`. This can also be used to do sparse reads for selected keys
-during an iteration, by calling `item.Value()` only when required.
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- opts := badger.DefaultIteratorOptions
- opts.PrefetchValues = false
- it := txn.NewIterator(opts)
- defer it.Close()
- for it.Rewind(); it.Valid(); it.Next() {
- item := it.Item()
- k := item.Key()
- fmt.Printf("key=%s\n", k)
- }
- return nil
-})
-```
-
-### Stream
-Badger provides a Stream framework, which concurrently iterates over all or a
-portion of the DB, converts data into custom key-values, and streams them out
-serially to be sent over the network, written to disk, or even written back to
-Badger. This is a much faster way to iterate over Badger than using a single
-Iterator. Stream supports Badger in both managed and normal mode.
-
-Stream uses the natural boundaries created by SSTables within the LSM tree to
-quickly generate key ranges. Each goroutine then picks a range and runs an
-iterator to iterate over it. Each iterator iterates over all versions of values
-and is created from the same transaction, thus working over a snapshot of the
-DB. Every time a new key is encountered, it calls `ChooseKey(item)`, followed
-by `KeyToList(key, itr)`. This allows a user to select or reject that key, and
-if selected, convert the value versions into custom key-values. The goroutine
-batches up 4MB worth of key-values before sending them over to a channel.
-Another goroutine further batches up data from this channel using a *smart
-batching* algorithm and calls `Send` serially.
-
-This framework is designed for high throughput key-value iteration, spreading
-the work of iteration across many goroutines. `DB.Backup` uses this framework to
-provide full and incremental backups quickly. Dgraph is a heavy user of this
-framework. In fact, this framework was developed and used within Dgraph, before
-getting ported over to Badger.
-
-```go
-stream := db.NewStream()
-// db.NewStreamAt(readTs) for managed mode.
-
-// -- Optional settings
-stream.NumGo = 16 // Set number of goroutines to use for iteration.
-stream.Prefix = []byte("some-prefix") // Leave nil for iteration over the whole DB.
-stream.LogPrefix = "Badger.Streaming" // For identifying stream logs. Outputs to Logger.
-
-// ChooseKey is called concurrently for every key. If left nil, assumes true by default.
-stream.ChooseKey = func(item *badger.Item) bool {
- return bytes.HasSuffix(item.Key(), []byte("er"))
-}
-
-// KeyToList is called concurrently for chosen keys. This can be used to convert
-// Badger data into custom key-values. If nil, uses stream.ToList, a default
-// implementation, which picks all valid key-values.
-stream.KeyToList = nil
-
-// -- End of optional settings.
-
-// Send is called serially, while Stream.Orchestrate is running.
-stream.Send = func(list *pb.KVList) error {
- return proto.MarshalText(w, list) // Write to w.
-}
-
-// Run the stream
-if err := stream.Orchestrate(context.Background()); err != nil {
- return err
-}
-// Done.
-```
-
-### Garbage Collection
-Badger values need to be garbage collected, for two reasons:
-
-* Badger keeps values separately from the LSM tree. This means that the compaction operations
-that clean up the LSM tree do not touch the values at all. Values need to be cleaned up
-separately.
-
-* Concurrent read/write transactions could leave behind multiple values for a single key, because they
-are stored with different versions. These could accumulate, and take up unneeded space beyond the
-time these older versions are needed.
-
-Badger relies on the client to perform garbage collection at a time of their choosing. It provides
-the following method, which can be invoked at an appropriate time:
-
-* `DB.RunValueLogGC()`: This method is designed to do garbage collection while
- Badger is online. Along with randomly picking a file, it uses statistics generated by the
- LSM-tree compactions to pick files that are likely to lead to maximum space
- reclamation. It is recommended that it be called during periods of low activity
- in your system, or periodically. One call would only result in the removal of
- at most one log file. As an optimization, you could also immediately re-run it
- whenever it returns a nil error (indicating a successful value log GC), as shown below.
-
- ```go
- ticker := time.NewTicker(5 * time.Minute)
- defer ticker.Stop()
- for range ticker.C {
- again:
- err := db.RunValueLogGC(0.7)
- if err == nil {
- goto again
- }
- }
- ```
-
-* `DB.PurgeOlderVersions()`: This method is **DEPRECATED** since v1.5.0. Now, Badger's LSM tree automatically discards older/invalid versions of keys.
-
-**Note: The RunValueLogGC method would not garbage collect the latest value log.**
-
-### Database backup
-There are two public API methods `DB.Backup()` and `DB.Load()` which can be
-used to do online backups and restores. Badger v0.9 provides a CLI tool
-`badger`, which can do offline backup/restore. Make sure you have `$GOPATH/bin`
-in your PATH to use this tool.
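-
-A minimal sketch of an online backup and restore using these two methods
-(`handle`, `db`, and a second `newDB` handle are assumed, as in the earlier
-examples):
-
-```go
-// Full backup: since=0 dumps everything. Keep the returned timestamp to
-// request an incremental backup later.
-f, err := os.Create("badger.bak")
-handle(err)
-since, err := db.Backup(f, 0)
-handle(err)
-handle(f.Close())
-
-// Restore into another database. The second argument limits the number of
-// pending writes while loading.
-r, err := os.Open("badger.bak")
-handle(err)
-handle(newDB.Load(r, 16))
-handle(r.Close())
-_ = since // pass as `since` to db.Backup for an incremental backup
-```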
-
-The command below will create a version-agnostic backup of the database to a
-file `badger.bak` in the current working directory:
-
-```
-badger backup --dir
-```
-
-To restore `badger.bak` in the current working directory to a new database:
-
-```
-badger restore --dir
-```
-
-See `badger --help` for more details.
-
-If you have a Badger database that was created using v0.8 (or below), you can
-use the `badger_backup` tool provided in v0.8.1, and then restore it using the
-command above to upgrade your database to work with the latest version.
-
-```
-badger_backup --dir --backup-file badger.bak
-```
-
-We recommend that all users use the `Backup` and `Restore` APIs and tools. However,
-Badger is also rsync-friendly because all files are immutable, barring the
-latest value log, which is append-only. So, rsync can be used as a rudimentary way
-to perform a backup. In the following script, we repeat rsync to ensure that the
-LSM tree remains consistent with the MANIFEST file while doing a full backup.
-
-```
-#!/bin/bash
-set -o history
-set -o histexpand
-# Makes a complete copy of a Badger database directory.
-# Repeat rsync if the MANIFEST and SSTables are updated.
-rsync -avz --delete db/ dst
-while !! | grep -q "(MANIFEST\|\.sst)$"; do :; done
-```
-
-### Memory usage
-Badger's memory usage can be managed by tweaking several options available in
-the `Options` struct that is passed in when opening the database using
-`DB.Open`.
-
-- `Options.ValueLogLoadingMode` can be set to `options.FileIO` (instead of the
- default `options.MemoryMap`) to avoid memory-mapping log files. This can be
- useful in environments with low RAM.
-- Number of memtables (`Options.NumMemtables`)
- - If you modify `Options.NumMemtables`, also adjust `Options.NumLevelZeroTables` and
- `Options.NumLevelZeroTablesStall` accordingly.
-- Number of concurrent compactions (`Options.NumCompactors`)
-- Mode in which LSM tree is loaded (`Options.TableLoadingMode`)
-- Size of table (`Options.MaxTableSize`)
-- Size of value log file (`Options.ValueLogFileSize`)
-
-If you want to decrease the memory usage of a Badger instance, tweak these
-options (ideally one at a time) until you achieve the desired memory usage.
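-
-For illustration, a low-memory configuration might look like the sketch below.
-The `With*` builders mirror the option fields listed above; the exact values
-are assumptions, not recommendations:
-
-```go
-opts := badger.DefaultOptions("/tmp/badger").
-    WithValueLogLoadingMode(options.FileIO). // avoid memory-mapping value log files
-    WithNumMemtables(2).                     // fewer in-memory tables
-    WithNumLevelZeroTables(2).               // adjusted along with NumMemtables
-    WithNumLevelZeroTablesStall(4).
-    WithNumCompactors(2)
-
-db, err := badger.Open(opts)
-```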
-
-### Statistics
-Badger records metrics using the [expvar] package, which is included in the Go
-standard library. All the metrics are documented in [y/metrics.go][metrics]
-file.
-
-The `expvar` package adds a handler to the default HTTP server (which has to be
-started explicitly), and serves up the metrics at the `/debug/vars` endpoint.
-These metrics can then be collected by a system like [Prometheus], to get
-better visibility into what Badger is doing.
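-
-Since the metrics are registered on `http.DefaultServeMux`, exposing them is
-just a matter of starting the default HTTP server; a minimal sketch:
-
-```go
-import (
-    _ "expvar" // registers the /debug/vars handler on http.DefaultServeMux
-    "log"
-    "net/http"
-)
-
-func main() {
-    // Badger's metrics become visible at http://localhost:8080/debug/vars.
-    log.Fatal(http.ListenAndServe(":8080", nil))
-}
-```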
-
-[expvar]: https://golang.org/pkg/expvar/
-[metrics]: https://github.com/dgraph-io/badger/blob/master/y/metrics.go
-[Prometheus]: https://prometheus.io/
-
-## Resources
-
-### Blog Posts
-1. [Introducing Badger: A fast key-value store written natively in
-Go](https://open.dgraph.io/post/badger/)
-2. [Make Badger crash resilient with ALICE](https://blog.dgraph.io/post/alice/)
-3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://blog.dgraph.io/post/badger-lmdb-boltdb/)
-4. [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/)
-
-## Design
-Badger was written with these design goals in mind:
-
-- Write a key-value database in pure Go.
-- Use latest research to build the fastest KV database for data sets spanning terabytes.
-- Optimize for SSDs.
-
-Badger’s design is based on a paper titled _[WiscKey: Separating Keys from
-Values in SSD-conscious Storage][wisckey]_.
-
-[wisckey]: https://www.usenix.org/system/files/conference/fast16/fast16-papers-lu.pdf
-
-### Comparisons
-| Feature                       | Badger                                     | RocksDB                       | BoltDB    |
-| ----------------------------- | ------------------------------------------ | ----------------------------- | --------- |
-| Design                        | LSM tree with value log                    | LSM tree only                 | B+ tree   |
-| High Read throughput          | Yes                                        | No                            | Yes       |
-| High Write throughput         | Yes                                        | Yes                           | No        |
-| Designed for SSDs             | Yes (with latest research <sup>1</sup>)    | Not specifically <sup>2</sup> | No        |
-| Embeddable                    | Yes                                        | Yes                           | Yes       |
-| Sorted KV access              | Yes                                        | Yes                           | Yes       |
-| Pure Go (no Cgo)              | Yes                                        | No                            | Yes       |
-| Transactions                  | Yes, ACID, concurrent with SSI<sup>3</sup> | Yes (but non-ACID)            | Yes, ACID |
-| Snapshots                     | Yes                                        | Yes                           | Yes       |
-| TTL support                   | Yes                                        | Yes                           | No        |
-| 3D access (key-value-version) | Yes<sup>4</sup>                            | No                            | No        |
-
-<sup>1</sup> The [WISCKEY paper][wisckey] (on which Badger is based) saw big
-wins with separating values from keys, significantly reducing the write
-amplification compared to a typical LSM tree.
-
-<sup>2</sup> RocksDB is an SSD optimized version of LevelDB, which was designed specifically for rotating disks.
-As such RocksDB's design isn't aimed at SSDs.
-
-<sup>3</sup> SSI: Serializable Snapshot Isolation. For more details, see the blog post [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/)
-
-<sup>4</sup> Badger provides direct access to value versions via its Iterator API.
-Users can also specify how many versions to keep per key via Options.
-
-### Benchmarks
-We have run comprehensive benchmarks against RocksDB, Bolt and LMDB. The
-benchmarking code, and the detailed logs for the benchmarks can be found in the
-[badger-bench] repo. More explanation, including graphs, can be found in the
-blog posts (linked above).
-
-[badger-bench]: https://github.com/dgraph-io/badger-bench
-
-## Projects Using Badger
-Below is a list of known projects that use Badger:
-
-* [Dgraph](https://github.com/dgraph-io/dgraph) - Distributed graph database.
-* [Jaeger](https://github.com/jaegertracing/jaeger) - Distributed tracing platform.
-* [go-ipfs](https://github.com/ipfs/go-ipfs) - Go client for the InterPlanetary File System (IPFS), a new hypermedia distribution protocol.
-* [Riot](https://github.com/go-ego/riot) - An open-source, distributed search engine.
-* [emitter](https://github.com/emitter-io/emitter) - Scalable, low latency, distributed pub/sub broker with message storage, uses MQTT, gossip and badger.
-* [OctoSQL](https://github.com/cube2222/octosql) - Query tool that allows you to join, analyse and transform data from multiple databases using SQL.
-* [Dkron](https://dkron.io/) - Distributed, fault tolerant job scheduling system.
-* [Sandglass](https://github.com/celrenheit/sandglass) - distributed, horizontally scalable, persistent, time sorted message queue.
-* [TalariaDB](https://github.com/grab/talaria) - Grab's Distributed, low latency time-series database.
-* [Sloop](https://github.com/salesforce/sloop) - Salesforce's Kubernetes History Visualization Project.
-* [Immudb](https://github.com/codenotary/immudb) - Lightweight, high-speed immutable database for systems and applications.
-* [Usenet Express](https://usenetexpress.com/) - Serving over 300TB of data with Badger.
-* [gorush](https://github.com/appleboy/gorush) - A push notification server written in Go.
-* [0-stor](https://github.com/zero-os/0-stor) - Single device object store.
-* [Dispatch Protocol](https://github.com/dispatchlabs/disgo) - Blockchain protocol for distributed application data analytics.
-* [GarageMQ](https://github.com/valinurovam/garagemq) - AMQP server written in Go.
-* [RedixDB](https://alash3al.github.io/redix/) - A real-time persistent key-value store with the same redis protocol.
-* [BBVA](https://github.com/BBVA/raft-badger) - Raft backend implementation using BadgerDB for Hashicorp raft.
-* [Fantom](https://github.com/Fantom-foundation/go-lachesis) - aBFT Consensus platform for distributed applications.
-* [decred](https://github.com/decred/dcrdata) - An open, progressive, and self-funding cryptocurrency with a system of community-based governance integrated into its blockchain.
-* [OpenNetSys](https://github.com/opennetsys/c3-go) - Create useful dApps in any software language.
-* [HoneyTrap](https://github.com/honeytrap/honeytrap) - An extensible and opensource system for running, monitoring and managing honeypots.
-* [Insolar](https://github.com/insolar/insolar) - Enterprise-ready blockchain platform.
-* [IoTeX](https://github.com/iotexproject/iotex-core) - The next generation of the decentralized network for IoT powered by scalability- and privacy-centric blockchains.
-* [go-sessions](https://github.com/kataras/go-sessions) - The sessions manager for Go net/http and fasthttp.
-* [Babble](https://github.com/mosaicnetworks/babble) - BFT Consensus platform for distributed applications.
-* [Tormenta](https://github.com/jpincas/tormenta) - Embedded object-persistence layer / simple JSON database for Go projects.
-* [BadgerHold](https://github.com/timshannon/badgerhold) - An embeddable NoSQL store for querying Go types built on Badger
-* [Goblero](https://github.com/didil/goblero) - Pure Go embedded persistent job queue backed by BadgerDB
-* [Surfline](https://www.surfline.com) - Serving global wave and weather forecast data with Badger.
-* [Cete](https://github.com/mosuka/cete) - Simple and highly available distributed key-value store built on Badger. Makes it easy to bring up a cluster of Badger with the Raft consensus algorithm provided by hashicorp/raft.
-* [Volument](https://volument.com/) - A new take on website analytics backed by Badger.
-* [KVdb](https://kvdb.io/) - Hosted key-value store and serverless platform built on top of Badger.
-
-If you are using Badger in a project please send a pull request to add it to the list.
-
-## Contributing
-
-If you're interested in contributing to Badger see [CONTRIBUTING.md](./CONTRIBUTING.md).
-
-## Frequently Asked Questions
-### My writes are getting stuck. Why?
-
-**Update: With the new `Value(func(v []byte))` API, this deadlock can no longer
-happen.**
-
-The following is true for users on Badger v1.x.
-
-This can happen if a long-running iteration is performed with `Prefetch` set to
-false, but an `Item::Value` call is made internally in the loop. That causes
-Badger to acquire read locks over the value log files to avoid value log GC
-removing the file from underneath. As a side effect, this also blocks a new
-value log GC file from being created, when the value log file boundary is hit.
-
-Please see Github issues [#293](https://github.com/dgraph-io/badger/issues/293)
-and [#315](https://github.com/dgraph-io/badger/issues/315).
-
-There are multiple workarounds during iteration:
-
-1. Use `Item::ValueCopy` instead of `Item::Value` when retrieving a value (see
-   the sketch after this list).
-1. Set `Prefetch` to true. Badger would then copy over the value and release the
- file lock immediately.
-1. When `Prefetch` is false, don't call `Item::Value` and do a pure key-only
- iteration. This might be useful if you just want to delete a lot of keys.
-1. Do the writes in a separate transaction after the reads.
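-
-For instance, the first workaround might look like this sketch:
-
-```go
-err := db.View(func(txn *badger.Txn) error {
-    opts := badger.DefaultIteratorOptions
-    opts.PrefetchValues = false
-    it := txn.NewIterator(opts)
-    defer it.Close()
-    for it.Rewind(); it.Valid(); it.Next() {
-        // ValueCopy returns its own copy of the value, so no value log
-        // file lock is held once the call returns.
-        val, err := it.Item().ValueCopy(nil)
-        if err != nil {
-            return err
-        }
-        _ = val
-    }
-    return nil
-})
-```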
-
-### My writes are really slow. Why?
-
-Are you creating a new transaction for every single key update, and waiting for
-it to `Commit` fully before creating a new one? This will lead to very low
-throughput.
-
-We have created the `WriteBatch` API, which provides a way to batch up
-many updates into a single transaction and `Commit` that transaction using
-callbacks to avoid blocking. This amortizes the cost of a transaction really
-well, and provides the most efficient way to do bulk writes.
-
-```go
-wb := db.NewWriteBatch()
-defer wb.Cancel()
-
-for i := 0; i < N; i++ {
- err := wb.Set(key(i), value(i)) // Will create txns as needed.
- handle(err)
-}
-handle(wb.Flush()) // Wait for all txns to finish.
-```
-
-Note that the `WriteBatch` API does not allow any reads. For read-modify-write
-workloads, you should be using the `Transaction` API.
-
-### I don't see any disk writes. Why?
-
-If you're using Badger with `SyncWrites=false`, then your writes might not be written to the
-value log and won't get synced to disk immediately. Writes to the LSM tree are done in memory
-first, before they get compacted to disk. The compaction would only happen once `MaxTableSize`
-has been reached. So, if you're doing a few writes and then checking, you might not see anything
-on disk. Once you `Close` the database, you'll see these writes on disk.
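-
-If you need writes to be synced to disk as soon as they are committed, you can
-open the DB with synchronous writes; a minimal sketch (the `WithSyncWrites`
-builder is the v2 way of setting `Options.SyncWrites`):
-
-```go
-opts := badger.DefaultOptions("/tmp/badger").WithSyncWrites(true)
-db, err := badger.Open(opts)
-```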
-
-### Reverse iteration doesn't give me the right results.
-
-Just like forward iteration goes to the first key which is equal to or greater than the SEEK key, reverse iteration goes to the first key which is equal to or less than the SEEK key. Therefore, the SEEK key would not be part of the results. You can typically add a `0xff` byte as a suffix to the SEEK key to include it in the results. See the following issues: [#436](https://github.com/dgraph-io/badger/issues/436) and [#347](https://github.com/dgraph-io/badger/issues/347).
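-
-A minimal sketch of a reverse prefix scan using the `0xff` suffix trick:
-
-```go
-err := db.View(func(txn *badger.Txn) error {
-    opts := badger.DefaultIteratorOptions
-    opts.Reverse = true
-    it := txn.NewIterator(opts)
-    defer it.Close()
-
-    prefix := []byte("1234")
-    seekKey := append(prefix, 0xff) // include keys equal to the prefix itself
-    for it.Seek(seekKey); it.ValidForPrefix(prefix); it.Next() {
-        fmt.Printf("key=%s\n", it.Item().Key())
-    }
-    return nil
-})
-```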
-
-### Which instances should I use for Badger?
-
-We recommend using instances which provide local SSD storage, without any limit
-on the maximum IOPS. In AWS, these are storage-optimized instances like i3. They
-provide local SSDs which easily clock 100K IOPS over 4KB blocks.
-
-### I'm getting a closed channel error. Why?
-
-```
-panic: close of closed channel
-panic: send on closed channel
-```
-
-If you're seeing panics like the above, it is because you're operating on a closed DB. This can happen if you call `Close()` before sending a write, or if you call it multiple times. You should ensure that you only call `Close()` once, and that all your read/write operations finish before closing.
-
-### Are there any Go specific settings that I should use?
-
-We *highly* recommend setting a high number for `GOMAXPROCS`, which allows Go to
-observe the full IOPS throughput provided by modern SSDs. In Dgraph, we have set
-it to 128. For more details, [see this
-thread](https://groups.google.com/d/topic/golang-nuts/jPb_h3TvlKE/discussion).
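-
-A minimal sketch (the value 128 follows the recommendation above):
-
-```go
-import "runtime"
-
-func init() {
-    // Allow more goroutines to run in parallel, which helps keep enough
-    // I/O in flight to saturate fast SSDs.
-    runtime.GOMAXPROCS(128)
-}
-```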
-
-### Are there any Linux specific settings that I should use?
-
-We recommend setting `max file descriptors` to a high number depending upon the expected size of
-your data. On Linux and Mac, you can check the file descriptor limit with `ulimit -n -H` for the
-hard limit and `ulimit -n -S` for the soft limit. A soft limit of `65535` is a good lower bound.
-You can adjust the limit as needed.
-
-### I see "manifest has unsupported version: X (we support Y)" error.
-
-This error means you have a badger directory which was created by an older version of badger and
-you're trying to open in a newer version of badger. The underlying data format can change across
-badger versions and users will have to migrate their data directory.
-Badger data can be migrated from version X of badger to version Y of badger by following the steps
-listed below.
-Assume you are on badger v1.6.0 and you wish to migrate to v2.0.0.
-1. Install badger version v1.6.0
- - `cd $GOPATH/src/github.com/dgraph-io/badger`
- - `git checkout v1.6.0`
- - `cd badger && go install`
-
- This should install the old badger binary in your $GOBIN.
-2. Create Backup
- - `badger backup --dir path/to/badger/directory -f badger.backup`
-3. Install badger version v2.0.0
- - `cd $GOPATH/src/github.com/dgraph-io/badger`
- - `git checkout v2.0.0`
- - `cd badger && go install`
-
- This should install the new badger binary in your $GOBIN.
-4. Restore from the backup
- - `badger restore --dir path/to/new/badger/directory -f badger.backup`
-
- This will create a new directory on `path/to/new/badger/directory` and add badger data in
- newer format to it.
-
-NOTE - The above steps shouldn't cause any data loss, but please ensure the new data is valid
-before deleting the old badger directory.
-
-### Why do I need gcc to build badger? Does badger need CGO?
-
-Badger does not directly use CGO, but it relies on the https://github.com/DataDog/zstd library for
-zstd compression, and that library requires `gcc`/cgo. You can build badger without cgo by running
-`CGO_ENABLED=0 go build`. This will build badger without support for the ZSTD compression algorithm.
-
-## Contact
-- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions.
-- Please use [Github issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests.
-- Join [![Slack Status](http://slack.dgraph.io/badge.svg)](http://slack.dgraph.io).
-- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs).
-
diff --git a/vendor/github.com/dgraph-io/badger/v2/VERSIONING.md b/vendor/github.com/dgraph-io/badger/v2/VERSIONING.md
deleted file mode 100644
index a890a36f..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/VERSIONING.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Serialization Versioning: Semantic Versioning for databases
-
-Semantic Versioning, commonly known as SemVer, is a great idea that has been very widely adopted as
-a way to decide how to name software versions. The whole concept is very well summarized on
-semver.org with the following lines:
-
-> Given a version number MAJOR.MINOR.PATCH, increment the:
->
-> 1. MAJOR version when you make incompatible API changes,
-> 2. MINOR version when you add functionality in a backwards-compatible manner, and
-> 3. PATCH version when you make backwards-compatible bug fixes.
->
-> Additional labels for pre-release and build metadata are available as extensions to the
-> MAJOR.MINOR.PATCH format.
-
-Unfortunately, API changes are not the most important changes for libraries that serialize data for
-later consumption. For these libraries, such as BadgerDB, changes to the API are much easier to
-handle than changes to the data format used to store data on disk.
-
-## Serialization Version specification
-
-Serialization Versioning, like Semantic Versioning, uses 3 numbers and also calls them
-MAJOR.MINOR.PATCH, but the semantics of the numbers are slightly modified:
-
-Given a version number MAJOR.MINOR.PATCH, increment the:
-
-- MAJOR version when you make changes that require a transformation of the dataset before it can be
-used again.
-- MINOR version when old datasets are still readable but the API might have changed in
-backwards-compatible or incompatible ways.
-- PATCH version when you make backwards-compatible bug fixes.
-
-Additional labels for pre-release and build metadata are available as extensions to the
-MAJOR.MINOR.PATCH format.
-
-Following this naming strategy, migration from v1.x to v2.x requires a migration strategy for your
-existing dataset, and as such has to be carefully planned. Migrations in between different minor
-versions (e.g. v1.5.x and v1.6.x) might break your build, as the API *might* have changed, but once
-your code compiles there's no need for any data migration. Lastly, changes in between two different
-patch versions should never break your build or dataset.
-
-For more background on our decision to adopt Serialization Versioning, read the blog post
-[Semantic Versioning, Go Modules, and Databases][blog] and the original proposal on
-[this comment on Dgraph's Discuss forum][discuss].
-
-[blog]: https://blog.dgraph.io/post/serialization-versioning/
-[discuss]: https://discuss.dgraph.io/t/go-modules-on-badger-and-dgraph/4662/7
\ No newline at end of file
diff --git a/vendor/github.com/dgraph-io/badger/v2/appveyor.yml b/vendor/github.com/dgraph-io/badger/v2/appveyor.yml
deleted file mode 100644
index ac3a9505..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/appveyor.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-# version format
-version: "{build}"
-
-# Operating system (build VM template)
-os: Windows Server 2012 R2
-
-# Platform.
-platform: x64
-
-clone_folder: c:\gopath\src\github.com\dgraph-io\badger
-
-# Environment variables
-environment:
- GOVERSION: 1.12
- GOPATH: c:\gopath
- GO111MODULE: on
-
-# scripts that run after cloning repository
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;c:\msys64\mingw64\bin;%PATH%
- - go version
- - go env
- - python --version
- - gcc --version
-
-# To run your custom scripts instead of automatic MSBuild
-build_script:
- # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
- - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
- - cd c:\gopath\src\github.com\dgraph-io\badger
- - git branch
- - go get -t ./...
-
-# To run your custom scripts instead of automatic tests
-test_script:
- # Unit tests
- - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
- - go test -v github.com/dgraph-io/badger/...
- - go test -v -vlog_mmap=false github.com/dgraph-io/badger/...
- - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
-
-notifications:
- - provider: Email
- to:
- - pawan@dgraph.io
- on_build_failure: true
- on_build_status_changed: true
-# to disable deployment
-deploy: off
-
diff --git a/vendor/github.com/dgraph-io/badger/v2/backup.go b/vendor/github.com/dgraph-io/badger/v2/backup.go
deleted file mode 100644
index 3c1b7592..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/backup.go
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bufio"
- "bytes"
- "context"
- "encoding/binary"
- "io"
-
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/y"
- "github.com/golang/protobuf/proto"
-)
-
-// flushThreshold determines when a buffer will be flushed. When performing a
-// backup/restore, the entries will be batched up until the total size of batch
-// is more than flushThreshold or entry size (without the value size) is more
-// than the maxBatchSize.
-const flushThreshold = 100 << 20
-
-// Backup is a wrapper function over Stream.Backup to generate full and incremental backups of the
-// DB. For more control over how many goroutines are used to generate the backup, or if you wish to
-// backup only a certain range of keys, use Stream.Backup directly.
-func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) {
- stream := db.NewStream()
- stream.LogPrefix = "DB.Backup"
- return stream.Backup(w, since)
-}
-
-// Backup dumps a protobuf-encoded list of all entries in the database that
-// are newer than the specified version into the given writer. It returns a
-// timestamp indicating when the entries were dumped, which can be passed into
-// a later invocation to generate an incremental dump of entries that have
-// been added/modified since the last invocation of Stream.Backup().
-//
-// This can be used to backup the data in a database at a given point in time.
-func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) {
- stream.KeyToList = func(key []byte, itr *Iterator) (*pb.KVList, error) {
- list := &pb.KVList{}
- for ; itr.Valid(); itr.Next() {
- item := itr.Item()
- if !bytes.Equal(item.Key(), key) {
- return list, nil
- }
- if item.Version() < since {
- // Ignore versions less than given timestamp, or skip older
- // versions of the given key.
- return list, nil
- }
-
- var valCopy []byte
- if !item.IsDeletedOrExpired() {
- // No need to copy value, if item is deleted or expired.
- var err error
- valCopy, err = item.ValueCopy(nil)
- if err != nil {
- stream.db.opt.Errorf("Key [%x, %d]. Error while fetching value [%v]\n",
- item.Key(), item.Version(), err)
- return nil, err
- }
- }
-
- // clear txn bits
- meta := item.meta &^ (bitTxn | bitFinTxn)
- kv := &pb.KV{
- Key: item.KeyCopy(nil),
- Value: valCopy,
- UserMeta: []byte{item.UserMeta()},
- Version: item.Version(),
- ExpiresAt: item.ExpiresAt(),
- Meta: []byte{meta},
- }
- list.Kv = append(list.Kv, kv)
-
- switch {
- case item.DiscardEarlierVersions():
- // If we need to discard earlier versions of this item, add a delete
- // marker just below the current version.
- list.Kv = append(list.Kv, &pb.KV{
- Key: item.KeyCopy(nil),
- Version: item.Version() - 1,
- Meta: []byte{bitDelete},
- })
- return list, nil
-
- case item.IsDeletedOrExpired():
- return list, nil
- }
- }
- return list, nil
- }
-
- var maxVersion uint64
- stream.Send = func(list *pb.KVList) error {
- for _, kv := range list.Kv {
- if maxVersion < kv.Version {
- maxVersion = kv.Version
- }
- }
- return writeTo(list, w)
- }
-
- if err := stream.Orchestrate(context.Background()); err != nil {
- return 0, err
- }
- return maxVersion, nil
-}
-
-func writeTo(list *pb.KVList, w io.Writer) error {
- if err := binary.Write(w, binary.LittleEndian, uint64(proto.Size(list))); err != nil {
- return err
- }
- buf, err := proto.Marshal(list)
- if err != nil {
- return err
- }
- _, err = w.Write(buf)
- return err
-}
-
-// KVLoader is used to write KVList objects in to badger. It can be used to restore a backup.
-type KVLoader struct {
- db *DB
- throttle *y.Throttle
- entries []*Entry
- entriesSize int64
- totalSize int64
-}
-
-// NewKVLoader returns a new instance of KVLoader.
-func (db *DB) NewKVLoader(maxPendingWrites int) *KVLoader {
- return &KVLoader{
- db: db,
- throttle: y.NewThrottle(maxPendingWrites),
- entries: make([]*Entry, 0, db.opt.maxBatchCount),
- }
-}
-
-// Set writes the key-value pair to the database.
-func (l *KVLoader) Set(kv *pb.KV) error {
- var userMeta, meta byte
- if len(kv.UserMeta) > 0 {
- userMeta = kv.UserMeta[0]
- }
- if len(kv.Meta) > 0 {
- meta = kv.Meta[0]
- }
- e := &Entry{
- Key: y.KeyWithTs(kv.Key, kv.Version),
- Value: kv.Value,
- UserMeta: userMeta,
- ExpiresAt: kv.ExpiresAt,
- meta: meta,
- }
- estimatedSize := int64(e.estimateSize(l.db.opt.ValueThreshold))
- // Flush entries if inserting the next entry would overflow the transactional limits.
- if int64(len(l.entries))+1 >= l.db.opt.maxBatchCount ||
- l.entriesSize+estimatedSize >= l.db.opt.maxBatchSize ||
- l.totalSize >= flushThreshold {
- if err := l.send(); err != nil {
- return err
- }
- }
- l.entries = append(l.entries, e)
- l.entriesSize += estimatedSize
- l.totalSize += estimatedSize + int64(len(e.Value))
- return nil
-}
-
-func (l *KVLoader) send() error {
- if err := l.throttle.Do(); err != nil {
- return err
- }
- if err := l.db.batchSetAsync(l.entries, func(err error) {
- l.throttle.Done(err)
- }); err != nil {
- return err
- }
-
- l.entries = make([]*Entry, 0, l.db.opt.maxBatchCount)
- l.entriesSize = 0
- l.totalSize = 0
- return nil
-}
-
-// Finish is meant to be called after all the key-value pairs have been loaded.
-func (l *KVLoader) Finish() error {
- if len(l.entries) > 0 {
- if err := l.send(); err != nil {
- return err
- }
- }
- return l.throttle.Finish()
-}
-
-// Load reads a protobuf-encoded list of all entries from a reader and writes
-// them to the database. This can be used to restore the database from a backup
-// made by calling DB.Backup(). If more complex logic is needed to restore a badger
-// backup, the KVLoader interface should be used instead.
-//
-// DB.Load() should be called on a database that is not running any other
-// concurrent transactions while it is running.
-func (db *DB) Load(r io.Reader, maxPendingWrites int) error {
- br := bufio.NewReaderSize(r, 16<<10)
- unmarshalBuf := make([]byte, 1<<10)
-
- ldr := db.NewKVLoader(maxPendingWrites)
- for {
- var sz uint64
- err := binary.Read(br, binary.LittleEndian, &sz)
- if err == io.EOF {
- break
- } else if err != nil {
- return err
- }
-
- if cap(unmarshalBuf) < int(sz) {
- unmarshalBuf = make([]byte, sz)
- }
-
- if _, err = io.ReadFull(br, unmarshalBuf[:sz]); err != nil {
- return err
- }
-
- list := &pb.KVList{}
- if err := proto.Unmarshal(unmarshalBuf[:sz], list); err != nil {
- return err
- }
-
- for _, kv := range list.Kv {
- if err := ldr.Set(kv); err != nil {
- return err
- }
-
- // Update nextTxnTs, memtable stores this
- // timestamp in badger head when flushed.
- if kv.Version >= db.orc.nextTxnTs {
- db.orc.nextTxnTs = kv.Version + 1
- }
- }
- }
-
- if err := ldr.Finish(); err != nil {
- return err
- }
- db.orc.txnMark.Done(db.orc.nextTxnTs - 1)
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/batch.go b/vendor/github.com/dgraph-io/badger/v2/batch.go
deleted file mode 100644
index ff94e861..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/batch.go
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright 2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "sync"
-
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/y"
- "github.com/pkg/errors"
-)
-
-// WriteBatch holds the necessary info to perform batched writes.
-type WriteBatch struct {
- sync.Mutex
- txn *Txn
- db *DB
- throttle *y.Throttle
- err error
-
- isManaged bool
- commitTs uint64
-}
-
-// NewWriteBatch creates a new WriteBatch. This provides a way to conveniently do a lot of writes,
-// batching them up as tightly as possible in a single transaction and using callbacks to avoid
-// waiting for them to commit, thus achieving good performance. This API hides away the logic of
-// creating and committing transactions. Due to the nature of SSI guarantees provided by Badger,
-// blind writes can never encounter transaction conflicts (ErrConflict).
-func (db *DB) NewWriteBatch() *WriteBatch {
- if db.opt.managedTxns {
- panic("cannot use NewWriteBatch in managed mode. Use NewWriteBatchAt instead")
- }
- return db.newWriteBatch(false)
-}
-
-func (db *DB) newWriteBatch(isManaged bool) *WriteBatch {
- return &WriteBatch{
- db: db,
- isManaged: isManaged,
- txn: db.newTransaction(true, isManaged),
- throttle: y.NewThrottle(16),
- }
-}
-
-// SetMaxPendingTxns sets a limit on maximum number of pending transactions while writing batches.
-// This function should be called before using WriteBatch. Default value of MaxPendingTxns is
-// 16 to minimise memory usage.
-func (wb *WriteBatch) SetMaxPendingTxns(max int) {
- wb.throttle = y.NewThrottle(max)
-}
-
-// Cancel function must be called if there's a chance that Flush might not get
-// called. If neither Flush nor Cancel is called, the transaction oracle would
-// never get a chance to clear out the row commit timestamp map, thus causing an
-// unbounded memory consumption. Typically, you can call Cancel as a defer
-// statement right after NewWriteBatch is called.
-//
-// Note that any committed writes would still go through despite calling Cancel.
-func (wb *WriteBatch) Cancel() {
- if err := wb.throttle.Finish(); err != nil {
- wb.db.opt.Errorf("WriteBatch.Cancel error while finishing: %v", err)
- }
- wb.txn.Discard()
-}
-
-func (wb *WriteBatch) callback(err error) {
- // sync.WaitGroup is thread-safe, so it doesn't need to be run inside wb.Lock.
- defer wb.throttle.Done(err)
- if err == nil {
- return
- }
-
- wb.Lock()
- defer wb.Unlock()
- if wb.err != nil {
- return
- }
- wb.err = err
-}
-
-func (wb *WriteBatch) Write(kvList *pb.KVList) error {
- wb.Lock()
- defer wb.Unlock()
- for _, kv := range kvList.Kv {
- e := Entry{Key: kv.Key, Value: kv.Value}
- if len(kv.UserMeta) > 0 {
- e.UserMeta = kv.UserMeta[0]
- }
- y.AssertTrue(kv.Version != 0)
- e.version = kv.Version
- if err := wb.handleEntry(&e); err != nil {
- return err
- }
- }
- return nil
-}
-
-// SetEntryAt is the equivalent of Txn.SetEntry but it also allows setting version for the entry.
-// SetEntryAt can be used only in managed mode.
-func (wb *WriteBatch) SetEntryAt(e *Entry, ts uint64) error {
- if !wb.db.opt.managedTxns {
- return errors.New("SetEntryAt can only be used in managed mode. Use SetEntry instead")
- }
- e.version = ts
- return wb.SetEntry(e)
-}
-
-// Should be called with lock acquired.
-func (wb *WriteBatch) handleEntry(e *Entry) error {
- if err := wb.txn.SetEntry(e); err != ErrTxnTooBig {
- return err
- }
- // Txn has reached its zenith. Commit now.
- if cerr := wb.commit(); cerr != nil {
- return cerr
- }
- // This time the error must not be ErrTxnTooBig, otherwise, we make the
- // error permanent.
- if err := wb.txn.SetEntry(e); err != nil {
- wb.err = err
- return err
- }
- return nil
-}
-
-// SetEntry is the equivalent of Txn.SetEntry.
-func (wb *WriteBatch) SetEntry(e *Entry) error {
- wb.Lock()
- defer wb.Unlock()
- return wb.handleEntry(e)
-}
-
-// Set is equivalent of Txn.Set().
-func (wb *WriteBatch) Set(k, v []byte) error {
- e := &Entry{Key: k, Value: v}
- return wb.SetEntry(e)
-}
-
-// DeleteAt is equivalent of Txn.Delete but accepts a delete timestamp.
-func (wb *WriteBatch) DeleteAt(k []byte, ts uint64) error {
- e := Entry{Key: k, meta: bitDelete, version: ts}
- return wb.SetEntry(&e)
-}
-
-// Delete is equivalent of Txn.Delete.
-func (wb *WriteBatch) Delete(k []byte) error {
- wb.Lock()
- defer wb.Unlock()
-
- if err := wb.txn.Delete(k); err != ErrTxnTooBig {
- return err
- }
- if err := wb.commit(); err != nil {
- return err
- }
- if err := wb.txn.Delete(k); err != nil {
- wb.err = err
- return err
- }
- return nil
-}
-
-// Caller to commit must hold a write lock.
-func (wb *WriteBatch) commit() error {
- if wb.err != nil {
- return wb.err
- }
- if err := wb.throttle.Do(); err != nil {
- return err
- }
- wb.txn.CommitWith(wb.callback)
- wb.txn = wb.db.newTransaction(true, wb.isManaged)
- wb.txn.commitTs = wb.commitTs
- return wb.err
-}
-
-// Flush must be called at the end to ensure that any pending writes get committed to Badger. Flush
-// returns any error stored by WriteBatch.
-func (wb *WriteBatch) Flush() error {
- wb.Lock()
- _ = wb.commit()
- wb.txn.Discard()
- wb.Unlock()
-
- if err := wb.throttle.Finish(); err != nil {
- return err
- }
-
- return wb.err
-}
-
-// Error returns any errors encountered so far. No commits would be run once an error is detected.
-func (wb *WriteBatch) Error() error {
- wb.Lock()
- defer wb.Unlock()
- return wb.err
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/compaction.go b/vendor/github.com/dgraph-io/badger/v2/compaction.go
deleted file mode 100644
index 0372b8b7..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/compaction.go
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "fmt"
- "log"
- "math"
- "sync"
-
- "golang.org/x/net/trace"
-
- "github.com/dgraph-io/badger/v2/table"
- "github.com/dgraph-io/badger/v2/y"
-)
-
-type keyRange struct {
- left []byte
- right []byte
- inf bool
-}
-
-var infRange = keyRange{inf: true}
-
-func (r keyRange) String() string {
- return fmt.Sprintf("[left=%x, right=%x, inf=%v]", r.left, r.right, r.inf)
-}
-
-func (r keyRange) equals(dst keyRange) bool {
- return bytes.Equal(r.left, dst.left) &&
- bytes.Equal(r.right, dst.right) &&
- r.inf == dst.inf
-}
-
-func (r keyRange) overlapsWith(dst keyRange) bool {
- if r.inf || dst.inf {
- return true
- }
-
- // If my left is greater than dst right, we have no overlap.
- if y.CompareKeys(r.left, dst.right) > 0 {
- return false
- }
- // If my right is less than dst left, we have no overlap.
- if y.CompareKeys(r.right, dst.left) < 0 {
- return false
- }
- // We have overlap.
- return true
-}
-
-func getKeyRange(tables ...*table.Table) keyRange {
- if len(tables) == 0 {
- return keyRange{}
- }
- smallest := tables[0].Smallest()
- biggest := tables[0].Biggest()
- for i := 1; i < len(tables); i++ {
- if y.CompareKeys(tables[i].Smallest(), smallest) < 0 {
- smallest = tables[i].Smallest()
- }
- if y.CompareKeys(tables[i].Biggest(), biggest) > 0 {
- biggest = tables[i].Biggest()
- }
- }
-
- // We pick all the versions of the smallest and the biggest key. Note that version zero would
- // be the rightmost key, considering versions are default sorted in descending order.
- return keyRange{
- left: y.KeyWithTs(y.ParseKey(smallest), math.MaxUint64),
- right: y.KeyWithTs(y.ParseKey(biggest), 0),
- }
-}
-
-type levelCompactStatus struct {
- ranges []keyRange
- delSize int64
-}
-
-func (lcs *levelCompactStatus) debug() string {
- var b bytes.Buffer
- for _, r := range lcs.ranges {
- b.WriteString(r.String())
- }
- return b.String()
-}
-
-func (lcs *levelCompactStatus) overlapsWith(dst keyRange) bool {
- for _, r := range lcs.ranges {
- if r.overlapsWith(dst) {
- return true
- }
- }
- return false
-}
-
-func (lcs *levelCompactStatus) remove(dst keyRange) bool {
- final := lcs.ranges[:0]
- var found bool
- for _, r := range lcs.ranges {
- if !r.equals(dst) {
- final = append(final, r)
- } else {
- found = true
- }
- }
- lcs.ranges = final
- return found
-}
-
-type compactStatus struct {
- sync.RWMutex
- levels []*levelCompactStatus
-}
-
-func (cs *compactStatus) toLog(tr trace.Trace) {
- cs.RLock()
- defer cs.RUnlock()
-
- tr.LazyPrintf("Compaction status:")
- for i, l := range cs.levels {
- if l.debug() == "" {
- continue
- }
- tr.LazyPrintf("[%d] %s", i, l.debug())
- }
-}
-
-func (cs *compactStatus) overlapsWith(level int, this keyRange) bool {
- cs.RLock()
- defer cs.RUnlock()
-
- thisLevel := cs.levels[level]
- return thisLevel.overlapsWith(this)
-}
-
-func (cs *compactStatus) delSize(l int) int64 {
- cs.RLock()
- defer cs.RUnlock()
- return cs.levels[l].delSize
-}
-
-type thisAndNextLevelRLocked struct{}
-
-// compareAndAdd will check whether we can run this compactDef, i.e. that it doesn't overlap with
-// any other running compaction. If it can be run, it would store this run in the compactStatus state.
-func (cs *compactStatus) compareAndAdd(_ thisAndNextLevelRLocked, cd compactDef) bool {
- cs.Lock()
- defer cs.Unlock()
-
- level := cd.thisLevel.level
-
- y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels))
- thisLevel := cs.levels[level]
- nextLevel := cs.levels[level+1]
-
- if thisLevel.overlapsWith(cd.thisRange) {
- return false
- }
- if nextLevel.overlapsWith(cd.nextRange) {
- return false
- }
- // Check whether this level really needs compaction or not. Otherwise, we'll end up
- // running parallel compactions for the same level.
- // Update: We should not be checking size here. Compaction priority already did the size checks.
- // Here we should just be executing the wish of others.
-
- thisLevel.ranges = append(thisLevel.ranges, cd.thisRange)
- nextLevel.ranges = append(nextLevel.ranges, cd.nextRange)
- thisLevel.delSize += cd.thisSize
- return true
-}
-
-func (cs *compactStatus) delete(cd compactDef) {
- cs.Lock()
- defer cs.Unlock()
-
- level := cd.thisLevel.level
- y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels))
-
- thisLevel := cs.levels[level]
- nextLevel := cs.levels[level+1]
-
- thisLevel.delSize -= cd.thisSize
- found := thisLevel.remove(cd.thisRange)
- found = nextLevel.remove(cd.nextRange) && found
-
- if !found {
- this := cd.thisRange
- next := cd.nextRange
- fmt.Printf("Looking for: [%q, %q, %v] in this level.\n", this.left, this.right, this.inf)
- fmt.Printf("This Level:\n%s\n", thisLevel.debug())
- fmt.Println()
- fmt.Printf("Looking for: [%q, %q, %v] in next level.\n", next.left, next.right, next.inf)
- fmt.Printf("Next Level:\n%s\n", nextLevel.debug())
- log.Fatal("keyRange not found")
- }
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/db.go b/vendor/github.com/dgraph-io/badger/v2/db.go
deleted file mode 100644
index cdb1f490..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/db.go
+++ /dev/null
@@ -1,1845 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "expvar"
- "fmt"
- "math"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/v2/options"
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/skl"
- "github.com/dgraph-io/badger/v2/table"
- "github.com/dgraph-io/badger/v2/y"
- "github.com/dgraph-io/ristretto"
- humanize "github.com/dustin/go-humanize"
- "github.com/pkg/errors"
-)
-
-var (
- badgerPrefix = []byte("!badger!") // Prefix for internal keys used by badger.
- head = []byte("!badger!head") // For storing value offset for replay.
- txnKey = []byte("!badger!txn") // For indicating end of entries in txn.
- badgerMove = []byte("!badger!move") // For key-value pairs which got moved during GC.
- lfDiscardStatsKey = []byte("!badger!discard") // For storing lfDiscardStats
-)
-
-type closers struct {
- updateSize *y.Closer
- compactors *y.Closer
- memtable *y.Closer
- writes *y.Closer
- valueGC *y.Closer
- pub *y.Closer
-}
-
-// DB provides the various functions required to interact with Badger.
-// DB is thread-safe.
-type DB struct {
- sync.RWMutex // Guards list of inmemory tables, not individual reads and writes.
-
- dirLockGuard *directoryLockGuard
- // nil if Dir and ValueDir are the same
- valueDirGuard *directoryLockGuard
-
- closers closers
- mt *skl.Skiplist // Our latest (actively written) in-memory table
- imm []*skl.Skiplist // Add here only AFTER pushing to flushChan.
- opt Options
- manifest *manifestFile
- lc *levelsController
- vlog valueLog
- vhead valuePointer // less than or equal to a pointer to the last vlog value put into mt
- writeCh chan *request
- flushChan chan flushTask // For flushing memtables.
- closeOnce sync.Once // For closing DB only once.
-
- // Number of log rotates since the last memtable flush. We will access this field via atomic
- // functions. Since we are not going to use any 64bit atomic functions, there is no need for
- // 64 bit alignment of this struct (see #311).
- logRotates int32
-
- blockWrites int32
- isClosed uint32
-
- orc *oracle
-
- pub *publisher
- registry *KeyRegistry
- blockCache *ristretto.Cache
- indexCache *ristretto.Cache
-}
-
-const (
- kvWriteChCapacity = 1000
-)
-
-func (db *DB) replayFunction() func(Entry, valuePointer) error {
- type txnEntry struct {
- nk []byte
- v y.ValueStruct
- }
-
- var txn []txnEntry
- var lastCommit uint64
-
- toLSM := func(nk []byte, vs y.ValueStruct) {
- for err := db.ensureRoomForWrite(); err != nil; err = db.ensureRoomForWrite() {
- db.opt.Debugf("Replay: Making room for writes")
- time.Sleep(10 * time.Millisecond)
- }
- db.mt.Put(nk, vs)
- }
-
- first := true
- return func(e Entry, vp valuePointer) error { // Function for replaying.
- if first {
- db.opt.Debugf("First key=%q\n", e.Key)
- }
- first = false
- db.orc.Lock()
- if db.orc.nextTxnTs < y.ParseTs(e.Key) {
- db.orc.nextTxnTs = y.ParseTs(e.Key)
- }
- db.orc.Unlock()
-
- nk := make([]byte, len(e.Key))
- copy(nk, e.Key)
- var nv []byte
- meta := e.meta
- if db.shouldWriteValueToLSM(e) {
- nv = make([]byte, len(e.Value))
- copy(nv, e.Value)
- } else {
- nv = vp.Encode()
- meta = meta | bitValuePointer
- }
- // Update vhead. If the crash happens while replay was in progress
- // and the head is not updated, we will end up replaying all the
- // files starting from file zero, again.
- db.updateHead([]valuePointer{vp})
-
- v := y.ValueStruct{
- Value: nv,
- Meta: meta,
- UserMeta: e.UserMeta,
- ExpiresAt: e.ExpiresAt,
- }
-
- switch {
- case e.meta&bitFinTxn > 0:
- txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
- if err != nil {
- return errors.Wrapf(err, "Unable to parse txn fin: %q", e.Value)
- }
- y.AssertTrue(lastCommit == txnTs)
- y.AssertTrue(len(txn) > 0)
- // Got the end of txn. Now we can store them.
- for _, t := range txn {
- toLSM(t.nk, t.v)
- }
- txn = txn[:0]
- lastCommit = 0
-
- case e.meta&bitTxn > 0:
- txnTs := y.ParseTs(nk)
- if lastCommit == 0 {
- lastCommit = txnTs
- }
- if lastCommit != txnTs {
- db.opt.Warningf("Found an incomplete txn at timestamp %d. Discarding it.\n",
- lastCommit)
- txn = txn[:0]
- lastCommit = txnTs
- }
- te := txnEntry{nk: nk, v: v}
- txn = append(txn, te)
-
- default:
- // This entry is from a rewrite or via SetEntryAt(..).
- toLSM(nk, v)
-
- // We shouldn't get this entry in the middle of a transaction.
- y.AssertTrue(lastCommit == 0)
- y.AssertTrue(len(txn) == 0)
- }
- return nil
- }
-}
-
-// Open returns a new DB object.
-func Open(opt Options) (db *DB, err error) {
- // It's okay to have zero compactors which will disable all compactions but
- // we cannot have just one compactor otherwise we will end up with all data
- // on level 2.
- if opt.NumCompactors == 1 {
- return nil, errors.New("Cannot have 1 compactor. Need at least 2")
- }
- if opt.InMemory && (opt.Dir != "" || opt.ValueDir != "") {
- return nil, errors.New("Cannot use badger in Disk-less mode with Dir or ValueDir set")
- }
- opt.maxBatchSize = (15 * opt.MaxTableSize) / 100
- opt.maxBatchCount = opt.maxBatchSize / int64(skl.MaxNodeSize)
-
- // We are limiting opt.ValueThreshold to maxValueThreshold for now.
- if opt.ValueThreshold > maxValueThreshold {
- return nil, errors.Errorf("Invalid ValueThreshold, must be less or equal to %d",
- maxValueThreshold)
- }
-
- // If ValueThreshold is greater than opt.maxBatchSize, we won't be able to push any data using
- // the transaction APIs. Transaction batches entries into batches of size opt.maxBatchSize.
- if int64(opt.ValueThreshold) > opt.maxBatchSize {
- return nil, errors.Errorf("Valuethreshold greater than max batch size of %d. Either "+
- "reduce opt.ValueThreshold or increase opt.MaxTableSize.", opt.maxBatchSize)
- }
- if !(opt.ValueLogFileSize <= 2<<30 && opt.ValueLogFileSize >= 1<<20) {
- return nil, ErrValueLogSize
- }
- if !(opt.ValueLogLoadingMode == options.FileIO ||
- opt.ValueLogLoadingMode == options.MemoryMap) {
- return nil, ErrInvalidLoadingMode
- }
-
- // Keep L0 in memory if either KeepL0InMemory is set or if InMemory is set.
- opt.KeepL0InMemory = opt.KeepL0InMemory || opt.InMemory
-
-	// Compact L0 on close if either it is set or if KeepL0InMemory is set. When
-	// KeepL0InMemory is set, we need to compact L0 on close; otherwise we might lose data.
- opt.CompactL0OnClose = opt.CompactL0OnClose || opt.KeepL0InMemory
-
- if opt.ReadOnly {
- // Can't truncate if the DB is read only.
- opt.Truncate = false
- // Do not perform compaction in read only mode.
- opt.CompactL0OnClose = false
- }
- var dirLockGuard, valueDirLockGuard *directoryLockGuard
-
-	// Create directories and acquire locks on them only if badger is not running
-	// in InMemory mode. We don't have any directories/files in InMemory mode, so
-	// we don't need to acquire any locks on them.
- if !opt.InMemory {
- if err := createDirs(opt); err != nil {
- return nil, err
- }
- if !opt.BypassLockGuard {
- dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly)
- if err != nil {
- return nil, err
- }
- defer func() {
- if dirLockGuard != nil {
- _ = dirLockGuard.release()
- }
- }()
- absDir, err := filepath.Abs(opt.Dir)
- if err != nil {
- return nil, err
- }
- absValueDir, err := filepath.Abs(opt.ValueDir)
- if err != nil {
- return nil, err
- }
- if absValueDir != absDir {
- valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly)
- if err != nil {
- return nil, err
- }
- defer func() {
- if valueDirLockGuard != nil {
- _ = valueDirLockGuard.release()
- }
- }()
- }
- }
- }
-
- manifestFile, manifest, err := openOrCreateManifestFile(opt)
- if err != nil {
- return nil, err
- }
- defer func() {
- if manifestFile != nil {
- _ = manifestFile.close()
- }
- }()
-
- db = &DB{
- imm: make([]*skl.Skiplist, 0, opt.NumMemtables),
- flushChan: make(chan flushTask, opt.NumMemtables),
- writeCh: make(chan *request, kvWriteChCapacity),
- opt: opt,
- manifest: manifestFile,
- dirLockGuard: dirLockGuard,
- valueDirGuard: valueDirLockGuard,
- orc: newOracle(opt),
- pub: newPublisher(),
- }
-	// Clean up all the goroutines started by badger in case of an error.
- defer func() {
- if err != nil {
- db.cleanup()
- db = nil
- }
- }()
-
- if opt.BlockCacheSize > 0 {
- config := ristretto.Config{
- // Use 5% of cache memory for storing counters.
- NumCounters: int64(float64(opt.BlockCacheSize) * 0.05 * 2),
- MaxCost: int64(float64(opt.BlockCacheSize) * 0.95),
- BufferItems: 64,
- Metrics: true,
- }
- db.blockCache, err = ristretto.NewCache(&config)
- if err != nil {
- return nil, errors.Wrap(err, "failed to create data cache")
- }
- }
-
- if opt.IndexCacheSize > 0 {
- config := ristretto.Config{
- // Use 5% of cache memory for storing counters.
- NumCounters: int64(float64(opt.IndexCacheSize) * 0.05 * 2),
- MaxCost: int64(float64(opt.IndexCacheSize) * 0.95),
- BufferItems: 64,
- Metrics: true,
- }
- db.indexCache, err = ristretto.NewCache(&config)
- if err != nil {
- return nil, errors.Wrap(err, "failed to create bf cache")
- }
- }
- if db.opt.InMemory {
- db.opt.SyncWrites = false
- // If badger is running in memory mode, push everything into the LSM Tree.
- db.opt.ValueThreshold = math.MaxInt32
- }
- krOpt := KeyRegistryOptions{
- ReadOnly: opt.ReadOnly,
- Dir: opt.Dir,
- EncryptionKey: opt.EncryptionKey,
- EncryptionKeyRotationDuration: opt.EncryptionKeyRotationDuration,
- InMemory: opt.InMemory,
- }
-
- if db.registry, err = OpenKeyRegistry(krOpt); err != nil {
- return db, err
- }
- db.calculateSize()
- db.closers.updateSize = y.NewCloser(1)
- go db.updateSize(db.closers.updateSize)
- db.mt = skl.NewSkiplist(arenaSize(opt))
-
- // newLevelsController potentially loads files in directory.
- if db.lc, err = newLevelsController(db, &manifest); err != nil {
- return db, err
- }
-
- // Initialize vlog struct.
- db.vlog.init(db)
-
- if !opt.ReadOnly {
- db.closers.compactors = y.NewCloser(1)
- db.lc.startCompact(db.closers.compactors)
-
- db.closers.memtable = y.NewCloser(1)
- go func() {
- _ = db.flushMemtable(db.closers.memtable) // Need levels controller to be up.
- }()
- }
-
- headKey := y.KeyWithTs(head, math.MaxUint64)
-	// We need to pass the key with a timestamp; the LSM get strips the last 8 bytes and compares the key.
- vs, err := db.get(headKey)
- if err != nil {
- return db, errors.Wrap(err, "Retrieving head")
- }
- db.orc.nextTxnTs = vs.Version
- var vptr valuePointer
- if len(vs.Value) > 0 {
- vptr.Decode(vs.Value)
- }
-
- replayCloser := y.NewCloser(1)
- go db.doWrites(replayCloser)
-
- if err = db.vlog.open(db, vptr, db.replayFunction()); err != nil {
- replayCloser.SignalAndWait()
- return db, y.Wrapf(err, "During db.vlog.open")
- }
- replayCloser.SignalAndWait() // Wait for replay to be applied first.
-
- // Let's advance nextTxnTs to one more than whatever we observed via
- // replaying the logs.
- db.orc.txnMark.Done(db.orc.nextTxnTs)
- // In normal mode, we must update readMark so older versions of keys can be removed during
- // compaction when run in offline mode via the flatten tool.
- db.orc.readMark.Done(db.orc.nextTxnTs)
- db.orc.incrementNextTs()
-
- db.closers.writes = y.NewCloser(1)
- go db.doWrites(db.closers.writes)
-
- if !db.opt.InMemory {
- db.closers.valueGC = y.NewCloser(1)
- go db.vlog.waitOnGC(db.closers.valueGC)
- }
-
- db.closers.pub = y.NewCloser(1)
- go db.pub.listenForUpdates(db.closers.pub)
-
- valueDirLockGuard = nil
- dirLockGuard = nil
- manifestFile = nil
- return db, nil
-}
-
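For orientation while reviewing this removal: the validation above is exercised through the exported badger.Open. A minimal, hedged sketch of a caller (the path and option values are illustrative, not taken from this repository):

```go
package main

import (
	"log"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	// DefaultOptions uses the given path for both Dir and ValueDir. The
	// setter values are illustrative; they must satisfy the checks in Open,
	// e.g. ValueLogFileSize within [1<<20, 2<<30] and never exactly one
	// compactor.
	opt := badger.DefaultOptions("/tmp/badger").
		WithValueLogFileSize(256 << 20).
		WithNumCompactors(2)

	db, err := badger.Open(opt)
	if err != nil {
		log.Fatal(err)
	}
	// Close flushes pending updates; closeOnce makes repeated calls safe.
	defer db.Close()
}
```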
-// cleanup stops all the goroutines started by badger. This is used in open to
-// cleanup goroutines in case of an error.
-func (db *DB) cleanup() {
- db.stopMemoryFlush()
- db.stopCompactions()
-
- db.blockCache.Close()
- db.indexCache.Close()
- if db.closers.updateSize != nil {
- db.closers.updateSize.Signal()
- }
- if db.closers.valueGC != nil {
- db.closers.valueGC.Signal()
- }
- if db.closers.writes != nil {
- db.closers.writes.Signal()
- }
- if db.closers.pub != nil {
- db.closers.pub.Signal()
- }
-
- db.orc.Stop()
-
- // Do not use vlog.Close() here. vlog.Close truncates the files. We don't
- // want to truncate files unless the user has specified the truncate flag.
- db.vlog.stopFlushDiscardStats()
-}
-
-// BlockCacheMetrics returns the metrics for the underlying block cache.
-func (db *DB) BlockCacheMetrics() *ristretto.Metrics {
- if db.blockCache != nil {
- return db.blockCache.Metrics
- }
- return nil
-}
-
-// IndexCacheMetrics returns the metrics for the underlying index cache.
-func (db *DB) IndexCacheMetrics() *ristretto.Metrics {
- if db.indexCache != nil {
- return db.indexCache.Metrics
- }
- return nil
-}
-
-// Close closes a DB. It's crucial to call it to ensure all the pending updates make their way to
-// disk. Calling DB.Close() multiple times would still only close the DB once.
-func (db *DB) Close() error {
- var err error
- db.closeOnce.Do(func() {
- err = db.close()
- })
- return err
-}
-
-// IsClosed reports whether the badger DB is closed. A DB instance should not
-// be used after closing it.
-func (db *DB) IsClosed() bool {
- return atomic.LoadUint32(&db.isClosed) == 1
-}
-
-func (db *DB) close() (err error) {
- db.opt.Debugf("Closing database")
-
- atomic.StoreInt32(&db.blockWrites, 1)
-
- if !db.opt.InMemory {
- // Stop value GC first.
- db.closers.valueGC.SignalAndWait()
- }
-
- // Stop writes next.
- db.closers.writes.SignalAndWait()
-
-	// Don't accept any more writes.
- close(db.writeCh)
-
- db.closers.pub.SignalAndWait()
-
- // Now close the value log.
- if vlogErr := db.vlog.Close(); vlogErr != nil {
- err = errors.Wrap(vlogErr, "DB.Close")
- }
-
- // Make sure that block writer is done pushing stuff into memtable!
- // Otherwise, you will have a race condition: we are trying to flush memtables
- // and remove them completely, while the block / memtable writer is still
- // trying to push stuff into the memtable. This will also resolve the value
- // offset problem: as we push into memtable, we update value offsets there.
- if !db.mt.Empty() {
- db.opt.Debugf("Flushing memtable")
- for {
- pushedFlushTask := func() bool {
- db.Lock()
- defer db.Unlock()
- y.AssertTrue(db.mt != nil)
- select {
- case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
- db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm.
- db.mt = nil // Will segfault if we try writing!
- db.opt.Debugf("pushed to flush chan\n")
- return true
- default:
- // If we fail to push, we need to unlock and wait for a short while.
- // The flushing operation needs to update s.imm. Otherwise, we have a deadlock.
- // TODO: Think about how to do this more cleanly, maybe without any locks.
- }
- return false
- }()
- if pushedFlushTask {
- break
- }
- time.Sleep(10 * time.Millisecond)
- }
- }
- db.stopMemoryFlush()
- db.stopCompactions()
-
- // Force Compact L0
- // We don't need to care about cstatus since no parallel compaction is running.
- if db.opt.CompactL0OnClose {
- err := db.lc.doCompact(173, compactionPriority{level: 0, score: 1.73})
- switch err {
- case errFillTables:
-			// This error only means that there might not be enough tables for a compaction,
-			// so we should not report it to the end user to avoid confusing them.
- case nil:
- db.opt.Infof("Force compaction on level 0 done")
- default:
- db.opt.Warningf("While forcing compaction on level 0: %v", err)
- }
- }
-
- if lcErr := db.lc.close(); err == nil {
- err = errors.Wrap(lcErr, "DB.Close")
- }
- db.opt.Debugf("Waiting for closer")
- db.closers.updateSize.SignalAndWait()
- db.orc.Stop()
- db.blockCache.Close()
- db.indexCache.Close()
-
- atomic.StoreUint32(&db.isClosed, 1)
-
- if db.opt.InMemory {
- return
- }
-
- if db.dirLockGuard != nil {
- if guardErr := db.dirLockGuard.release(); err == nil {
- err = errors.Wrap(guardErr, "DB.Close")
- }
- }
- if db.valueDirGuard != nil {
- if guardErr := db.valueDirGuard.release(); err == nil {
- err = errors.Wrap(guardErr, "DB.Close")
- }
- }
- if manifestErr := db.manifest.close(); err == nil {
- err = errors.Wrap(manifestErr, "DB.Close")
- }
- if registryErr := db.registry.Close(); err == nil {
- err = errors.Wrap(registryErr, "DB.Close")
- }
-
- // Fsync directories to ensure that lock file, and any other removed files whose directory
- // we haven't specifically fsynced, are guaranteed to have their directory entry removal
- // persisted to disk.
- if syncErr := db.syncDir(db.opt.Dir); err == nil {
- err = errors.Wrap(syncErr, "DB.Close")
- }
- if syncErr := db.syncDir(db.opt.ValueDir); err == nil {
- err = errors.Wrap(syncErr, "DB.Close")
- }
-
- return err
-}
-
-// VerifyChecksum verifies checksum for all tables on all levels.
-// This method can be used to verify checksum, if opt.ChecksumVerificationMode is NoVerification.
-func (db *DB) VerifyChecksum() error {
- return db.lc.verifyChecksum()
-}
-
-const (
- lockFile = "LOCK"
-)
-
-// Sync syncs database content to disk. This function provides
-// more control to user to sync data whenever required.
-func (db *DB) Sync() error {
- return db.vlog.sync(math.MaxUint32)
-}
-
-// getMemTables returns the current memtables and increments their references.
-func (db *DB) getMemTables() ([]*skl.Skiplist, func()) {
- db.RLock()
- defer db.RUnlock()
-
- tables := make([]*skl.Skiplist, len(db.imm)+1)
-
- // Get mutable memtable.
- tables[0] = db.mt
- tables[0].IncrRef()
-
- // Get immutable memtables.
- last := len(db.imm) - 1
- for i := range db.imm {
- tables[i+1] = db.imm[last-i]
- tables[i+1].IncrRef()
- }
- return tables, func() {
- for _, tbl := range tables {
- tbl.DecrRef()
- }
- }
-}
-
-// get returns the value in the memtable or on disk for the given key.
-// Note that the value will include the meta byte.
-//
-// IMPORTANT: We should never write an entry with an older timestamp for the same key. We need to
-// maintain this invariant to search for the latest value of a key, or else we need to search in all
-// tables and find the max version among them. To maintain this invariant, we also need to ensure
-// that all versions of a key are always present in the same table from level 1, because compaction
-// can push any table down.
-//
-// Update (Sep 22, 2018): To maintain the above invariant, and to allow keys to be moved from one
-// value log to another (while reclaiming space during value log GC), we have logically moved this
-// need to write "old versions after new versions" to the badgerMove keyspace. Thus, for normal
-// gets, we can stop going down the LSM tree once we find any version of the key (note however that
-// we will ALWAYS skip versions with ts greater than the key version). However, if that key has
-// been moved, then for the corresponding movekey, we'll look through all the levels of the tree
-// to ensure that we pick the highest version of the movekey present.
-func (db *DB) get(key []byte) (y.ValueStruct, error) {
- if db.IsClosed() {
- return y.ValueStruct{}, ErrDBClosed
- }
- tables, decr := db.getMemTables() // Lock should be released.
- defer decr()
-
- var maxVs *y.ValueStruct
- var version uint64
- if bytes.HasPrefix(key, badgerMove) {
- // If we are checking badgerMove key, we should look into all the
- // levels, so we can pick up the newer versions, which might have been
- // compacted down the tree.
- maxVs = &y.ValueStruct{}
- version = y.ParseTs(key)
- }
-
- y.NumGets.Add(1)
- for i := 0; i < len(tables); i++ {
- vs := tables[i].Get(key)
- y.NumMemtableGets.Add(1)
- if vs.Meta == 0 && vs.Value == nil {
- continue
- }
- // Found a version of the key. For user keyspace, return immediately. For move keyspace,
- // continue iterating, unless we found a version == given key version.
- if maxVs == nil || vs.Version == version {
- return vs, nil
- }
- if maxVs.Version < vs.Version {
- *maxVs = vs
- }
- }
- return db.lc.get(key, maxVs, 0)
-}
-
-// updateHead should not be called without holding db.Lock(), since db.vhead is
-// used by the writer goroutines and the memtable flushing goroutine.
-func (db *DB) updateHead(ptrs []valuePointer) {
- var ptr valuePointer
- for i := len(ptrs) - 1; i >= 0; i-- {
- p := ptrs[i]
- if !p.IsZero() {
- ptr = p
- break
- }
- }
- if ptr.IsZero() {
- return
- }
-
- y.AssertTrue(!ptr.Less(db.vhead))
- db.vhead = ptr
-}
-
-var requestPool = sync.Pool{
- New: func() interface{} {
- return new(request)
- },
-}
-
-func (db *DB) shouldWriteValueToLSM(e Entry) bool {
- return len(e.Value) < db.opt.ValueThreshold
-}
-
-func (db *DB) writeToLSM(b *request) error {
-	// We should check the length of b.Ptrs and b.Entries only when badger is not
- // running in InMemory mode. In InMemory mode, we don't write anything to the
- // value log and that's why the length of b.Ptrs will always be zero.
- if !db.opt.InMemory && len(b.Ptrs) != len(b.Entries) {
- return errors.Errorf("Ptrs and Entries don't match: %+v", b)
- }
-
- for i, entry := range b.Entries {
- if entry.meta&bitFinTxn != 0 {
- continue
- }
- if db.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case.
- db.mt.Put(entry.Key,
- y.ValueStruct{
- Value: entry.Value,
- // Ensure value pointer flag is removed. Otherwise, the value will fail
- // to be retrieved during iterator prefetch. `bitValuePointer` is only
- // known to be set in write to LSM when the entry is loaded from a backup
- // with lower ValueThreshold and its value was stored in the value log.
- Meta: entry.meta &^ bitValuePointer,
- UserMeta: entry.UserMeta,
- ExpiresAt: entry.ExpiresAt,
- })
- } else {
- db.mt.Put(entry.Key,
- y.ValueStruct{
- Value: b.Ptrs[i].Encode(),
- Meta: entry.meta | bitValuePointer,
- UserMeta: entry.UserMeta,
- ExpiresAt: entry.ExpiresAt,
- })
- }
- }
- return nil
-}
-
-// writeRequests is called serially by only one goroutine.
-func (db *DB) writeRequests(reqs []*request) error {
- if len(reqs) == 0 {
- return nil
- }
-
- done := func(err error) {
- for _, r := range reqs {
- r.Err = err
- r.Wg.Done()
- }
- }
- db.opt.Debugf("writeRequests called. Writing to value log")
- err := db.vlog.write(reqs)
- if err != nil {
- done(err)
- return err
- }
-
- db.opt.Debugf("Sending updates to subscribers")
- db.pub.sendUpdates(reqs)
- db.opt.Debugf("Writing to memtable")
- var count int
- for _, b := range reqs {
- if len(b.Entries) == 0 {
- continue
- }
- count += len(b.Entries)
- var i uint64
- for err = db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() {
- i++
- if i%100 == 0 {
- db.opt.Debugf("Making room for writes")
- }
- // We need to poll a bit because both hasRoomForWrite and the flusher need access to s.imm.
- // When flushChan is full and you are blocked there, and the flusher is trying to update s.imm,
- // you will get a deadlock.
- time.Sleep(10 * time.Millisecond)
- }
- if err != nil {
- done(err)
- return errors.Wrap(err, "writeRequests")
- }
- if err := db.writeToLSM(b); err != nil {
- done(err)
- return errors.Wrap(err, "writeRequests")
- }
- db.Lock()
- db.updateHead(b.Ptrs)
- db.Unlock()
- }
- done(nil)
- db.opt.Debugf("%d entries written", count)
- return nil
-}
-
-func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) {
- if atomic.LoadInt32(&db.blockWrites) == 1 {
- return nil, ErrBlockedWrites
- }
- var count, size int64
- for _, e := range entries {
- size += int64(e.estimateSize(db.opt.ValueThreshold))
- count++
- }
- if count >= db.opt.maxBatchCount || size >= db.opt.maxBatchSize {
- return nil, ErrTxnTooBig
- }
-
-	// We can only service one request because we need each txn to be stored in a contiguous section.
-	// Txns should not interleave among other txns or rewrites.
- req := requestPool.Get().(*request)
- req.reset()
- req.Entries = entries
- req.Wg.Add(1)
- req.IncrRef() // for db write
- db.writeCh <- req // Handled in doWrites.
- y.NumPuts.Add(int64(len(entries)))
-
- return req, nil
-}
-
-func (db *DB) doWrites(lc *y.Closer) {
- defer lc.Done()
- pendingCh := make(chan struct{}, 1)
-
- writeRequests := func(reqs []*request) {
- if err := db.writeRequests(reqs); err != nil {
- db.opt.Errorf("writeRequests: %v", err)
- }
- <-pendingCh
- }
-
- // This variable tracks the number of pending writes.
- reqLen := new(expvar.Int)
- y.PendingWrites.Set(db.opt.Dir, reqLen)
-
- reqs := make([]*request, 0, 10)
- for {
- var r *request
- select {
- case r = <-db.writeCh:
- case <-lc.HasBeenClosed():
- goto closedCase
- }
-
- for {
- reqs = append(reqs, r)
- reqLen.Set(int64(len(reqs)))
-
- if len(reqs) >= 3*kvWriteChCapacity {
- pendingCh <- struct{}{} // blocking.
- goto writeCase
- }
-
- select {
- // Either push to pending, or continue to pick from writeCh.
- case r = <-db.writeCh:
- case pendingCh <- struct{}{}:
- goto writeCase
- case <-lc.HasBeenClosed():
- goto closedCase
- }
- }
-
- closedCase:
-		// All the pending requests are drained.
-		// Don't close writeCh, because it is used in several places.
- for {
- select {
- case r = <-db.writeCh:
- reqs = append(reqs, r)
- default:
- pendingCh <- struct{}{} // Push to pending before doing a write.
- writeRequests(reqs)
- return
- }
- }
-
- writeCase:
- go writeRequests(reqs)
- reqs = make([]*request, 0, 10)
- reqLen.Set(0)
- }
-}
-
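The goto-based loop above is easy to misread: doWrites accumulates requests from writeCh and uses the one-slot pendingCh as a semaphore, so at most one batch is being written while the next one accumulates. A stripped-down, self-contained sketch of the same pattern (illustrative names, not Badger's exact control flow):

```go
package main

import "fmt"

// batchWriter mirrors doWrites' shape: it accumulates items from in and
// uses the one-slot pending channel so at most one batch write is in
// flight while the next batch accumulates.
func batchWriter(in <-chan int, done <-chan struct{}) {
	pending := make(chan struct{}, 1)
	write := func(batch []int) {
		fmt.Println("writing batch:", batch)
		<-pending // Free the slot once the write completes.
	}
	var batch []int
	for {
		select {
		case v := <-in:
			batch = append(batch, v)
			select {
			case pending <- struct{}{}: // Slot free: hand the batch off.
				go write(batch)
				batch = nil
			default: // A write is in flight: keep accumulating.
			}
		case <-done:
			pending <- struct{}{} // Wait for any in-flight write to finish.
			write(batch)          // Flush the remainder (possibly empty).
			return
		}
	}
}

func main() {
	in, done := make(chan int), make(chan struct{})
	go func() {
		for i := 0; i < 10; i++ {
			in <- i
		}
		close(done)
	}()
	batchWriter(in, done)
}
```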
-// batchSet applies a list of badger.Entry. If a request-level error occurs, it
-// will be returned. Example:
-//	Check(kv.BatchSet(entries))
-func (db *DB) batchSet(entries []*Entry) error {
- req, err := db.sendToWriteCh(entries)
- if err != nil {
- return err
- }
-
- return req.Wait()
-}
-
-// batchSetAsync is the asynchronous version of batchSet. It accepts a callback
-// function which is called when all the sets are complete. If a request-level
-// error occurs, it will be passed back via the callback.
-//	err := kv.BatchSetAsync(entries, func(err error) {
-//		Check(err)
-//	})
-func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error {
- req, err := db.sendToWriteCh(entries)
- if err != nil {
- return err
- }
- go func() {
- err := req.Wait()
- // Write is complete. Let's call the callback function now.
- f(err)
- }()
- return nil
-}
-
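batchSet and batchSetAsync are internal; in this version of Badger the exported way to push many entries through sendToWriteCh is WriteBatch. A hedged sketch, assuming the v2 WriteBatch API (key and value contents are illustrative):

```go
package example

import (
	"fmt"

	badger "github.com/dgraph-io/badger/v2"
)

// writeMany loads n illustrative key-value pairs through Badger's exported
// batching API, which funnels into the write channel handled by doWrites.
func writeMany(db *badger.DB, n int) error {
	wb := db.NewWriteBatch()
	defer wb.Cancel() // Safe even after a successful Flush.

	for i := 0; i < n; i++ {
		key := []byte(fmt.Sprintf("key-%06d", i))
		// Set batches internally and splits when a batch grows too big.
		if err := wb.Set(key, []byte("value")); err != nil {
			return err
		}
	}
	return wb.Flush() // Blocks until every enqueued request is written.
}
```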
-var errNoRoom = errors.New("No room for write")
-
-// ensureRoomForWrite is always called serially.
-func (db *DB) ensureRoomForWrite() error {
- var err error
- db.Lock()
- defer db.Unlock()
-
-	// Here we determine whether we need to force flush the memtable. Given that we rotated the
-	// log file, it makes sense to force flush a memtable, so the updated value head has a chance
-	// to be pushed to L0. Otherwise, it would not reach L0 until the memtable fills up completely,
-	// which can take a lot longer if the write load has fewer keys and larger values. This force
-	// flush thus avoids the need to read through a lot of log files on a crash and restart.
-	// This approach is quite simple, with a small drawback: we call ensureRoomForWrite before
-	// inserting every entry into the memtable, but db.head is only updated after all entries for
-	// a request have been inserted. So if we have done >= db.opt.LogRotatesToFlush rotations, the
-	// condition below is already true while inserting the first entry, and we end up flushing an
-	// old value of db.head. Hence the number of value log files to be read on restart is limited
-	// to db.opt.LogRotatesToFlush only.
- forceFlush := atomic.LoadInt32(&db.logRotates) >= db.opt.LogRotatesToFlush
-
- if !forceFlush && db.mt.MemSize() < db.opt.MaxTableSize {
- return nil
- }
-
- y.AssertTrue(db.mt != nil) // A nil mt indicates that DB is being closed.
- select {
- case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
- // After every memtable flush, let's reset the counter.
- atomic.StoreInt32(&db.logRotates, 0)
-
- // Ensure value log is synced to disk so this memtable's contents wouldn't be lost.
- err = db.vlog.sync(db.vhead.Fid)
- if err != nil {
- return err
- }
-
- db.opt.Debugf("Flushing memtable, mt.size=%d size of flushChan: %d\n",
- db.mt.MemSize(), len(db.flushChan))
- // We manage to push this task. Let's modify imm.
- db.imm = append(db.imm, db.mt)
- db.mt = skl.NewSkiplist(arenaSize(db.opt))
- // New memtable is empty. We certainly have room.
- return nil
- default:
- // We need to do this to unlock and allow the flusher to modify imm.
- return errNoRoom
- }
-}
-
-func arenaSize(opt Options) int64 {
- return opt.MaxTableSize + opt.maxBatchSize + opt.maxBatchCount*int64(skl.MaxNodeSize)
-}
-
-// buildL0Table builds a new table from the memtable.
-func buildL0Table(ft flushTask, bopts table.Options) []byte {
- iter := ft.mt.NewIterator()
- defer iter.Close()
- b := table.NewTableBuilder(bopts)
- defer b.Close()
- var vp valuePointer
- for iter.SeekToFirst(); iter.Valid(); iter.Next() {
- if len(ft.dropPrefixes) > 0 && hasAnyPrefixes(iter.Key(), ft.dropPrefixes) {
- continue
- }
- vs := iter.Value()
- if vs.Meta&bitValuePointer > 0 {
- vp.Decode(vs.Value)
- }
- b.Add(iter.Key(), iter.Value(), vp.Len)
- }
- return b.Finish()
-}
-
-type flushTask struct {
- mt *skl.Skiplist
- vptr valuePointer
- dropPrefixes [][]byte
-}
-
-func (db *DB) pushHead(ft flushTask) error {
-	// We don't need to store the head pointer in in-memory mode, since we will
-	// never replay anything.
- if db.opt.InMemory {
- return nil
- }
- // Ensure we never push a zero valued head pointer.
- if ft.vptr.IsZero() {
- return errors.New("Head should not be zero")
- }
-
-	// Store the badger head pointer; it is needed for readTs.
- db.opt.Infof("Storing value log head: %+v\n", ft.vptr)
- val := ft.vptr.Encode()
-
- // Pick the max commit ts, so in case of crash, our read ts would be higher than all the
- // commits.
- headTs := y.KeyWithTs(head, db.orc.nextTs())
- ft.mt.Put(headTs, y.ValueStruct{Value: val})
-
- return nil
-}
-
-// handleFlushTask must be run serially.
-func (db *DB) handleFlushTask(ft flushTask) error {
-	// There can be a scenario where an empty memtable is flushed. For example, the memtable is
-	// empty and, after writing a request to the value log, the rotation count exceeds
-	// db.opt.LogRotatesToFlush.
- if ft.mt.Empty() {
- return nil
- }
-
- if err := db.pushHead(ft); err != nil {
- return err
- }
-
- dk, err := db.registry.latestDataKey()
- if err != nil {
- return y.Wrapf(err, "failed to get datakey in db.handleFlushTask")
- }
- bopts := buildTableOptions(db.opt)
- bopts.DataKey = dk
- // Builder does not need cache but the same options are used for opening table.
- bopts.BlockCache = db.blockCache
- bopts.IndexCache = db.indexCache
- tableData := buildL0Table(ft, bopts)
-
- fileID := db.lc.reserveFileID()
- if db.opt.KeepL0InMemory {
- tbl, err := table.OpenInMemoryTable(tableData, fileID, &bopts)
- if err != nil {
- return errors.Wrapf(err, "failed to open table in memory")
- }
- return db.lc.addLevel0Table(tbl)
- }
-
- fd, err := y.CreateSyncedFile(table.NewFilename(fileID, db.opt.Dir), true)
- if err != nil {
- return y.Wrap(err)
- }
-
- // Don't block just to sync the directory entry.
- dirSyncCh := make(chan error, 1)
- go func() { dirSyncCh <- db.syncDir(db.opt.Dir) }()
-
- if _, err = fd.Write(tableData); err != nil {
- db.opt.Errorf("ERROR while writing to level 0: %v", err)
- return err
- }
-
- if dirSyncErr := <-dirSyncCh; dirSyncErr != nil {
- // Do dir sync as best effort. No need to return due to an error there.
- db.opt.Errorf("ERROR while syncing level directory: %v", dirSyncErr)
- }
- tbl, err := table.OpenTable(fd, bopts)
- if err != nil {
- db.opt.Debugf("ERROR while opening table: %v", err)
- return err
- }
- // We own a ref on tbl.
- err = db.lc.addLevel0Table(tbl) // This will incrRef
- _ = tbl.DecrRef() // Releases our ref.
- return err
-}
-
-// flushMemtable must keep running until we send it an empty flushTask. If there
-// are errors while handling the flush task, we'll retry indefinitely.
-func (db *DB) flushMemtable(lc *y.Closer) error {
- defer lc.Done()
-
- for ft := range db.flushChan {
- if ft.mt == nil {
- // We close db.flushChan now, instead of sending a nil ft.mt.
- continue
- }
- for {
- err := db.handleFlushTask(ft)
- if err == nil {
- // Update s.imm. Need a lock.
- db.Lock()
- // This is a single-threaded operation. ft.mt corresponds to the head of
- // db.imm list. Once we flush it, we advance db.imm. The next ft.mt
- // which would arrive here would match db.imm[0], because we acquire a
- // lock over DB when pushing to flushChan.
- // TODO: This logic is dirty AF. Any change and this could easily break.
- y.AssertTrue(ft.mt == db.imm[0])
- db.imm = db.imm[1:]
- ft.mt.DecrRef() // Return memory.
- db.Unlock()
-
- break
- }
- // Encountered error. Retry indefinitely.
- db.opt.Errorf("Failure while flushing memtable to disk: %v. Retrying...\n", err)
- time.Sleep(time.Second)
- }
- }
- return nil
-}
-
-func exists(path string) (bool, error) {
- _, err := os.Stat(path)
- if err == nil {
- return true, nil
- }
- if os.IsNotExist(err) {
- return false, nil
- }
- return true, err
-}
-
-// calculateSize does a file walk, calculates the size of vlog and sst files, and stores it in
-// y.LSMSize and y.VlogSize.
-func (db *DB) calculateSize() {
- if db.opt.InMemory {
- return
- }
- newInt := func(val int64) *expvar.Int {
- v := new(expvar.Int)
- v.Add(val)
- return v
- }
-
- totalSize := func(dir string) (int64, int64) {
- var lsmSize, vlogSize int64
- err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- ext := filepath.Ext(path)
- switch ext {
- case ".sst":
- lsmSize += info.Size()
- case ".vlog":
- vlogSize += info.Size()
- }
- return nil
- })
- if err != nil {
- db.opt.Debugf("Got error while calculating total size of directory: %s", dir)
- }
- return lsmSize, vlogSize
- }
-
- lsmSize, vlogSize := totalSize(db.opt.Dir)
- y.LSMSize.Set(db.opt.Dir, newInt(lsmSize))
- // If valueDir is different from dir, we'd have to do another walk.
- if db.opt.ValueDir != db.opt.Dir {
- _, vlogSize = totalSize(db.opt.ValueDir)
- }
- y.VlogSize.Set(db.opt.ValueDir, newInt(vlogSize))
-}
-
-func (db *DB) updateSize(lc *y.Closer) {
- defer lc.Done()
- if db.opt.InMemory {
- return
- }
-
- metricsTicker := time.NewTicker(time.Minute)
- defer metricsTicker.Stop()
-
- for {
- select {
- case <-metricsTicker.C:
- db.calculateSize()
- case <-lc.HasBeenClosed():
- return
- }
- }
-}
-
-// RunValueLogGC triggers a value log garbage collection.
-//
-// It picks value log files to perform GC based on statistics that are collected
-// during compactions. If no such statistics are available, then log files are
-// picked in random order. The process stops as soon as the first log file is
-// encountered which does not result in garbage collection.
-//
-// When a log file is picked, it is first sampled. If the sample shows that we
-// can discard at least discardRatio space of that file, it would be rewritten.
-//
-// If a call to RunValueLogGC results in no rewrites, then ErrNoRewrite is
-// returned, indicating that the call resulted in no file rewrites.
-//
-// We recommend setting discardRatio to 0.5, thus indicating that a file be
-// rewritten if half the space can be discarded. This results in a lifetime
-// value log write amplification of 2 (1 from original write + 0.5 rewrite +
-// 0.25 + 0.125 + ... = 2). Setting it to a higher value would result in fewer
-// space reclaims, while setting it to a lower value would result in more space
-// reclaims at the cost of increased activity on the LSM tree. discardRatio
-// must be in the range (0.0, 1.0), both endpoints excluded, otherwise an
-// ErrInvalidRequest is returned.
-//
-// Only one GC is allowed at a time. If another value log GC is running, or DB
-// has been closed, this would return an ErrRejected.
-//
-// Note: Every time GC is run, it would produce a spike of activity on the LSM
-// tree.
-func (db *DB) RunValueLogGC(discardRatio float64) error {
- if db.opt.InMemory {
- return ErrGCInMemoryMode
- }
- if discardRatio >= 1.0 || discardRatio <= 0.0 {
- return ErrInvalidRequest
- }
-
- // startLevel is the level from which we should search for the head key. When badger is running
- // with KeepL0InMemory flag, all tables on L0 are kept in memory. This means we should pick head
- // key from Level 1 onwards because if we pick the headkey from Level 0 we might end up losing
- // data. See test TestL0GCBug.
- startLevel := 0
- if db.opt.KeepL0InMemory {
- startLevel = 1
- }
- // Find head on disk
- headKey := y.KeyWithTs(head, math.MaxUint64)
-	// We need to pass the key with a timestamp; the LSM get strips the last 8 bytes and compares the key.
- val, err := db.lc.get(headKey, nil, startLevel)
- if err != nil {
- return errors.Wrap(err, "Retrieving head from on-disk LSM")
- }
-
- var head valuePointer
- if len(val.Value) > 0 {
- head.Decode(val.Value)
- }
-
- // Pick a log file and run GC
- return db.vlog.runGC(discardRatio, head)
-}
-
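Because RunValueLogGC stops at the first file that yields no rewrite, callers typically loop until ErrNoRewrite comes back, often driven by a ticker. A sketch under those documented semantics (the interval and ratio are illustrative):

```go
package example

import (
	"time"

	badger "github.com/dgraph-io/badger/v2"
)

// runGC periodically reclaims value log space, looping while GC keeps
// finding files to rewrite, per the RunValueLogGC contract above.
func runGC(db *badger.DB, stop <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// Rewrite files where at least half the space is discardable;
			// stop on ErrNoRewrite (or any other error).
			for db.RunValueLogGC(0.5) == nil {
			}
		case <-stop:
			return
		}
	}
}
```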
-// Size returns the size of lsm and value log files in bytes. It can be used to decide how often to
-// call RunValueLogGC.
-func (db *DB) Size() (lsm, vlog int64) {
- if y.LSMSize.Get(db.opt.Dir) == nil {
- lsm, vlog = 0, 0
- return
- }
- lsm = y.LSMSize.Get(db.opt.Dir).(*expvar.Int).Value()
- vlog = y.VlogSize.Get(db.opt.ValueDir).(*expvar.Int).Value()
- return
-}
-
-// Sequence represents a Badger sequence.
-type Sequence struct {
- sync.Mutex
- db *DB
- key []byte
- next uint64
- leased uint64
- bandwidth uint64
-}
-
-// Next returns the next integer in the sequence, updating the lease by running a transaction
-// if needed.
-func (seq *Sequence) Next() (uint64, error) {
- seq.Lock()
- defer seq.Unlock()
- if seq.next >= seq.leased {
- if err := seq.updateLease(); err != nil {
- return 0, err
- }
- }
- val := seq.next
- seq.next++
- return val, nil
-}
-
-// Release releases the leased sequence to avoid wasted integers. This should be done right
-// before closing the associated DB. However, it is valid to use the sequence after it has been
-// released; doing so causes a new lease with full bandwidth.
-func (seq *Sequence) Release() error {
- seq.Lock()
- defer seq.Unlock()
- err := seq.db.Update(func(txn *Txn) error {
- item, err := txn.Get(seq.key)
- if err != nil {
- return err
- }
-
- var num uint64
- if err := item.Value(func(v []byte) error {
- num = binary.BigEndian.Uint64(v)
- return nil
- }); err != nil {
- return err
- }
-
- if num == seq.leased {
- var buf [8]byte
- binary.BigEndian.PutUint64(buf[:], seq.next)
- return txn.SetEntry(NewEntry(seq.key, buf[:]))
- }
-
- return nil
- })
- if err != nil {
- return err
- }
- seq.leased = seq.next
- return nil
-}
-
-func (seq *Sequence) updateLease() error {
- return seq.db.Update(func(txn *Txn) error {
- item, err := txn.Get(seq.key)
- switch {
- case err == ErrKeyNotFound:
- seq.next = 0
- case err != nil:
- return err
- default:
- var num uint64
- if err := item.Value(func(v []byte) error {
- num = binary.BigEndian.Uint64(v)
- return nil
- }); err != nil {
- return err
- }
- seq.next = num
- }
-
- lease := seq.next + seq.bandwidth
- var buf [8]byte
- binary.BigEndian.PutUint64(buf[:], lease)
- if err = txn.SetEntry(NewEntry(seq.key, buf[:])); err != nil {
- return err
- }
- seq.leased = lease
- return nil
- })
-}
-
-// GetSequence initiates a new sequence object, restoring it from the lease stored in the
-// database, if available. A Sequence can be used to get a list of monotonically increasing
-// integers. Multiple sequences can be created by providing different keys. Bandwidth sets the
-// size of the lease, determining how many Next() requests can be served from memory.
-//
-// GetSequence is not supported on ManagedDB. Calling this would result in a panic.
-func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) {
- if db.opt.managedTxns {
- panic("Cannot use GetSequence with managedDB=true.")
- }
-
- switch {
- case len(key) == 0:
- return nil, ErrEmptyKey
- case bandwidth == 0:
- return nil, ErrZeroBandwidth
- }
- seq := &Sequence{
- db: db,
- key: key,
- next: 0,
- leased: 0,
- bandwidth: bandwidth,
- }
- err := seq.updateLease()
- return seq, err
-}
-
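The intended lifecycle of the lease mechanics above: one GetSequence at startup, many cheap Next calls, and one Release shortly before closing the DB. A sketch with illustrative key and bandwidth values:

```go
package example

import badger "github.com/dgraph-io/badger/v2"

// issueIDs shows the intended lifecycle: one GetSequence up front, many
// cheap Next calls, one Release at the end. With a bandwidth of 1000, only
// every thousandth Next pays for an update transaction.
func issueIDs(db *badger.DB) ([]uint64, error) {
	seq, err := db.GetSequence([]byte("user-id"), 1000)
	if err != nil {
		return nil, err
	}
	// Release hands the unused tail of the lease back; in real code this is
	// usually deferred until just before db.Close.
	defer func() { _ = seq.Release() }()

	ids := make([]uint64, 0, 5)
	for i := 0; i < 5; i++ {
		id, err := seq.Next()
		if err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, nil
}
```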
-// Tables gets the TableInfo objects from the level controller. If withKeysCount
-// is true, TableInfo objects also contain counts of keys for the tables.
-func (db *DB) Tables(withKeysCount bool) []TableInfo {
- return db.lc.getTableInfo(withKeysCount)
-}
-
-// KeySplits can be used to get rough key ranges to divide up iteration over
-// the DB.
-func (db *DB) KeySplits(prefix []byte) []string {
- var splits []string
- // We just want table ranges here and not keys count.
- for _, ti := range db.Tables(false) {
- // We don't use ti.Left, because that has a tendency to store !badger
- // keys.
- if bytes.HasPrefix(ti.Right, prefix) {
- splits = append(splits, string(ti.Right))
- }
- }
- sort.Strings(splits)
- return splits
-}
-
-// MaxBatchCount returns max possible entries in batch
-func (db *DB) MaxBatchCount() int64 {
- return db.opt.maxBatchCount
-}
-
-// MaxBatchSize returns max possible batch size
-func (db *DB) MaxBatchSize() int64 {
- return db.opt.maxBatchSize
-}
-
-func (db *DB) stopMemoryFlush() {
- // Stop memtable flushes.
- if db.closers.memtable != nil {
- close(db.flushChan)
- db.closers.memtable.SignalAndWait()
- }
-}
-
-func (db *DB) stopCompactions() {
- // Stop compactions.
- if db.closers.compactors != nil {
- db.closers.compactors.SignalAndWait()
- }
-}
-
-func (db *DB) startCompactions() {
- // Resume compactions.
- if db.closers.compactors != nil {
- db.closers.compactors = y.NewCloser(1)
- db.lc.startCompact(db.closers.compactors)
- }
-}
-
-func (db *DB) startMemoryFlush() {
-	// Start the memory flusher.
- if db.closers.memtable != nil {
- db.flushChan = make(chan flushTask, db.opt.NumMemtables)
- db.closers.memtable = y.NewCloser(1)
- go func() {
- _ = db.flushMemtable(db.closers.memtable)
- }()
- }
-}
-
-// Flatten can be used to force compactions on the LSM tree so all the tables fall on the same
-// level. This ensures that all the versions of keys are colocated and not split across multiple
-// levels, which is necessary after a restore from backup. During Flatten, live compactions are
-// stopped. Ideally, no writes are going on during Flatten. Otherwise, it would create competition
-// between flattening the tree and new tables being created at level zero.
-func (db *DB) Flatten(workers int) error {
- db.stopCompactions()
- defer db.startCompactions()
-
- compactAway := func(cp compactionPriority) error {
- db.opt.Infof("Attempting to compact with %+v\n", cp)
- errCh := make(chan error, 1)
- for i := 0; i < workers; i++ {
- go func() {
- errCh <- db.lc.doCompact(175, cp)
- }()
- }
- var success int
- var rerr error
- for i := 0; i < workers; i++ {
- err := <-errCh
- if err != nil {
- rerr = err
- db.opt.Warningf("While running doCompact with %+v. Error: %v\n", cp, err)
- } else {
- success++
- }
- }
- if success == 0 {
- return rerr
- }
-		// We managed at least one successful compaction, so we'll consider this a success.
- db.opt.Infof("%d compactor(s) succeeded. One or more tables from level %d compacted.\n",
- success, cp.level)
- return nil
- }
-
- hbytes := func(sz int64) string {
- return humanize.Bytes(uint64(sz))
- }
-
- for {
- db.opt.Infof("\n")
- var levels []int
- for i, l := range db.lc.levels {
- sz := l.getTotalSize()
- db.opt.Infof("Level: %d. %8s Size. %8s Max.\n",
- i, hbytes(l.getTotalSize()), hbytes(l.maxTotalSize))
- if sz > 0 {
- levels = append(levels, i)
- }
- }
- if len(levels) <= 1 {
- prios := db.lc.pickCompactLevels()
- if len(prios) == 0 || prios[0].score <= 1.0 {
- db.opt.Infof("All tables consolidated into one level. Flattening done.\n")
- return nil
- }
- if err := compactAway(prios[0]); err != nil {
- return err
- }
- continue
- }
- // Create an artificial compaction priority, to ensure that we compact the level.
- cp := compactionPriority{level: levels[0], score: 1.71}
- if err := compactAway(cp); err != nil {
- return err
- }
- }
-}
-
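A hedged invocation sketch; per the doc comment above, this assumes writes are paused, for example right after a restore from backup:

```go
package example

import (
	"runtime"

	badger "github.com/dgraph-io/badger/v2"
)

// flattenAfterRestore compacts every populated level into one, as is
// recommended after restoring from a backup. The worker count bounds the
// parallel doCompact calls; NumCPU is an illustrative choice.
func flattenAfterRestore(db *badger.DB) error {
	return db.Flatten(runtime.NumCPU())
}
```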
-func (db *DB) blockWrite() error {
- // Stop accepting new writes.
- if !atomic.CompareAndSwapInt32(&db.blockWrites, 0, 1) {
- return ErrBlockedWrites
- }
-
- // Make all pending writes finish. The following will also close writeCh.
- db.closers.writes.SignalAndWait()
- db.opt.Infof("Writes flushed. Stopping compactions now...")
- return nil
-}
-
-func (db *DB) unblockWrite() {
- db.closers.writes = y.NewCloser(1)
- go db.doWrites(db.closers.writes)
-
- // Resume writes.
- atomic.StoreInt32(&db.blockWrites, 0)
-}
-
-func (db *DB) prepareToDrop() (func(), error) {
- if db.opt.ReadOnly {
- panic("Attempting to drop data in read-only mode.")
- }
-	// To prepare for a drop, we need to block incoming writes, flush whatever
-	// is already in the write channel to the DB, and then flush all pending
-	// flush tasks, so that we don't miss any entries.
- if err := db.blockWrite(); err != nil {
- return nil, err
- }
- reqs := make([]*request, 0, 10)
- for {
- select {
- case r := <-db.writeCh:
- reqs = append(reqs, r)
- default:
- if err := db.writeRequests(reqs); err != nil {
- db.opt.Errorf("writeRequests: %v", err)
- }
- db.stopMemoryFlush()
- return func() {
- db.opt.Infof("Resuming writes")
- db.startMemoryFlush()
- db.unblockWrite()
- }, nil
- }
- }
-}
-
-// DropAll drops all the data stored in Badger. It does this in the following way:
-// - Stop accepting new writes.
-// - Pause memtable flushes and compactions.
-// - Pick all tables from all levels, create a changeset to delete all these
-// tables and apply it to manifest.
-// - Pick all log files from value log, and delete all of them. Restart value log files from zero.
-// - Resume memtable flushes and compactions.
-//
-// NOTE: DropAll is resilient to concurrent writes, but not to reads. It is up to the user not to
-// do any reads while DropAll is going on; otherwise they may result in panics. Ideally, both
-// reads and writes are paused before running DropAll, and resumed after it is finished.
-func (db *DB) DropAll() error {
- f, err := db.dropAll()
- if f != nil {
- f()
- }
- return err
-}
-
-func (db *DB) dropAll() (func(), error) {
- db.opt.Infof("DropAll called. Blocking writes...")
- f, err := db.prepareToDrop()
- if err != nil {
- return f, err
- }
-	// prepareToDrop stops all incoming writes and flushes any pending flush tasks.
-	// Before we drop, we stop compactions, because all the data is going to be
-	// deleted anyway.
- db.stopCompactions()
- resume := func() {
- db.startCompactions()
- f()
- }
- // Block all foreign interactions with memory tables.
- db.Lock()
- defer db.Unlock()
-
-	// Remove in-memory tables. Calling DecrRef for safety; not sure if it's absolutely needed.
- db.mt.DecrRef()
- for _, mt := range db.imm {
- mt.DecrRef()
- }
- db.imm = db.imm[:0]
- db.mt = skl.NewSkiplist(arenaSize(db.opt)) // Set it up for future writes.
-
- num, err := db.lc.dropTree()
- if err != nil {
- return resume, err
- }
- db.opt.Infof("Deleted %d SSTables. Now deleting value logs...\n", num)
-
- num, err = db.vlog.dropAll()
- if err != nil {
- return resume, err
- }
- db.vhead = valuePointer{} // Zero it out.
- db.lc.nextFileID = 1
- db.opt.Infof("Deleted %d value log files. DropAll done.\n", num)
- db.blockCache.Clear()
- db.indexCache.Clear()
-
- return resume, nil
-}
-
-// DropPrefix drops all the keys with the provided prefix. It does this in the following way:
-// - Stop accepting new writes.
-// - Stop memtable flushes before acquiring the lock. We acquire the lock here,
-//   and a memtable flush stalls on the same lock, which would lead to a deadlock.
-// - Flush out all memtables, skipping over keys with the given prefix, Kp.
-// - Write out the value log header to memtables when flushing, so we don't accidentally bring Kp
-// back after a restart.
-// - Stop compaction.
-// - Compact L0->L1, skipping over Kp.
-// - Compact rest of the levels, Li->Li, picking tables which have Kp.
-// - Resume memtable flushes, compactions and writes.
-func (db *DB) DropPrefix(prefixes ...[]byte) error {
- db.opt.Infof("DropPrefix Called")
- f, err := db.prepareToDrop()
- if err != nil {
- return err
- }
- defer f()
- // Block all foreign interactions with memory tables.
- db.Lock()
- defer db.Unlock()
-
- db.imm = append(db.imm, db.mt)
- for _, memtable := range db.imm {
- if memtable.Empty() {
- memtable.DecrRef()
- continue
- }
- task := flushTask{
- mt: memtable,
- // Ensure that the head of value log gets persisted to disk.
- vptr: db.vhead,
- dropPrefixes: prefixes,
- }
- db.opt.Debugf("Flushing memtable")
- if err := db.handleFlushTask(task); err != nil {
- db.opt.Errorf("While trying to flush memtable: %v", err)
- return err
- }
- memtable.DecrRef()
- }
- db.stopCompactions()
- defer db.startCompactions()
- db.imm = db.imm[:0]
- db.mt = skl.NewSkiplist(arenaSize(db.opt))
-
- // Drop prefixes from the levels.
- if err := db.lc.dropPrefixes(prefixes); err != nil {
- return err
- }
- db.opt.Infof("DropPrefix done")
- return nil
-}
-
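A sketch of dropping one keyspace; the prefix is illustrative, and per the notes above the caller is responsible for quiescing readers first:

```go
package example

import badger "github.com/dgraph-io/badger/v2"

// clearSessions deletes every key under an illustrative "session/" prefix.
// Reads must be paused by the caller; see the DropAll/DropPrefix notes above.
func clearSessions(db *badger.DB) error {
	return db.DropPrefix([]byte("session/"))
}
```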
-// KVList contains a list of key-value pairs.
-type KVList = pb.KVList
-
-// Subscribe can be used to watch key changes for the given key prefixes.
-// At least one prefix should be passed, or an error will be returned.
-// You can use an empty prefix to monitor all changes to the DB.
-// This function blocks until the given context is done or an error occurs.
-// The given function will be called with a new KVList containing the modified keys and the
-// corresponding values.
-func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, prefixes ...[]byte) error {
- if cb == nil {
- return ErrNilCallback
- }
-
- c := y.NewCloser(1)
- recvCh, id := db.pub.newSubscriber(c, prefixes...)
- slurp := func(batch *pb.KVList) error {
- for {
- select {
- case kvs := <-recvCh:
- batch.Kv = append(batch.Kv, kvs.Kv...)
- default:
- if len(batch.GetKv()) > 0 {
- return cb(batch)
- }
- return nil
- }
- }
- }
- for {
- select {
- case <-c.HasBeenClosed():
- // No need to delete here. Closer will be called only while
- // closing DB. Subscriber will be deleted by cleanSubscribers.
- err := slurp(new(pb.KVList))
- // Drain if any pending updates.
- c.Done()
- return err
- case <-ctx.Done():
- c.Done()
- db.pub.deleteSubscriber(id)
- // Delete the subscriber to avoid further updates.
- return ctx.Err()
- case batch := <-recvCh:
- err := slurp(batch)
- if err != nil {
- c.Done()
- // Delete the subscriber if there is an error by the callback.
- db.pub.deleteSubscriber(id)
- return err
- }
- }
- }
-}
-
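Since Subscribe blocks until the context is done, callers usually run it on its own goroutine and cancel the context to detach. A sketch matching the signature above (the prefix is illustrative):

```go
package example

import (
	"context"
	"fmt"

	badger "github.com/dgraph-io/badger/v2"
)

// watchAccounts prints every change under the "account/" prefix until the
// caller cancels ctx, at which point Subscribe returns ctx.Err().
func watchAccounts(ctx context.Context, db *badger.DB) error {
	return db.Subscribe(ctx, func(kvs *badger.KVList) error {
		for _, kv := range kvs.GetKv() {
			fmt.Printf("changed: %s -> %d bytes\n", kv.Key, len(kv.Value))
		}
		return nil // A non-nil error here unsubscribes and is returned.
	}, []byte("account/"))
}
```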
-// shouldEncrypt reports whether the DB should encrypt its data.
-func (db *DB) shouldEncrypt() bool {
- return len(db.opt.EncryptionKey) > 0
-}
-
-func (db *DB) syncDir(dir string) error {
- if db.opt.InMemory {
- return nil
- }
- return syncDir(dir)
-}
-
-func createDirs(opt Options) error {
- for _, path := range []string{opt.Dir, opt.ValueDir} {
- dirExists, err := exists(path)
- if err != nil {
- return y.Wrapf(err, "Invalid Dir: %q", path)
- }
- if !dirExists {
- if opt.ReadOnly {
- return errors.Errorf("Cannot find directory %q for read-only open", path)
- }
- // Try to create the directory
- err = os.Mkdir(path, 0700)
- if err != nil {
- return y.Wrapf(err, "Error Creating Dir: %q", path)
- }
- }
- }
- return nil
-}
-
-// StreamDB streams the contents of this DB to a new DB with options outOptions,
-// which will be created in outOptions.Dir.
-func (db *DB) StreamDB(outOptions Options) error {
- outDir := outOptions.Dir
-
- // Open output DB.
- outDB, err := OpenManaged(outOptions)
- if err != nil {
- return errors.Wrapf(err, "cannot open out DB at %s", outDir)
- }
- defer outDB.Close()
- writer := outDB.NewStreamWriter()
-	if err := writer.Prepare(); err != nil {
-		// The wrapped error must be returned; dropping it silently ignores a failed Prepare.
-		return errors.Wrapf(err, "cannot create stream writer in out DB at %s", outDir)
-	}
-
- // Stream contents of DB to the output DB.
- stream := db.NewStreamAt(math.MaxUint64)
- stream.LogPrefix = fmt.Sprintf("Streaming DB to new DB at %s", outDir)
- stream.Send = func(kvs *pb.KVList) error {
- return writer.Write(kvs)
- }
- if err := stream.Orchestrate(context.Background()); err != nil {
- return errors.Wrapf(err, "cannot stream DB to out DB at %s", outDir)
- }
- if err := writer.Flush(); err != nil {
- return errors.Wrapf(err, "cannot flush writer")
- }
- return nil
-}
-
-// MaxVersion returns the maximum committed version across all keys in the DB. It
-// uses the stream framework to find the maximum version.
-func (db *DB) MaxVersion() (uint64, error) {
- maxVersion := uint64(0)
- var mu sync.Mutex
- var stream *Stream
- if db.opt.managedTxns {
- stream = db.NewStreamAt(math.MaxUint64)
- } else {
- stream = db.NewStream()
- }
-
- stream.ChooseKey = func(item *Item) bool {
- mu.Lock()
- if item.Version() > maxVersion {
- maxVersion = item.Version()
- }
- mu.Unlock()
- return false
- }
- stream.KeyToList = nil
- stream.Send = nil
- if err := stream.Orchestrate(context.Background()); err != nil {
- return 0, err
- }
-	return maxVersion, nil
-}
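MaxVersion doubles as a compact example of the stream framework: set the hooks, then call Orchestrate. For comparison, a minimal key-counting variant under the same API (assumes a non-managed DB; a managed one would use NewStreamAt, as MaxVersion does above):

```go
package example

import (
	"context"
	"sync/atomic"

	badger "github.com/dgraph-io/badger/v2"
)

// countKeys runs a full-DB scan through the stream framework, using
// ChooseKey purely for its side effect, just like MaxVersion.
func countKeys(db *badger.DB) (int64, error) {
	var n int64
	stream := db.NewStream()
	stream.ChooseKey = func(item *badger.Item) bool {
		atomic.AddInt64(&n, 1) // ChooseKey may be called concurrently.
		return false           // Skip the value entirely; we only count.
	}
	// Nothing is chosen, so nothing is listed or sent (same as MaxVersion).
	stream.KeyToList = nil
	stream.Send = nil
	if err := stream.Orchestrate(context.Background()); err != nil {
		return 0, err
	}
	return n, nil
}
```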
diff --git a/vendor/github.com/dgraph-io/badger/v2/dir_plan9.go b/vendor/github.com/dgraph-io/badger/v2/dir_plan9.go
deleted file mode 100644
index ad323d70..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/dir_plan9.go
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/pkg/errors"
-)
-
-// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part
-// of the locking mechanism; it's just advisory.
-type directoryLockGuard struct {
- // File handle on the directory, which we've locked.
- f *os.File
- // The absolute path to our pid file.
- path string
-}
-
-// acquireDirectoryLock gets a lock on the directory.
-// It will also write our pid to dirPath/pidFileName for convenience.
-// readOnly is not supported on Plan 9.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (
- *directoryLockGuard, error) {
- if readOnly {
- return nil, ErrPlan9NotSupported
- }
-
- // Convert to absolute path so that Release still works even if we do an unbalanced
- // chdir in the meantime.
- absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
- if err != nil {
- return nil, errors.Wrap(err, "cannot get absolute path for pid lock file")
- }
-
- // If the file was unpacked or created by some other program, it might not
- // have the ModeExclusive bit set. Set it before we call OpenFile, so that we
- // can be confident that a successful OpenFile implies exclusive use.
- //
- // OpenFile fails if the file ModeExclusive bit set *and* the file is already open.
- // So, if the file is closed when the DB crashed, we're fine. When the process
- // that was managing the DB crashes, the OS will close the file for us.
- //
- // This bit of code is copied from Go's lockedfile internal package:
- // https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L58
- if fi, err := os.Stat(absPidFilePath); err == nil {
- if fi.Mode()&os.ModeExclusive == 0 {
- if err := os.Chmod(absPidFilePath, fi.Mode()|os.ModeExclusive); err != nil {
- return nil, errors.Wrapf(err, "could not set exclusive mode bit")
- }
- }
- } else if !os.IsNotExist(err) {
- return nil, err
- }
- f, err := os.OpenFile(absPidFilePath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666|os.ModeExclusive)
- if err != nil {
- if isLocked(err) {
- return nil, errors.Wrapf(err,
- "Cannot open pid lock file %q. Another process is using this Badger database",
- absPidFilePath)
- }
- return nil, errors.Wrapf(err, "Cannot open pid lock file %q", absPidFilePath)
- }
-
- if _, err = fmt.Fprintf(f, "%d\n", os.Getpid()); err != nil {
- f.Close()
- return nil, errors.Wrapf(err, "could not write pid")
- }
- return &directoryLockGuard{f, absPidFilePath}, nil
-}
-
-// Release deletes the pid file and releases our lock on the directory.
-func (guard *directoryLockGuard) release() error {
- // It's important that we remove the pid file first.
- err := os.Remove(guard.path)
-
- if closeErr := guard.f.Close(); err == nil {
- err = closeErr
- }
- guard.path = ""
- guard.f = nil
-
- return err
-}
-
-// openDir opens a directory for syncing.
-func openDir(path string) (*os.File, error) { return os.Open(path) }
-
-// When you create or delete a file, you have to ensure the directory entry for the file is synced
-// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync,
-// or see https://github.com/coreos/etcd/issues/6368 for an example.)
-func syncDir(dir string) error {
- f, err := openDir(dir)
- if err != nil {
- return errors.Wrapf(err, "While opening directory: %s.", dir)
- }
-
- err = f.Sync()
- closeErr := f.Close()
- if err != nil {
- return errors.Wrapf(err, "While syncing directory: %s.", dir)
- }
- return errors.Wrapf(closeErr, "While closing directory: %s.", dir)
-}
-
-// Opening an exclusive-use file returns an error.
-// The expected error strings are:
-//
-// - "open/create -- file is locked" (cwfs, kfs)
-// - "exclusive lock" (fossil)
-// - "exclusive use file already open" (ramfs)
-//
-// See https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L16
-var lockedErrStrings = [...]string{
- "file is locked",
- "exclusive lock",
- "exclusive use file already open",
-}
-
-// Even though plan9 doesn't support the Lock/RLock/Unlock functions to
-// manipulate already-open files, IsLocked is still meaningful: os.OpenFile
-// itself may return errors that indicate that a file with the ModeExclusive bit
-// set is already open.
-func isLocked(err error) bool {
- s := err.Error()
-
- for _, frag := range lockedErrStrings {
- if strings.Contains(s, frag) {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/dir_unix.go b/vendor/github.com/dgraph-io/badger/v2/dir_unix.go
deleted file mode 100644
index f8457b0b..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/dir_unix.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// +build !windows,!plan9
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-
- "github.com/pkg/errors"
- "golang.org/x/sys/unix"
-)
-
-// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part
-// of the locking mechanism; it's just advisory.
-type directoryLockGuard struct {
- // File handle on the directory, which we've flocked.
- f *os.File
- // The absolute path to our pid file.
- path string
- // Was this a shared lock for a read-only database?
- readOnly bool
-}
-
-// acquireDirectoryLock gets a lock on the directory (using flock). If
-// this is not read-only, it will also write our pid to
-// dirPath/pidFileName for convenience.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (
- *directoryLockGuard, error) {
- // Convert to absolute path so that Release still works even if we do an unbalanced
- // chdir in the meantime.
- absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
- if err != nil {
- return nil, errors.Wrap(err, "cannot get absolute path for pid lock file")
- }
- f, err := os.Open(dirPath)
- if err != nil {
- return nil, errors.Wrapf(err, "cannot open directory %q", dirPath)
- }
- opts := unix.LOCK_EX | unix.LOCK_NB
- if readOnly {
- opts = unix.LOCK_SH | unix.LOCK_NB
- }
-
- err = unix.Flock(int(f.Fd()), opts)
- if err != nil {
- f.Close()
- return nil, errors.Wrapf(err,
- "Cannot acquire directory lock on %q. Another process is using this Badger database.",
- dirPath)
- }
-
- if !readOnly {
- // Yes, we happily overwrite a pre-existing pid file. We're the
- // only read-write badger process using this directory.
- err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666)
- if err != nil {
- f.Close()
- return nil, errors.Wrapf(err,
- "Cannot write pid file %q", absPidFilePath)
- }
- }
- return &directoryLockGuard{f, absPidFilePath, readOnly}, nil
-}
-
-// Release deletes the pid file and releases our lock on the directory.
-func (guard *directoryLockGuard) release() error {
- var err error
- if !guard.readOnly {
- // It's important that we remove the pid file first.
- err = os.Remove(guard.path)
- }
-
- if closeErr := guard.f.Close(); err == nil {
- err = closeErr
- }
- guard.path = ""
- guard.f = nil
-
- return err
-}
-
-// openDir opens a directory for syncing.
-func openDir(path string) (*os.File, error) { return os.Open(path) }
-
-// When you create or delete a file, you have to ensure the directory entry for the file is synced
-// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync,
-// or see https://github.com/coreos/etcd/issues/6368 for an example.)
-func syncDir(dir string) error {
- f, err := openDir(dir)
- if err != nil {
- return errors.Wrapf(err, "While opening directory: %s.", dir)
- }
-
- err = f.Sync()
- closeErr := f.Close()
- if err != nil {
- return errors.Wrapf(err, "While syncing directory: %s.", dir)
- }
- return errors.Wrapf(closeErr, "While closing directory: %s.", dir)
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/dir_windows.go b/vendor/github.com/dgraph-io/badger/v2/dir_windows.go
deleted file mode 100644
index 60f982e2..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/dir_windows.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// +build windows
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-// openDir opens a directory on Windows with write access for syncing.
-import (
- "os"
- "path/filepath"
- "syscall"
-
- "github.com/pkg/errors"
-)
-
-// FILE_ATTRIBUTE_TEMPORARY - A file that is being used for temporary storage.
-// FILE_FLAG_DELETE_ON_CLOSE - The file is to be deleted immediately after all of its handles are
-// closed, which includes the specified handle and any other open or duplicated handles.
-// See: https://docs.microsoft.com/en-us/windows/desktop/FileIO/file-attribute-constants
-// NOTE: Added here to avoid importing golang.org/x/sys/windows
-const (
- FILE_ATTRIBUTE_TEMPORARY = 0x00000100
- FILE_FLAG_DELETE_ON_CLOSE = 0x04000000
-)
-
-func openDir(path string) (*os.File, error) {
- fd, err := openDirWin(path)
- if err != nil {
- return nil, err
- }
- return os.NewFile(uintptr(fd), path), nil
-}
-
-func openDirWin(path string) (fd syscall.Handle, err error) {
- if len(path) == 0 {
- return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
- }
- pathp, err := syscall.UTF16PtrFromString(path)
- if err != nil {
- return syscall.InvalidHandle, err
- }
- access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
- sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
- createmode := uint32(syscall.OPEN_EXISTING)
- fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
- return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
-}
-
-// DirectoryLockGuard holds a lock on the directory.
-type directoryLockGuard struct {
- h syscall.Handle
- path string
-}
-
-// AcquireDirectoryLock acquires exclusive access to a directory.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) {
- if readOnly {
- return nil, ErrWindowsNotSupported
- }
-
- // Convert to absolute path so that Release still works even if we do an unbalanced
- // chdir in the meantime.
- absLockFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
- if err != nil {
- return nil, errors.Wrap(err, "Cannot get absolute path for pid lock file")
- }
-
- // This call creates a file handler in memory that only one process can use at a time. When
- // that process ends, the file is deleted by the system.
- // FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory.
- // FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete
- // the file when all processes holding the handler are closed.
- // XXX: this works, but it's a bit clunky. I'd prefer to use LockFileEx, but it needs the unsafe pkg.
- h, err := syscall.CreateFile(
- syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil,
- syscall.OPEN_ALWAYS,
- uint32(FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE),
- 0)
- if err != nil {
- return nil, errors.Wrapf(err,
- "Cannot create lock file %q. Another process is using this Badger database",
- absLockFilePath)
- }
-
- return &directoryLockGuard{h: h, path: absLockFilePath}, nil
-}
-
-// Release removes the directory lock.
-func (g *directoryLockGuard) release() error {
- g.path = ""
- return syscall.CloseHandle(g.h)
-}
-
-// Windows doesn't support syncing directories to the file system. See
-// https://github.com/dgraph-io/badger/issues/699#issuecomment-504133587 for more details.
-func syncDir(dir string) error { return nil }
diff --git a/vendor/github.com/dgraph-io/badger/v2/doc.go b/vendor/github.com/dgraph-io/badger/v2/doc.go
deleted file mode 100644
index 83dc9a28..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/doc.go
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
-Package badger implements an embeddable, simple and fast key-value database,
-written in pure Go. It is designed to be highly performant for both reads and
-writes simultaneously. Badger uses Multi-Version Concurrency Control (MVCC), and
-supports transactions. It runs transactions concurrently, with serializable
-snapshot isolation guarantees.
-
-Badger uses an LSM tree along with a value log to separate keys from values,
-hence reducing both write amplification and the size of the LSM tree. This
-allows the LSM tree to be served entirely from RAM, while the values are served
-from SSD.
-
-
-Usage
-
-Badger has the following main types: DB, Txn, Item and Iterator. DB contains
-keys that are associated with values. It must be opened with the appropriate
-options before it can be accessed.
-
-All operations happen inside a Txn. Txn represents a transaction, which can
-be read-only or read-write. Read-only transactions can read values for a
-given key (which are returned inside an Item), or iterate over a set of
-key-value pairs using an Iterator (which are returned as Item type values as
-well). Read-write transactions can also update and delete keys from the DB.
-
-See the examples for more usage details.
-*/
-package badger
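
As a concrete companion to the Usage section above, here is a minimal sketch of the v2 API it describes; the database path and key are illustrative:

```go
package main

import (
	"fmt"
	"log"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transaction: set a key.
	if err := db.Update(func(txn *badger.Txn) error {
		return txn.Set([]byte("answer"), []byte("42"))
	}); err != nil {
		log.Fatal(err)
	}

	// Read-only transaction: read it back through an Item.
	if err := db.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte("answer"))
		if err != nil {
			return err
		}
		return item.Value(func(val []byte) error {
			fmt.Printf("answer=%s\n", val)
			return nil
		})
	}); err != nil {
		log.Fatal(err)
	}
}
```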
diff --git a/vendor/github.com/dgraph-io/badger/v2/errors.go b/vendor/github.com/dgraph-io/badger/v2/errors.go
deleted file mode 100644
index fed827ab..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/errors.go
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "math"
-
- "github.com/pkg/errors"
-)
-
-const (
- // ValueThresholdLimit is the maximum permissible value of opt.ValueThreshold.
- ValueThresholdLimit = math.MaxUint16 - 16 + 1
-)
-
-var (
- // ErrValueLogSize is returned when opt.ValueLogFileSize option is not within the valid
- // range.
- ErrValueLogSize = errors.New("Invalid ValueLogFileSize, must be between 1MB and 2GB")
-
- // ErrKeyNotFound is returned when key isn't found on a txn.Get.
- ErrKeyNotFound = errors.New("Key not found")
-
- // ErrTxnTooBig is returned if too many writes are fit into a single transaction.
- ErrTxnTooBig = errors.New("Txn is too big to fit into one request")
-
- // ErrConflict is returned when a transaction conflicts with another transaction. This can
- // happen if the read rows had been updated concurrently by another transaction.
- ErrConflict = errors.New("Transaction Conflict. Please retry")
-
- // ErrReadOnlyTxn is returned if an update function is called on a read-only transaction.
- ErrReadOnlyTxn = errors.New("No sets or deletes are allowed in a read-only transaction")
-
- // ErrDiscardedTxn is returned if a previously discarded transaction is re-used.
- ErrDiscardedTxn = errors.New("This transaction has been discarded. Create a new one")
-
- // ErrEmptyKey is returned if an empty key is passed on an update function.
- ErrEmptyKey = errors.New("Key cannot be empty")
-
- // ErrInvalidKey is returned if the key has a special !badger! prefix,
- // reserved for internal usage.
- ErrInvalidKey = errors.New("Key is using a reserved !badger! prefix")
-
- // ErrRetry is returned when a log file containing the value is not found.
- // This usually indicates that it may have been garbage collected, and the
- // operation needs to be retried.
- ErrRetry = errors.New("Unable to find log file. Please retry")
-
- // ErrThresholdZero is returned if threshold is set to zero, and value log GC is called.
- // In such a case, GC can't be run.
- ErrThresholdZero = errors.New(
- "Value log GC can't run because threshold is set to zero")
-
- // ErrNoRewrite is returned if a call for value log GC doesn't result in a log file rewrite.
- ErrNoRewrite = errors.New(
- "Value log GC attempt didn't result in any cleanup")
-
- // ErrRejected is returned if a value log GC is called either while another GC is running, or
- // after DB::Close has been called.
- ErrRejected = errors.New("Value log GC request rejected")
-
- // ErrInvalidRequest is returned if the user request is invalid.
- ErrInvalidRequest = errors.New("Invalid request")
-
- // ErrManagedTxn is returned if the user tries to use an API which isn't
- // allowed due to external management of transactions, when using ManagedDB.
- ErrManagedTxn = errors.New(
- "Invalid API request. Not allowed to perform this action using ManagedDB")
-
- // ErrInvalidDump if a data dump made previously cannot be loaded into the database.
- ErrInvalidDump = errors.New("Data dump cannot be read")
-
- // ErrZeroBandwidth is returned if the user passes in zero bandwidth for sequence.
- ErrZeroBandwidth = errors.New("Bandwidth must be greater than zero")
-
- // ErrInvalidLoadingMode is returned when opt.ValueLogLoadingMode option is not
- // within the valid range
- ErrInvalidLoadingMode = errors.New("Invalid ValueLogLoadingMode, must be FileIO or MemoryMap")
-
- // ErrReplayNeeded is returned when opt.ReadOnly is set but the
- // database requires a value log replay.
- ErrReplayNeeded = errors.New("Database was not properly closed, cannot open read-only")
-
- // ErrWindowsNotSupported is returned when opt.ReadOnly is used on Windows
- ErrWindowsNotSupported = errors.New("Read-only mode is not supported on Windows")
-
- // ErrPlan9NotSupported is returned when opt.ReadOnly is used on Plan 9
- ErrPlan9NotSupported = errors.New("Read-only mode is not supported on Plan 9")
-
- // ErrTruncateNeeded is returned when the value log gets corrupt, and requires truncation of
- // corrupt data to allow Badger to run properly.
- ErrTruncateNeeded = errors.New(
- "Value log truncate required to run DB. This might result in data loss")
-
- // ErrBlockedWrites is returned if the user called DropAll. During the process of dropping all
- // data from Badger, we stop accepting new writes, by returning this error.
- ErrBlockedWrites = errors.New("Writes are blocked, possibly due to DropAll or Close")
-
- // ErrNilCallback is returned when subscriber's callback is nil.
- ErrNilCallback = errors.New("Callback cannot be nil")
-
- // ErrEncryptionKeyMismatch is returned when the storage key is not
- // matched with the key previously given.
- ErrEncryptionKeyMismatch = errors.New("Encryption key mismatch")
-
- // ErrInvalidDataKeyID is returned if the datakey id is invalid.
- ErrInvalidDataKeyID = errors.New("Invalid datakey id")
-
- // ErrInvalidEncryptionKey is returned if length of encryption keys is invalid.
- ErrInvalidEncryptionKey = errors.New("Encryption key's length should be" +
- "either 16, 24, or 32 bytes")
- // ErrGCInMemoryMode is returned when db.RunValueLogGC is called in in-memory mode.
- ErrGCInMemoryMode = errors.New("Cannot run value log GC when DB is opened in InMemory mode")
-
- // ErrDBClosed is returned when a get operation is performed after closing the DB.
- ErrDBClosed = errors.New("DB Closed")
-)
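
Most of these errors are terminal, but a few are explicitly retryable; the comment on ErrConflict even says so. A minimal retry wrapper, assuming an open *badger.DB (the retry limit is an illustrative choice):

```go
package example

import (
	badger "github.com/dgraph-io/badger/v2"
)

// updateWithRetry retries fn when the transaction loses an optimistic
// concurrency race with another writer; other errors are returned as-is.
func updateWithRetry(db *badger.DB, fn func(*badger.Txn) error) error {
	const maxRetries = 5 // illustrative limit
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = db.Update(fn); err != badger.ErrConflict {
			return err // nil, or a non-retryable error
		}
	}
	return err
}
```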
diff --git a/vendor/github.com/dgraph-io/badger/v2/histogram.go b/vendor/github.com/dgraph-io/badger/v2/histogram.go
deleted file mode 100644
index d8c94bb7..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/histogram.go
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "math"
-)
-
-// PrintHistogram builds and displays the key-value size histogram.
-// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
-// considered for creating the histogram
-func (db *DB) PrintHistogram(keyPrefix []byte) {
- if db == nil {
- fmt.Println("\nCannot build histogram: DB is nil.")
- return
- }
- histogram := db.buildHistogram(keyPrefix)
- fmt.Printf("Histogram of key sizes (in bytes)\n")
- histogram.keySizeHistogram.printHistogram()
- fmt.Printf("Histogram of value sizes (in bytes)\n")
- histogram.valueSizeHistogram.printHistogram()
-}
-
-// histogramData stores information about a histogram
-type histogramData struct {
- bins []int64
- countPerBin []int64
- totalCount int64
- min int64
- max int64
- sum int64
-}
-
-// sizeHistogram contains keySize histogram and valueSize histogram
-type sizeHistogram struct {
- keySizeHistogram, valueSizeHistogram histogramData
-}
-
-// newSizeHistogram returns a new instance of sizeHistogram with
-// properly initialized fields.
-func newSizeHistogram() *sizeHistogram {
- // TODO(ibrahim): find appropriate bin size.
- keyBins := createHistogramBins(1, 16)
- valueBins := createHistogramBins(1, 30)
- return &sizeHistogram{
- keySizeHistogram: histogramData{
- bins: keyBins,
- countPerBin: make([]int64, len(keyBins)+1),
- max: math.MinInt64,
- min: math.MaxInt64,
- sum: 0,
- },
- valueSizeHistogram: histogramData{
- bins: valueBins,
- countPerBin: make([]int64, len(valueBins)+1),
- max: math.MinInt64,
- min: math.MaxInt64,
- sum: 0,
- },
- }
-}
-
-// createHistogramBins creates bins for a histogram. The bin sizes are powers
-// of two of the form [2^min_exponent, ..., 2^max_exponent].
-func createHistogramBins(minExponent, maxExponent uint32) []int64 {
- var bins []int64
- for i := minExponent; i <= maxExponent; i++ {
- bins = append(bins, int64(1)<<i)
- }
- return bins
-}
-
-// Update updates the min, max, sum, totalCount, and per-bin counts with the
-// given value.
-func (histogram *histogramData) Update(value int64) {
- if value > histogram.max {
- histogram.max = value
- }
- if value < histogram.min {
- histogram.min = value
- }
-
- histogram.sum += value
- histogram.totalCount++
-
- for index := 0; index <= len(histogram.bins); index++ {
- // Allocate value in the last buckets if we reached the end of the Bounds array.
- if index == len(histogram.bins) {
- histogram.countPerBin[index]++
- break
- }
-
- // Check if the value should be added to the "index" bin
- if value < int64(histogram.bins[index]) {
- histogram.countPerBin[index]++
- break
- }
- }
-}
-
-// buildHistogram builds the key-value size histogram.
-// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
-// considered for creating the histogram
-func (db *DB) buildHistogram(keyPrefix []byte) *sizeHistogram {
- txn := db.NewTransaction(false)
- defer txn.Discard()
-
- itr := txn.NewIterator(DefaultIteratorOptions)
- defer itr.Close()
-
- badgerHistogram := newSizeHistogram()
-
- // Collect key and value sizes.
- for itr.Seek(keyPrefix); itr.ValidForPrefix(keyPrefix); itr.Next() {
- item := itr.Item()
- badgerHistogram.keySizeHistogram.Update(item.KeySize())
- badgerHistogram.valueSizeHistogram.Update(item.ValueSize())
- }
- return badgerHistogram
-}
-
-// printHistogram prints the histogram data in a human-readable format.
-func (histogram histogramData) printHistogram() {
- fmt.Printf("Total count: %d\n", histogram.totalCount)
- fmt.Printf("Min value: %d\n", histogram.min)
- fmt.Printf("Max value: %d\n", histogram.max)
- fmt.Printf("Mean: %.2f\n", float64(histogram.sum)/float64(histogram.totalCount))
- fmt.Printf("%24s %9s\n", "Range", "Count")
-
- numBins := len(histogram.bins)
- for index, count := range histogram.countPerBin {
- if count == 0 {
- continue
- }
-
- // The last bin represents the bin that contains the range from
- // the last bin up to infinity so it's processed differently than the
- // other bins.
- if index == len(histogram.countPerBin)-1 {
- lowerBound := int(histogram.bins[numBins-1])
- fmt.Printf("[%10d, %10s) %9d\n", lowerBound, "infinity", count)
- continue
- }
-
- upperBound := int(histogram.bins[index])
- lowerBound := 0
- if index > 0 {
- lowerBound = int(histogram.bins[index-1])
- }
-
- fmt.Printf("[%10d, %10d) %9d\n", lowerBound, upperBound, count)
- }
- fmt.Println()
-}
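
To make the binning scheme concrete: createHistogramBins(1, 4) yields [2 4 8 16], and Update credits a value to the first bin whose upper bound exceeds it, falling through to a final overflow bucket. A small self-contained sketch of the same scheme:

```go
package main

import "fmt"

func main() {
	// Power-of-two bins, as produced by createHistogramBins(1, 4).
	bins := []int64{2, 4, 8, 16}
	counts := make([]int64, len(bins)+1) // +1 for the overflow bucket

	for _, v := range []int64{1, 3, 9, 100} {
		i := 0
		for i < len(bins) && v >= bins[i] {
			i++
		}
		counts[i]++ // first bin with v < bins[i], else the overflow bucket
	}
	fmt.Println(counts) // [1 1 0 1 1]
}
```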
diff --git a/vendor/github.com/dgraph-io/badger/v2/iterator.go b/vendor/github.com/dgraph-io/badger/v2/iterator.go
deleted file mode 100644
index 11d0c27a..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/iterator.go
+++ /dev/null
@@ -1,756 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "fmt"
- "hash/crc32"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/v2/options"
- "github.com/dgraph-io/badger/v2/table"
- "github.com/dgryski/go-farm"
-
- "github.com/dgraph-io/badger/v2/y"
-)
-
-type prefetchStatus uint8
-
-const (
- prefetched prefetchStatus = iota + 1
-)
-
-// Item is returned during iteration. Both the Key() and Value() outputs are only valid until
-// iterator.Next() is called.
-type Item struct {
- status prefetchStatus
- err error
- wg sync.WaitGroup
- db *DB
- key []byte
- vptr []byte
- meta byte // We need to store meta to know about bitValuePointer.
- userMeta byte
- expiresAt uint64
- val []byte
- slice *y.Slice // Used only during prefetching.
- next *Item
- version uint64
- txn *Txn
-}
-
-// String returns a string representation of Item
-func (item *Item) String() string {
- return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta)
-}
-
-// Key returns the key.
-//
-// Key is only valid as long as item is valid, or transaction is valid. If you need to use it
-// outside its validity, please use KeyCopy.
-func (item *Item) Key() []byte {
- return item.key
-}
-
-// KeyCopy returns a copy of the key of the item, writing it to dst slice.
-// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and
-// returned.
-func (item *Item) KeyCopy(dst []byte) []byte {
- return y.SafeCopy(dst, item.key)
-}
-
-// Version returns the commit timestamp of the item.
-func (item *Item) Version() uint64 {
- return item.version
-}
-
-// Value retrieves the value of the item from the value log.
-//
-// This method must be called within a transaction. Calling it outside a
-// transaction is considered undefined behavior. If an iterator is being used,
-// then Item.Value() is defined in the current iteration only, because items are
-// reused.
-//
-// If you need to use a value outside a transaction, please use Item.ValueCopy
-// instead, or copy it yourself. Value might change once discard or commit is called.
-// Use ValueCopy if you want to do a Set after Get.
-func (item *Item) Value(fn func(val []byte) error) error {
- item.wg.Wait()
- if item.status == prefetched {
- if item.err == nil && fn != nil {
- if err := fn(item.val); err != nil {
- return err
- }
- }
- return item.err
- }
- buf, cb, err := item.yieldItemValue()
- defer runCallback(cb)
- if err != nil {
- return err
- }
- if fn != nil {
- return fn(buf)
- }
- return nil
-}
-
-// ValueCopy returns a copy of the value of the item from the value log, writing it to dst slice.
-// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and
-// returned. Tip: It might make sense to reuse the returned slice as dst argument for the next call.
-//
-// This function is useful in long running iterate/update transactions to avoid a write deadlock.
-// See Github issue: https://github.com/dgraph-io/badger/issues/315
-func (item *Item) ValueCopy(dst []byte) ([]byte, error) {
- item.wg.Wait()
- if item.status == prefetched {
- return y.SafeCopy(dst, item.val), item.err
- }
- buf, cb, err := item.yieldItemValue()
- defer runCallback(cb)
- return y.SafeCopy(dst, buf), err
-}
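
The Value/ValueCopy distinction above matters whenever a value must outlive the current iteration or transaction. A sketch of the copy-out pattern the ValueCopy comment recommends (collectValues is an illustrative helper, not part of Badger):

```go
package example

import (
	badger "github.com/dgraph-io/badger/v2"
)

// collectValues copies every value under prefix out of the transaction.
// ValueCopy(nil) allocates a fresh slice each time, so the results stay
// valid after Next() and after the transaction is discarded.
func collectValues(txn *badger.Txn, prefix []byte) ([][]byte, error) {
	it := txn.NewIterator(badger.DefaultIteratorOptions)
	defer it.Close()

	var out [][]byte
	for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
		val, err := it.Item().ValueCopy(nil)
		if err != nil {
			return nil, err
		}
		out = append(out, val)
	}
	return out, nil
}
```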
-
-func (item *Item) hasValue() bool {
- if item.meta == 0 && item.vptr == nil {
- // key not found
- return false
- }
- return true
-}
-
-// IsDeletedOrExpired returns true if item contains deleted or expired value.
-func (item *Item) IsDeletedOrExpired() bool {
- return isDeletedOrExpired(item.meta, item.expiresAt)
-}
-
-// DiscardEarlierVersions returns whether the item was created with the
-// option to discard earlier versions of a key when multiple are available.
-func (item *Item) DiscardEarlierVersions() bool {
- return item.meta&bitDiscardEarlierVersions > 0
-}
-
-func (item *Item) yieldItemValue() ([]byte, func(), error) {
- key := item.Key() // No need to copy.
- for {
- if !item.hasValue() {
- return nil, nil, nil
- }
-
- if item.slice == nil {
- item.slice = new(y.Slice)
- }
-
- if (item.meta & bitValuePointer) == 0 {
- val := item.slice.Resize(len(item.vptr))
- copy(val, item.vptr)
- return val, nil, nil
- }
-
- var vp valuePointer
- vp.Decode(item.vptr)
- result, cb, err := item.db.vlog.Read(vp, item.slice)
- if err != ErrRetry {
- if err != nil {
- item.db.opt.Logger.Errorf(`Unable to read: Key: %v, Version : %v,
- meta: %v, userMeta: %v`, key, item.version, item.meta, item.userMeta)
- }
- return result, cb, err
- }
- if bytes.HasPrefix(key, badgerMove) {
- // err == ErrRetry
- // Error is retry even after checking the move keyspace. So, let's
- // just assume that value is not present.
- return nil, cb, nil
- }
-
- // The value pointer is pointing to a deleted value log. Look for the
- // move key and read that instead.
- runCallback(cb)
- // Do not put badgerMove on the left in append. It seems to cause some sort of manipulation.
- keyTs := y.KeyWithTs(item.Key(), item.Version())
- key = make([]byte, len(badgerMove)+len(keyTs))
- n := copy(key, badgerMove)
- copy(key[n:], keyTs)
- // Note that we can't set item.key to move key, because that would
- // change the key user sees before and after this call. Also, this move
- // logic is internal logic and should not impact the external behavior
- // of the retrieval.
- vs, err := item.db.get(key)
- if err != nil {
- return nil, nil, err
- }
- if vs.Version != item.Version() {
- return nil, nil, nil
- }
- // Bug fix: Always copy the vs.Value into vptr here. Otherwise, when item is reused this
- // slice gets overwritten.
- item.vptr = y.SafeCopy(item.vptr, vs.Value)
- item.meta &^= bitValuePointer // Clear the value pointer bit.
- if vs.Meta&bitValuePointer > 0 {
- item.meta |= bitValuePointer // This meta would only be about value pointer.
- }
- }
-}
-
-func runCallback(cb func()) {
- if cb != nil {
- cb()
- }
-}
-
-func (item *Item) prefetchValue() {
- val, cb, err := item.yieldItemValue()
- defer runCallback(cb)
-
- item.err = err
- item.status = prefetched
- if val == nil {
- return
- }
- if item.db.opt.ValueLogLoadingMode == options.MemoryMap {
- buf := item.slice.Resize(len(val))
- copy(buf, val)
- item.val = buf
- } else {
- item.val = val
- }
-}
-
-// EstimatedSize returns the approximate size of the key-value pair.
-//
-// This can be called while iterating through a store to quickly estimate the
-// size of a range of key-value pairs (without fetching the corresponding
-// values).
-func (item *Item) EstimatedSize() int64 {
- if !item.hasValue() {
- return 0
- }
- if (item.meta & bitValuePointer) == 0 {
- return int64(len(item.key) + len(item.vptr))
- }
- var vp valuePointer
- vp.Decode(item.vptr)
- return int64(vp.Len) // includes key length.
-}
-
-// KeySize returns the size of the key.
-// Exact size of the key is key + 8 bytes of timestamp
-func (item *Item) KeySize() int64 {
- return int64(len(item.key))
-}
-
-// ValueSize returns the approximate size of the value.
-//
-// This can be called to quickly estimate the size of a value without fetching
-// it.
-func (item *Item) ValueSize() int64 {
- if !item.hasValue() {
- return 0
- }
- if (item.meta & bitValuePointer) == 0 {
- return int64(len(item.vptr))
- }
- var vp valuePointer
- vp.Decode(item.vptr)
-
- klen := int64(len(item.key) + 8) // 8 bytes for timestamp.
- // 6 bytes are for the approximate length of the header. Since header is encoded in varint, we
- // cannot find the exact length of header without fetching it.
- return int64(vp.Len) - klen - 6 - crc32.Size
-}
-
-// UserMeta returns the userMeta set by the user. Typically, this byte, optionally set by the user
-// is used to interpret the value.
-func (item *Item) UserMeta() byte {
- return item.userMeta
-}
-
-// ExpiresAt returns a Unix time value indicating when the item will be
-// considered expired. 0 indicates that the item will never expire.
-func (item *Item) ExpiresAt() uint64 {
- return item.expiresAt
-}
-
-// TODO: Switch this to use linked list container in Go.
-type list struct {
- head *Item
- tail *Item
-}
-
-func (l *list) push(i *Item) {
- i.next = nil
- if l.tail == nil {
- l.head = i
- l.tail = i
- return
- }
- l.tail.next = i
- l.tail = i
-}
-
-func (l *list) pop() *Item {
- if l.head == nil {
- return nil
- }
- i := l.head
- if l.head == l.tail {
- l.tail = nil
- l.head = nil
- } else {
- l.head = i.next
- }
- i.next = nil
- return i
-}
-
-// IteratorOptions is used to set options when iterating over Badger key-value
-// stores.
-//
-// This package provides DefaultIteratorOptions which contains options that
-// should work for most applications. Consider using that as a starting point
-// before customizing it for your own needs.
-type IteratorOptions struct {
- // Indicates whether we should prefetch values during iteration and store them.
- PrefetchValues bool
- // How many KV pairs to prefetch while iterating. Valid only if PrefetchValues is true.
- PrefetchSize int
- Reverse bool // Direction of iteration. False is forward, true is backward.
- AllVersions bool // Fetch all valid versions of the same key.
-
- // The following option is used to narrow down the SSTables that the iterator picks up. If
- // Prefix is specified, only tables which could have this prefix are picked based on their range
- // of keys.
- Prefix []byte // Only iterate over this given prefix.
- prefixIsKey bool // If set, use the prefix for bloom filter lookup.
-
- InternalAccess bool // Used to allow internal access to badger keys.
-}
-
-func (opt *IteratorOptions) compareToPrefix(key []byte) int {
- // We should compare the key without its timestamp. For example, key a[TS] might compare greater than the prefix "aa".
- key = y.ParseKey(key)
- if len(key) > len(opt.Prefix) {
- key = key[:len(opt.Prefix)]
- }
- return bytes.Compare(key, opt.Prefix)
-}
-
-func (opt *IteratorOptions) pickTable(t table.TableInterface) bool {
- if len(opt.Prefix) == 0 {
- return true
- }
- if opt.compareToPrefix(t.Smallest()) > 0 {
- return false
- }
- if opt.compareToPrefix(t.Biggest()) < 0 {
- return false
- }
- // Bloom filter lookup would only work if opt.Prefix does NOT have the read
- // timestamp as part of the key.
- if opt.prefixIsKey && t.DoesNotHave(farm.Fingerprint64(opt.Prefix)) {
- return false
- }
- return true
-}
-
-// pickTables picks the necessary tables for the iterator. This function also assumes
-// that the tables are sorted in the right order.
-func (opt *IteratorOptions) pickTables(all []*table.Table) []*table.Table {
- if len(opt.Prefix) == 0 {
- out := make([]*table.Table, len(all))
- copy(out, all)
- return out
- }
- sIdx := sort.Search(len(all), func(i int) bool {
- return opt.compareToPrefix(all[i].Biggest()) >= 0
- })
- if sIdx == len(all) {
- // Not found.
- return []*table.Table{}
- }
-
- filtered := all[sIdx:]
- if !opt.prefixIsKey {
- eIdx := sort.Search(len(filtered), func(i int) bool {
- return opt.compareToPrefix(filtered[i].Smallest()) > 0
- })
- out := make([]*table.Table, len(filtered[:eIdx]))
- copy(out, filtered[:eIdx])
- return out
- }
-
- var out []*table.Table
- hash := farm.Fingerprint64(opt.Prefix)
- for _, t := range filtered {
- // When we encounter the first table whose smallest key is higher than
- // opt.Prefix, we can stop.
- if opt.compareToPrefix(t.Smallest()) > 0 {
- return out
- }
- // opt.Prefix is actually the key. So, we can run bloom filter checks
- // as well.
- if t.DoesNotHave(hash) {
- continue
- }
- out = append(out, t)
- }
- return out
-}
-
-// DefaultIteratorOptions contains default options when iterating over Badger key-value stores.
-var DefaultIteratorOptions = IteratorOptions{
- PrefetchValues: true,
- PrefetchSize: 100,
- Reverse: false,
- AllVersions: false,
-}
-
-// Iterator helps iterating over the KV pairs in a lexicographically sorted order.
-type Iterator struct {
- iitr y.Iterator
- txn *Txn
- readTs uint64
-
- opt IteratorOptions
- item *Item
- data list
- waste list
-
- lastKey []byte // Used to skip over multiple versions of the same key.
-
- closed bool
-
- // ThreadId is an optional value that can be set to identify which goroutine created
- // the iterator. It can be used, for example, to uniquely identify each of the
- // iterators created by the stream interface
- ThreadId int
-}
-
-// NewIterator returns a new iterator. Depending upon the options, either only keys, or both
-// key-value pairs would be fetched. The keys are returned in lexicographically sorted order.
-// Using prefetch is recommended if you're doing a long running iteration, for performance.
-//
-// Multiple Iterators:
-// For a read-only txn, multiple iterators can be running simultaneously. However, for a read-write
-// txn, iterators have the nuance of being a snapshot of the writes for the transaction at the time
-// iterator was created. If writes are performed after an iterator is created, then that iterator
-// will not be able to see those writes. Only writes performed before an iterator was created can be
-// viewed.
-func (txn *Txn) NewIterator(opt IteratorOptions) *Iterator {
- if txn.discarded {
- panic("Transaction has already been discarded")
- }
- if txn.db.IsClosed() {
- panic(ErrDBClosed.Error())
- }
-
- // Keep track of the number of active iterators.
- atomic.AddInt32(&txn.numIterators, 1)
-
- // TODO: If Prefix is set, only pick those memtables which have keys with
- // the prefix.
- tables, decr := txn.db.getMemTables()
- defer decr()
- txn.db.vlog.incrIteratorCount()
- var iters []y.Iterator
- if itr := txn.newPendingWritesIterator(opt.Reverse); itr != nil {
- iters = append(iters, itr)
- }
- for i := 0; i < len(tables); i++ {
- iters = append(iters, tables[i].NewUniIterator(opt.Reverse))
- }
- iters = txn.db.lc.appendIterators(iters, &opt) // This will increment references.
-
- res := &Iterator{
- txn: txn,
- iitr: table.NewMergeIterator(iters, opt.Reverse),
- opt: opt,
- readTs: txn.readTs,
- }
- return res
-}
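
A typical use of NewIterator inside a read-only transaction, following the prefetch recommendation above for long scans (scanAll is an illustrative helper):

```go
package example

import (
	"fmt"

	badger "github.com/dgraph-io/badger/v2"
)

// scanAll prints every key-value pair in lexicographically sorted order.
func scanAll(db *badger.DB) error {
	return db.View(func(txn *badger.Txn) error {
		it := txn.NewIterator(badger.DefaultIteratorOptions)
		defer it.Close() // always close iterators; see Close below
		for it.Rewind(); it.Valid(); it.Next() {
			item := it.Item()
			if err := item.Value(func(v []byte) error {
				fmt.Printf("%s=%s\n", item.Key(), v)
				return nil
			}); err != nil {
				return err
			}
		}
		return nil
	})
}
```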
-
-// NewKeyIterator is just like NewIterator, but allows the user to iterate over all versions of a
-// single key. Internally, it sets the Prefix option in provided opt, and uses that prefix to
-// additionally run bloom filter lookups before picking tables from the LSM tree.
-func (txn *Txn) NewKeyIterator(key []byte, opt IteratorOptions) *Iterator {
- if len(opt.Prefix) > 0 {
- panic("opt.Prefix should be nil for NewKeyIterator.")
- }
- opt.Prefix = key // This key must be without the timestamp.
- opt.prefixIsKey = true
- opt.AllVersions = true
- return txn.NewIterator(opt)
-}
-
-func (it *Iterator) newItem() *Item {
- item := it.waste.pop()
- if item == nil {
- item = &Item{slice: new(y.Slice), db: it.txn.db, txn: it.txn}
- }
- return item
-}
-
-// Item returns pointer to the current key-value pair.
-// This item is only valid until it.Next() gets called.
-func (it *Iterator) Item() *Item {
- tx := it.txn
- tx.addReadKey(it.item.Key())
- return it.item
-}
-
-// Valid returns false when iteration is done.
-func (it *Iterator) Valid() bool {
- if it.item == nil {
- return false
- }
- if it.opt.prefixIsKey {
- return bytes.Equal(it.item.key, it.opt.Prefix)
- }
- return bytes.HasPrefix(it.item.key, it.opt.Prefix)
-}
-
-// ValidForPrefix returns false when iteration is done
-// or when the current key is not prefixed by the specified prefix.
-func (it *Iterator) ValidForPrefix(prefix []byte) bool {
- return it.Valid() && bytes.HasPrefix(it.item.key, prefix)
-}
-
-// Close would close the iterator. It is important to call this when you're done with iteration.
-func (it *Iterator) Close() {
- if it.closed {
- return
- }
- it.closed = true
-
- it.iitr.Close()
- // It is important to wait for the fill goroutines to finish. Otherwise, we might leave zombie
- // goroutines behind, which are waiting to acquire file read locks after DB has been closed.
- waitFor := func(l list) {
- item := l.pop()
- for item != nil {
- item.wg.Wait()
- item = l.pop()
- }
- }
- waitFor(it.waste)
- waitFor(it.data)
-
- // TODO: We could handle this error.
- _ = it.txn.db.vlog.decrIteratorCount()
- atomic.AddInt32(&it.txn.numIterators, -1)
-}
-
-// Next would advance the iterator by one. Always check it.Valid() after a Next()
-// to ensure you have access to a valid it.Item().
-func (it *Iterator) Next() {
- // Reuse current item
- it.item.wg.Wait() // Just cleaner to wait before pushing to avoid doing ref counting.
- it.waste.push(it.item)
-
- // Set next item to current
- it.item = it.data.pop()
-
- for it.iitr.Valid() {
- if it.parseItem() {
- // parseItem calls one extra next.
- // This is used to deal with the complexity of reverse iteration.
- break
- }
- }
-}
-
-func isDeletedOrExpired(meta byte, expiresAt uint64) bool {
- if meta&bitDelete > 0 {
- return true
- }
- if expiresAt == 0 {
- return false
- }
- return expiresAt <= uint64(time.Now().Unix())
-}
-
-// parseItem is a complex function because it needs to handle both forward and reverse iteration
-// implementation. We store keys such that their versions are sorted in descending order. This makes
-// forward iteration efficient, but reverse iteration complicated. This tradeoff is better because
-// forward iteration is more common than reverse.
-//
-// This function advances the iterator.
-func (it *Iterator) parseItem() bool {
- mi := it.iitr
- key := mi.Key()
-
- setItem := func(item *Item) {
- if it.item == nil {
- it.item = item
- } else {
- it.data.push(item)
- }
- }
-
- // Skip badger keys.
- if !it.opt.InternalAccess && bytes.HasPrefix(key, badgerPrefix) {
- mi.Next()
- return false
- }
-
- // Skip any versions which are beyond the readTs.
- version := y.ParseTs(key)
- if version > it.readTs {
- mi.Next()
- return false
- }
-
- if it.opt.AllVersions {
- // Return deleted or expired values also, otherwise user can't figure out
- // whether the key was deleted.
- item := it.newItem()
- it.fill(item)
- setItem(item)
- mi.Next()
- return true
- }
-
- // If iterating in forward direction, then just checking the last key against current key would
- // be sufficient.
- if !it.opt.Reverse {
- if y.SameKey(it.lastKey, key) {
- mi.Next()
- return false
- }
- // Only track in forward direction.
- // We should update lastKey as soon as we find a different key in our snapshot.
- // Consider keys: a 5, b 7 (del), b 5. When iterating, lastKey = a.
- // Then we see b 7, which is deleted. If we don't store lastKey = b, we'll then return b 5,
- // which is wrong. Therefore, update lastKey here.
- it.lastKey = y.SafeCopy(it.lastKey, mi.Key())
- }
-
-FILL:
- // If deleted, advance and return.
- vs := mi.Value()
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
- mi.Next()
- return false
- }
-
- item := it.newItem()
- it.fill(item)
- // fill item based on current cursor position. All Next calls have returned, so reaching here
- // means no Next was called.
-
- mi.Next() // Advance but no fill item yet.
- if !it.opt.Reverse || !mi.Valid() { // Forward direction, or invalid.
- setItem(item)
- return true
- }
-
- // Reverse direction.
- nextTs := y.ParseTs(mi.Key())
- mik := y.ParseKey(mi.Key())
- if nextTs <= it.readTs && bytes.Equal(mik, item.key) {
- // This is a valid potential candidate.
- goto FILL
- }
- // Ignore the next candidate. Return the current one.
- setItem(item)
- return true
-}
-
-func (it *Iterator) fill(item *Item) {
- vs := it.iitr.Value()
- item.meta = vs.Meta
- item.userMeta = vs.UserMeta
- item.expiresAt = vs.ExpiresAt
-
- item.version = y.ParseTs(it.iitr.Key())
- item.key = y.SafeCopy(item.key, y.ParseKey(it.iitr.Key()))
-
- item.vptr = y.SafeCopy(item.vptr, vs.Value)
- item.val = nil
- if it.opt.PrefetchValues {
- item.wg.Add(1)
- go func() {
- // FIXME we are not handling errors here.
- item.prefetchValue()
- item.wg.Done()
- }()
- }
-}
-
-func (it *Iterator) prefetch() {
- prefetchSize := 2
- if it.opt.PrefetchValues && it.opt.PrefetchSize > 1 {
- prefetchSize = it.opt.PrefetchSize
- }
-
- i := it.iitr
- var count int
- it.item = nil
- for i.Valid() {
- if !it.parseItem() {
- continue
- }
- count++
- if count == prefetchSize {
- break
- }
- }
-}
-
-// Seek would seek to the provided key if present. If absent, it would seek to the next
-// smallest key greater than the provided key if iterating in the forward direction.
-// Behavior would be reversed if iterating backwards.
-func (it *Iterator) Seek(key []byte) {
- if len(key) > 0 {
- it.txn.addReadKey(key)
- }
- for i := it.data.pop(); i != nil; i = it.data.pop() {
- i.wg.Wait()
- it.waste.push(i)
- }
-
- it.lastKey = it.lastKey[:0]
- if len(key) == 0 {
- key = it.opt.Prefix
- }
- if len(key) == 0 {
- it.iitr.Rewind()
- it.prefetch()
- return
- }
-
- if !it.opt.Reverse {
- key = y.KeyWithTs(key, it.txn.readTs)
- } else {
- key = y.KeyWithTs(key, 0)
- }
- it.iitr.Seek(key)
- it.prefetch()
-}
-
-// Rewind would rewind the iterator cursor all the way to zero-th position, which would be the
-// smallest key if iterating forward, and largest if iterating backward. It does not keep track of
-// whether the cursor started with a Seek().
-func (it *Iterator) Rewind() {
- it.Seek(nil)
-}
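
Two common variations on Seek and prefix handling: a key-only scan (PrefetchValues=false never touches the value log) combined with a Seek to the first candidate key. A sketch, assuming an open *badger.DB:

```go
package example

import (
	badger "github.com/dgraph-io/badger/v2"
)

// keysWithPrefix returns all keys that start with prefix, without
// fetching any values from the value log.
func keysWithPrefix(db *badger.DB, prefix []byte) ([][]byte, error) {
	var keys [][]byte
	err := db.View(func(txn *badger.Txn) error {
		opts := badger.DefaultIteratorOptions
		opts.PrefetchValues = false // key-only iteration is much cheaper
		opts.Prefix = prefix        // lets the iterator skip non-matching SSTables
		it := txn.NewIterator(opts)
		defer it.Close()
		for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
			keys = append(keys, it.Item().KeyCopy(nil))
		}
		return nil
	})
	return keys, err
}
```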
diff --git a/vendor/github.com/dgraph-io/badger/v2/key_registry.go b/vendor/github.com/dgraph-io/badger/v2/key_registry.go
deleted file mode 100644
index db32acd1..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/key_registry.go
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "crypto/aes"
- "crypto/rand"
- "encoding/binary"
- "hash/crc32"
- "io"
- "os"
- "path/filepath"
- "sync"
- "time"
-
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/y"
-)
-
-const (
- // KeyRegistryFileName is the file name for the key registry file.
- KeyRegistryFileName = "KEYREGISTRY"
- // KeyRegistryRewriteFileName is the file name for the rewrite key registry file.
- KeyRegistryRewriteFileName = "REWRITE-KEYREGISTRY"
-)
-
-// sanityText is used to check whether the given user-provided storage key is valid or not.
-var sanityText = []byte("Hello Badger")
-
-// KeyRegistry used to maintain all the data keys.
-type KeyRegistry struct {
- sync.RWMutex
- dataKeys map[uint64]*pb.DataKey
- lastCreated int64 // lastCreated is the timestamp (in seconds) of the last data key generated.
- nextKeyID uint64
- fp *os.File
- opt KeyRegistryOptions
-}
-
-type KeyRegistryOptions struct {
- Dir string
- ReadOnly bool
- EncryptionKey []byte
- EncryptionKeyRotationDuration time.Duration
- InMemory bool
-}
-
-// newKeyRegistry returns KeyRegistry.
-func newKeyRegistry(opt KeyRegistryOptions) *KeyRegistry {
- return &KeyRegistry{
- dataKeys: make(map[uint64]*pb.DataKey),
- nextKeyID: 0,
- opt: opt,
- }
-}
-
-// OpenKeyRegistry opens the key registry if it exists; otherwise it creates
-// a new key registry, and returns it.
-func OpenKeyRegistry(opt KeyRegistryOptions) (*KeyRegistry, error) {
- // sanity check the encryption key length.
- if len(opt.EncryptionKey) > 0 {
- switch len(opt.EncryptionKey) {
- default:
- return nil, y.Wrapf(ErrInvalidEncryptionKey, "During OpenKeyRegistry")
- case 16, 24, 32:
- break
- }
- }
- // If db is opened in InMemory mode, we don't need to write key registry to the disk.
- if opt.InMemory {
- return newKeyRegistry(opt), nil
- }
- path := filepath.Join(opt.Dir, KeyRegistryFileName)
- var flags uint32
- if opt.ReadOnly {
- flags |= y.ReadOnly
- } else {
- flags |= y.Sync
- }
- fp, err := y.OpenExistingFile(path, flags)
- // OpenExistingFile just opens the file.
- // So check whether the file exists or not. If it doesn't,
- // we'll create a new key registry.
- if os.IsNotExist(err) {
- // Create a new registry file if one does not exist.
- kr := newKeyRegistry(opt)
- if opt.ReadOnly {
- return kr, nil
- }
- // Writing the key registry to the file.
- if err := WriteKeyRegistry(kr, opt); err != nil {
- return nil, y.Wrapf(err, "Error while writing key registry.")
- }
- fp, err = y.OpenExistingFile(path, flags)
- if err != nil {
- return nil, y.Wrapf(err, "Error while opening newly created key registry.")
- }
- } else if err != nil {
- return nil, y.Wrapf(err, "Error while opening key registry.")
- }
- kr, err := readKeyRegistry(fp, opt)
- if err != nil {
- // This case happens only if the file is opened properly but
- // cannot be read.
- fp.Close()
- return nil, err
- }
- if opt.ReadOnly {
- // We'll close the file in readonly mode.
- return kr, fp.Close()
- }
- kr.fp = fp
- return kr, nil
-}
-
-// keyRegistryIterator reads all the data keys from the key registry
-type keyRegistryIterator struct {
- encryptionKey []byte
- fp *os.File
- // lenCrcBuf contains crc buf and data length to move forward.
- lenCrcBuf [8]byte
-}
-
-// newKeyRegistryIterator returns an iterator which will allow you to iterate
-// over the data keys of the key registry.
-func newKeyRegistryIterator(fp *os.File, encryptionKey []byte) (*keyRegistryIterator, error) {
- return &keyRegistryIterator{
- encryptionKey: encryptionKey,
- fp: fp,
- lenCrcBuf: [8]byte{},
- }, validRegistry(fp, encryptionKey)
-}
-
-// validRegistry checks whether the given encryption key is valid or not.
-func validRegistry(fp *os.File, encryptionKey []byte) error {
- iv := make([]byte, aes.BlockSize)
- var err error
- if _, err = fp.Read(iv); err != nil {
- return y.Wrapf(err, "Error while reading IV for key registry.")
- }
- eSanityText := make([]byte, len(sanityText))
- if _, err = fp.Read(eSanityText); err != nil {
- return y.Wrapf(err, "Error while reading sanity text.")
- }
- if len(encryptionKey) > 0 {
- // Decrypting sanity text.
- if eSanityText, err = y.XORBlock(eSanityText, encryptionKey, iv); err != nil {
- return y.Wrapf(err, "During validRegistry")
- }
- }
- // Check whether the given key is valid or not.
- if !bytes.Equal(eSanityText, sanityText) {
- return ErrEncryptionKeyMismatch
- }
- return nil
-}
-
-func (kri *keyRegistryIterator) next() (*pb.DataKey, error) {
- var err error
- // Read crc buf and data length.
- if _, err = kri.fp.Read(kri.lenCrcBuf[:]); err != nil {
- // EOF means end of the iteration.
- if err != io.EOF {
- return nil, y.Wrapf(err, "While reading crc in keyRegistryIterator.next")
- }
- return nil, err
- }
- l := int64(binary.BigEndian.Uint32(kri.lenCrcBuf[0:4]))
- // Read protobuf data.
- data := make([]byte, l)
- if _, err = kri.fp.Read(data); err != nil {
- // EOF means end of the iteration.
- if err != io.EOF {
- return nil, y.Wrapf(err, "While reading protobuf in keyRegistryIterator.next")
- }
- return nil, err
- }
- // Check checksum.
- if crc32.Checksum(data, y.CastagnoliCrcTable) != binary.BigEndian.Uint32(kri.lenCrcBuf[4:]) {
- return nil, y.Wrapf(y.ErrChecksumMismatch, "Error while checking checksum for data key.")
- }
- dataKey := &pb.DataKey{}
- if err = dataKey.Unmarshal(data); err != nil {
- return nil, y.Wrapf(err, "While unmarshal of datakey in keyRegistryIterator.next")
- }
- if len(kri.encryptionKey) > 0 {
- // Decrypt the key if the storage key exists.
- if dataKey.Data, err = y.XORBlock(dataKey.Data, kri.encryptionKey, dataKey.Iv); err != nil {
- return nil, y.Wrapf(err, "While decrypting datakey in keyRegistryIterator.next")
- }
- }
- return dataKey, nil
-}
-
-// readKeyRegistry will read the key registry file and build the key registry struct.
-func readKeyRegistry(fp *os.File, opt KeyRegistryOptions) (*KeyRegistry, error) {
- itr, err := newKeyRegistryIterator(fp, opt.EncryptionKey)
- if err != nil {
- return nil, err
- }
- kr := newKeyRegistry(opt)
- var dk *pb.DataKey
- dk, err = itr.next()
- for err == nil && dk != nil {
- if dk.KeyId > kr.nextKeyID {
- // Set the maximum key ID for next key ID generation.
- kr.nextKeyID = dk.KeyId
- }
- if dk.CreatedAt > kr.lastCreated {
- // Set the last generated key timestamp.
- kr.lastCreated = dk.CreatedAt
- }
- // No need to lock since we are building the initial state.
- kr.dataKeys[dk.KeyId] = dk
- // Forward the iterator.
- dk, err = itr.next()
- }
- // We read all the keys. So, ignoring this error.
- if err == io.EOF {
- err = nil
- }
- return kr, err
-}
-
-/*
-Structure of Key Registry.
-+-------------------+---------------------+--------------------+--------------+------------------+
-| IV | Sanity Text | DataKey1 | DataKey2 | ... |
-+-------------------+---------------------+--------------------+--------------+------------------+
-*/
-
-// WriteKeyRegistry will rewrite the existing key registry file with new one.
-// It is okay to pass a closed key registry, since it uses only the data keys.
-func WriteKeyRegistry(reg *KeyRegistry, opt KeyRegistryOptions) error {
- buf := &bytes.Buffer{}
- iv, err := y.GenerateIV()
- y.Check(err)
- // Encrypt the sanity text if the encryption key is present.
- eSanity := sanityText
- if len(opt.EncryptionKey) > 0 {
- var err error
- eSanity, err = y.XORBlock(eSanity, opt.EncryptionKey, iv)
- if err != nil {
- return y.Wrapf(err, "Error while encrpting sanity text in WriteKeyRegistry")
- }
- }
- y.Check2(buf.Write(iv))
- y.Check2(buf.Write(eSanity))
- // Write all the datakeys to the buf.
- for _, k := range reg.dataKeys {
- // Writing the datakey to the given buffer.
- if err := storeDataKey(buf, opt.EncryptionKey, k); err != nil {
- return y.Wrapf(err, "Error while storing datakey in WriteKeyRegistry")
- }
- }
- tmpPath := filepath.Join(opt.Dir, KeyRegistryRewriteFileName)
- // Open temporary file to write the data and do atomic rename.
- fp, err := y.OpenTruncFile(tmpPath, true)
- if err != nil {
- return y.Wrapf(err, "Error while opening tmp file in WriteKeyRegistry")
- }
- // Write buf to the disk.
- if _, err = fp.Write(buf.Bytes()); err != nil {
- // close the fd before returning error. We're not using defer
- // because, for windows we need to close the fd explicitly before
- // renaming.
- fp.Close()
- return y.Wrapf(err, "Error while writing buf in WriteKeyRegistry")
- }
- // In Windows the files should be closed before doing a Rename.
- if err = fp.Close(); err != nil {
- return y.Wrapf(err, "Error while closing tmp file in WriteKeyRegistry")
- }
- // Rename to the original file.
- if err = os.Rename(tmpPath, filepath.Join(opt.Dir, KeyRegistryFileName)); err != nil {
- return y.Wrapf(err, "Error while renaming file in WriteKeyRegistry")
- }
- // Sync Dir.
- return syncDir(opt.Dir)
-}
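
WriteKeyRegistry follows the standard crash-safe rewrite recipe: write to a temp file, close it before renaming (required on Windows), rename over the original, then sync the parent directory. The same recipe in isolation, using only the standard library (atomicRewrite is an illustrative name):

```go
package example

import (
	"os"
	"path/filepath"
)

// atomicRewrite atomically replaces path with data: write a temp file,
// close it before renaming, rename over the original, then fsync the
// parent directory entry.
func atomicRewrite(path string, data []byte) error {
	tmp := path + ".rewrite"
	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Rename(tmp, path); err != nil {
		return err
	}
	d, err := os.Open(filepath.Dir(path))
	if err != nil {
		return err
	}
	defer d.Close()
	return d.Sync()
}
```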
-
-// dataKey returns datakey of the given key id.
-func (kr *KeyRegistry) dataKey(id uint64) (*pb.DataKey, error) {
- kr.RLock()
- defer kr.RUnlock()
- if id == 0 {
- // nil represents plain text.
- return nil, nil
- }
- dk, ok := kr.dataKeys[id]
- if !ok {
- return nil, y.Wrapf(ErrInvalidDataKeyID, "Error for the KEY ID %d", id)
- }
- return dk, nil
-}
-
-// latestDataKey returns the latest generated data key based on the rotation
-// period. If the lifetime of the last generated data key exceeds the rotation
-// period, it creates a new data key.
-func (kr *KeyRegistry) latestDataKey() (*pb.DataKey, error) {
- if len(kr.opt.EncryptionKey) == 0 {
- // nil is for no encryption.
- return nil, nil
- }
- // validKey returns the data key if the time since the last generated key
- // is less than the rotation duration.
- validKey := func() (*pb.DataKey, bool) {
- // Time difference since the key was last generated.
- diff := time.Since(time.Unix(kr.lastCreated, 0))
- if diff < kr.opt.EncryptionKeyRotationDuration {
- return kr.dataKeys[kr.nextKeyID], true
- }
- return nil, false
- }
- kr.RLock()
- key, valid := validKey()
- kr.RUnlock()
- if valid {
- // If less than EncryptionKeyRotationDuration, returns the last generated key.
- return key, nil
- }
- kr.Lock()
- defer kr.Unlock()
- // The key might have been generated by another goroutine, so
- // check once again.
- key, valid = validKey()
- if valid {
- return key, nil
- }
- k := make([]byte, len(kr.opt.EncryptionKey))
- iv, err := y.GenerateIV()
- if err != nil {
- return nil, err
- }
- _, err = rand.Read(k)
- if err != nil {
- return nil, err
- }
- // Otherwise Increment the KeyID and generate new datakey.
- kr.nextKeyID++
- dk := &pb.DataKey{
- KeyId: kr.nextKeyID,
- Data: k,
- CreatedAt: time.Now().Unix(),
- Iv: iv,
- }
- // Don't store the datakey on file if badger is running in InMemory mode.
- if !kr.opt.InMemory {
- // Store the data key.
- buf := &bytes.Buffer{}
- if err = storeDataKey(buf, kr.opt.EncryptionKey, dk); err != nil {
- return nil, err
- }
- // Persist the datakey to the disk
- if _, err = kr.fp.Write(buf.Bytes()); err != nil {
- return nil, err
- }
- }
- // storeDataKey encrypts the data key, so place the unencrypted key in memory.
- dk.Data = k
- kr.lastCreated = dk.CreatedAt
- kr.dataKeys[kr.nextKeyID] = dk
- return dk, nil
-}
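
latestDataKey is a textbook double-checked locking pattern: an optimistic check under the read lock, then a re-check under the write lock before doing the expensive work, because another goroutine may have created the key in between. The same shape in isolation:

```go
package example

import "sync"

// cache demonstrates the fast-path/slow-path shape used by latestDataKey.
type cache struct {
	mu  sync.RWMutex
	val []byte
}

func (c *cache) get(build func() []byte) []byte {
	c.mu.RLock()
	v := c.val
	c.mu.RUnlock()
	if v != nil { // fast path: already built
		return v
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.val == nil { // re-check: another goroutine may have won the race
		c.val = build()
	}
	return c.val
}
```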
-
-// Close closes the key registry.
-func (kr *KeyRegistry) Close() error {
- if !(kr.opt.ReadOnly || kr.opt.InMemory) {
- return kr.fp.Close()
- }
- return nil
-}
-
-// storeDataKey stores the data key in the given buffer, encrypted if a storage key is present.
-func storeDataKey(buf *bytes.Buffer, storageKey []byte, k *pb.DataKey) error {
- // xor encrypts the data by XORing it with the keystream derived from the IV.
- // It'll be used for both encryption and decryption.
- xor := func() error {
- if len(storageKey) == 0 {
- return nil
- }
- var err error
- k.Data, err = y.XORBlock(k.Data, storageKey, k.Iv)
- return err
- }
- // The in-memory data key is plain text, so encrypt it before storing to disk.
- var err error
- if err = xor(); err != nil {
- return y.Wrapf(err, "Error while encrypting datakey in storeDataKey")
- }
- var data []byte
- if data, err = k.Marshal(); err != nil {
- err = y.Wrapf(err, "Error while marshaling datakey in storeDataKey")
- var err2 error
- // decrypting the datakey back.
- if err2 = xor(); err2 != nil {
- return y.Wrapf(err,
- y.Wrapf(err2, "Error while decrypting datakey in storeDataKey").Error())
- }
- return err
- }
- var lenCrcBuf [8]byte
- binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(data)))
- binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(data, y.CastagnoliCrcTable))
- y.Check2(buf.Write(lenCrcBuf[:]))
- y.Check2(buf.Write(data))
- // Decrypting the datakey back since we're using the pointer.
- return xor()
-}
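
The trick storeDataKey leans on is that the same xor call both encrypts and decrypts: XORing with the same keystream twice is the identity. Badger's y.XORBlock appears to be an AES-CTR XOR of roughly this shape; a hedged sketch, not the vendored implementation:

```go
package example

import (
	"crypto/aes"
	"crypto/cipher"
)

// xorBlock XORs src with the AES-CTR keystream derived from key and iv.
// Applying it twice with the same key/iv returns the original data,
// which is why one helper serves as both encrypt and decrypt.
func xorBlock(src, key, iv []byte) ([]byte, error) {
	block, err := aes.NewCipher(key) // key must be 16, 24, or 32 bytes
	if err != nil {
		return nil, err
	}
	dst := make([]byte, len(src))
	cipher.NewCTR(block, iv).XORKeyStream(dst, src)
	return dst, nil
}
```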
diff --git a/vendor/github.com/dgraph-io/badger/v2/level_handler.go b/vendor/github.com/dgraph-io/badger/v2/level_handler.go
deleted file mode 100644
index ce48965f..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/level_handler.go
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "sort"
- "sync"
-
- "github.com/dgryski/go-farm"
-
- "github.com/dgraph-io/badger/v2/table"
- "github.com/dgraph-io/badger/v2/y"
- "github.com/pkg/errors"
-)
-
-type levelHandler struct {
- // Guards tables, totalSize.
- sync.RWMutex
-
- // For level >= 1, tables are sorted by key ranges, which do not overlap.
- // For level 0, tables are sorted by time.
- // For level 0, the newest tables are at the back. Compact the oldest one first, which is at the front.
- tables []*table.Table
- totalSize int64
-
- // The following are initialized once and const.
- level int
- strLevel string
- maxTotalSize int64
- db *DB
-}
-
-func (s *levelHandler) getTotalSize() int64 {
- s.RLock()
- defer s.RUnlock()
- return s.totalSize
-}
-
-// initTables replaces s.tables with given tables. This is done during loading.
-func (s *levelHandler) initTables(tables []*table.Table) {
- s.Lock()
- defer s.Unlock()
-
- s.tables = tables
- s.totalSize = 0
- for _, t := range tables {
- s.totalSize += t.Size()
- }
-
- if s.level == 0 {
- // Key range will overlap. Just sort by fileID in ascending order
- // because newer tables are at the end of level 0.
- sort.Slice(s.tables, func(i, j int) bool {
- return s.tables[i].ID() < s.tables[j].ID()
- })
- } else {
- // Sort tables by keys.
- sort.Slice(s.tables, func(i, j int) bool {
- return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
- })
- }
-}
-
-// deleteTables removes the given tables from the level.
-func (s *levelHandler) deleteTables(toDel []*table.Table) error {
- s.Lock() // s.Unlock() below
-
- toDelMap := make(map[uint64]struct{})
- for _, t := range toDel {
- toDelMap[t.ID()] = struct{}{}
- }
-
- // Make a copy as iterators might be keeping a slice of tables.
- var newTables []*table.Table
- for _, t := range s.tables {
- _, found := toDelMap[t.ID()]
- if !found {
- newTables = append(newTables, t)
- continue
- }
- s.totalSize -= t.Size()
- }
- s.tables = newTables
-
- s.Unlock() // Unlock s _before_ we DecrRef our tables, which can be slow.
-
- return decrRefs(toDel)
-}
-
-// replaceTables replaces the tables in toDel with the tables in toAdd.
-// You must call decr() to delete the old tables _after_ writing the update to the manifest.
-func (s *levelHandler) replaceTables(toDel, toAdd []*table.Table) error {
- // Need to re-search the range of tables in this level to be replaced as other goroutines might
- // be changing it as well. (They can't touch our tables, but if they add/remove other tables,
- // the indices get shifted around.)
- s.Lock() // We s.Unlock() below.
-
- toDelMap := make(map[uint64]struct{})
- for _, t := range toDel {
- toDelMap[t.ID()] = struct{}{}
- }
- var newTables []*table.Table
- for _, t := range s.tables {
- _, found := toDelMap[t.ID()]
- if !found {
- newTables = append(newTables, t)
- continue
- }
- s.totalSize -= t.Size()
- }
-
- // Increase totalSize first.
- for _, t := range toAdd {
- s.totalSize += t.Size()
- t.IncrRef()
- newTables = append(newTables, t)
- }
-
- // Assign tables.
- s.tables = newTables
- sort.Slice(s.tables, func(i, j int) bool {
- return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
- })
- s.Unlock() // s.Unlock before we DecrRef tables -- that can be slow.
- return decrRefs(toDel)
-}
-
-// addTable adds toAdd table to levelHandler. Normally when we add tables to levelHandler, we sort
-// tables based on table.Smallest. This is required for correctness of the system. But in case of
-// stream writer this can be avoided. We can just add tables to levelHandler's table list
-// and after all addTable calls, we can sort the table list (see the sortTables method).
-// NOTE: levelHandler.sortTables() should be called after all addTable calls are done.
-func (s *levelHandler) addTable(t *table.Table) {
- s.Lock()
- defer s.Unlock()
-
- s.totalSize += t.Size() // Increase totalSize first.
- t.IncrRef()
- s.tables = append(s.tables, t)
-}
-
-// sortTables sorts tables of levelHandler based on table.Smallest.
-// Normally it should be called after all addTable calls.
-func (s *levelHandler) sortTables() {
- s.RLock()
- defer s.RUnlock()
-
- sort.Slice(s.tables, func(i, j int) bool {
- return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
- })
-}
-
-func decrRefs(tables []*table.Table) error {
- for _, table := range tables {
- if err := table.DecrRef(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func newLevelHandler(db *DB, level int) *levelHandler {
- return &levelHandler{
- level: level,
- strLevel: fmt.Sprintf("l%d", level),
- db: db,
- }
-}
-
-// tryAddLevel0Table returns true if ok and no stalling.
-func (s *levelHandler) tryAddLevel0Table(t *table.Table) bool {
- y.AssertTrue(s.level == 0)
- // Need lock as we may be deleting the first table during a level 0 compaction.
- s.Lock()
- defer s.Unlock()
- // Stall (by returning false) if we are above the specified stall setting for L0.
- if len(s.tables) >= s.db.opt.NumLevelZeroTablesStall {
- return false
- }
-
- s.tables = append(s.tables, t)
- t.IncrRef()
- s.totalSize += t.Size()
-
- return true
-}
-
-func (s *levelHandler) numTables() int {
- s.RLock()
- defer s.RUnlock()
- return len(s.tables)
-}
-
-func (s *levelHandler) close() error {
- s.RLock()
- defer s.RUnlock()
- var err error
- for _, t := range s.tables {
- if closeErr := t.Close(); closeErr != nil && err == nil {
- err = closeErr
- }
- }
- return errors.Wrap(err, "levelHandler.close")
-}
-
- // getTableForKey acquires a read-lock to access s.tables. It returns the tables that may
- // contain the key, along with a function that decrements their references.
-func (s *levelHandler) getTableForKey(key []byte) ([]*table.Table, func() error) {
- s.RLock()
- defer s.RUnlock()
-
- if s.level == 0 {
- // For level 0, we need to check every table. Remember to make a copy as s.tables may change
- // once we exit this function, and we don't want to lock s.tables while seeking in tables.
- // CAUTION: Reverse the tables.
- out := make([]*table.Table, 0, len(s.tables))
- for i := len(s.tables) - 1; i >= 0; i-- {
- out = append(out, s.tables[i])
- s.tables[i].IncrRef()
- }
- return out, func() error {
- for _, t := range out {
- if err := t.DecrRef(); err != nil {
- return err
- }
- }
- return nil
- }
- }
- // For level >= 1, we can do a binary search as key ranges do not overlap.
- idx := sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(s.tables[i].Biggest(), key) >= 0
- })
- if idx >= len(s.tables) {
- // Given key is strictly > than every element we have.
- return nil, func() error { return nil }
- }
- tbl := s.tables[idx]
- tbl.IncrRef()
- return []*table.Table{tbl}, tbl.DecrRef
-}
-
- // get returns the highest version of the value stored for the given key. If the key is not
- // found, it returns a zero y.ValueStruct.
-func (s *levelHandler) get(key []byte) (y.ValueStruct, error) {
- tables, decr := s.getTableForKey(key)
- keyNoTs := y.ParseKey(key)
-
- hash := farm.Fingerprint64(keyNoTs)
- var maxVs y.ValueStruct
- for _, th := range tables {
- if th.DoesNotHave(hash) {
- y.NumLSMBloomHits.Add(s.strLevel, 1)
- continue
- }
-
- it := th.NewIterator(false)
- defer it.Close()
-
- y.NumLSMGets.Add(s.strLevel, 1)
- it.Seek(key)
- if !it.Valid() {
- continue
- }
- if y.SameKey(key, it.Key()) {
- if version := y.ParseTs(it.Key()); maxVs.Version < version {
- maxVs = it.ValueCopy()
- maxVs.Version = version
- }
- }
- }
- return maxVs, decr()
-}
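The bloom-filter gate in get (DoesNotHave on a farm fingerprint) is what keeps point lookups cheap across many tables. Here is a toy sketch of the idea, using a stdlib hash and a plain set in place of farm.Fingerprint64 and a real bloom filter; all names are invented stand-ins:

package main

import (
	"fmt"
	"hash/fnv"
)

// fingerprint plays the role of farm.Fingerprint64 over the key with its
// timestamp suffix stripped.
func fingerprint(keyNoTs []byte) uint64 {
	h := fnv.New64a()
	h.Write(keyNoTs)
	return h.Sum64()
}

// sstable stands in for *table.Table; fps records which fingerprints it holds.
type sstable struct{ fps map[uint64]bool }

// doesNotHave reports a definite miss. A real bloom filter may return false
// positives but never false negatives, so skipping a table on true is safe.
func (t *sstable) doesNotHave(fp uint64) bool { return !t.fps[fp] }

func main() {
	tbl := &sstable{fps: map[uint64]bool{fingerprint([]byte("hello")): true}}
	for _, k := range []string{"hello", "world"} {
		if tbl.doesNotHave(fingerprint([]byte(k))) {
			fmt.Println(k, "-> skip table (counted as a bloom hit)")
			continue
		}
		fmt.Println(k, "-> seek into table")
	}
}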
-
-// appendIterators appends iterators to an array of iterators, for merging.
-// Note: This obtains references for the table handlers. Remember to close these iterators.
-func (s *levelHandler) appendIterators(iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
- s.RLock()
- defer s.RUnlock()
-
- if s.level == 0 {
- // Remember to add in reverse order!
- // The newer table at the end of s.tables should be added first as it takes precedence.
- // Level 0 tables are not in key sorted order, so we need to consider them one by one.
- var out []*table.Table
- for _, t := range s.tables {
- if opt.pickTable(t) {
- out = append(out, t)
- }
- }
- return appendIteratorsReversed(iters, out, opt.Reverse)
- }
-
- tables := opt.pickTables(s.tables)
- if len(tables) == 0 {
- return iters
- }
- return append(iters, table.NewConcatIterator(tables, opt.Reverse))
-}
-
-type levelHandlerRLocked struct{}
-
-// overlappingTables returns the tables that intersect with key range. Returns a half-interval.
- // The caller must already hold a read lock; this is important enough that the caller has to
- // pass an empty levelHandlerRLocked value to declare it.
-func (s *levelHandler) overlappingTables(_ levelHandlerRLocked, kr keyRange) (int, int) {
- if len(kr.left) == 0 || len(kr.right) == 0 {
- return 0, 0
- }
- left := sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(kr.left, s.tables[i].Biggest()) <= 0
- })
- right := sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(kr.right, s.tables[i].Smallest()) < 0
- })
- return left, right
-}
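Because the tables on a level >= 1 are sorted and disjoint, the two binary searches above can be exercised in isolation. A self-contained sketch of the half-open [left, right) computation, with a made-up span type:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

type span struct{ smallest, biggest []byte }

// overlapping returns the half-open interval [left, right) of spans whose key
// ranges intersect [lo, hi], assuming spans are sorted and disjoint.
func overlapping(spans []span, lo, hi []byte) (int, int) {
	left := sort.Search(len(spans), func(i int) bool {
		return bytes.Compare(lo, spans[i].biggest) <= 0
	})
	right := sort.Search(len(spans), func(i int) bool {
		return bytes.Compare(hi, spans[i].smallest) < 0
	})
	return left, right
}

func main() {
	spans := []span{
		{[]byte("a"), []byte("f")},
		{[]byte("g"), []byte("p")},
		{[]byte("q"), []byte("z")},
	}
	l, r := overlapping(spans, []byte("e"), []byte("h"))
	fmt.Println(l, r) // 0 2: the first two spans intersect ["e", "h"]
}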
diff --git a/vendor/github.com/dgraph-io/badger/v2/levels.go b/vendor/github.com/dgraph-io/badger/v2/levels.go
deleted file mode 100644
index f99ba0a2..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/levels.go
+++ /dev/null
@@ -1,1200 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "os"
- "sort"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/trace"
-
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/table"
- "github.com/dgraph-io/badger/v2/y"
- "github.com/pkg/errors"
-)
-
-type levelsController struct {
- nextFileID uint64 // Atomic
-
- // The following are initialized once and const.
- levels []*levelHandler
- kv *DB
-
- cstatus compactStatus
- // This is for getting timings between stalls.
- lastUnstalled time.Time
-}
-
-// revertToManifest checks that all necessary table files exist and removes all table files not
-// referenced by the manifest. idMap is a set of table file id's that were read from the directory
-// listing.
-func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {
- // 1. Check all files in manifest exist.
- for id := range mf.Tables {
- if _, ok := idMap[id]; !ok {
- return fmt.Errorf("file does not exist for table %d", id)
- }
- }
-
- // 2. Delete files that shouldn't exist.
- for id := range idMap {
- if _, ok := mf.Tables[id]; !ok {
- kv.opt.Debugf("Table file %d not referenced in MANIFEST\n", id)
- filename := table.NewFilename(id, kv.opt.Dir)
- if err := os.Remove(filename); err != nil {
- return y.Wrapf(err, "While removing table %d", id)
- }
- }
- }
-
- return nil
-}
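revertToManifest is a plain set reconciliation: the manifest is authoritative, the directory listing is evidence. A hedged sketch of the same shape, with made-up inputs and an injected remove callback:

package main

import "fmt"

// reconcile mirrors revertToManifest: every id in the manifest must exist on
// disk, and any on-disk id missing from the manifest is deleted as garbage.
func reconcile(manifest, onDisk map[uint64]struct{}, remove func(uint64)) error {
	for id := range manifest {
		if _, ok := onDisk[id]; !ok {
			return fmt.Errorf("file does not exist for table %d", id)
		}
	}
	for id := range onDisk {
		if _, ok := manifest[id]; !ok {
			remove(id)
		}
	}
	return nil
}

func main() {
	manifest := map[uint64]struct{}{1: {}, 2: {}}
	onDisk := map[uint64]struct{}{1: {}, 2: {}, 3: {}}
	err := reconcile(manifest, onDisk, func(id uint64) {
		fmt.Println("removing orphan table", id)
	})
	fmt.Println("err:", err)
}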
-
-func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) {
- y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables)
- s := &levelsController{
- kv: db,
- levels: make([]*levelHandler, db.opt.MaxLevels),
- }
- s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels)
-
- for i := 0; i < db.opt.MaxLevels; i++ {
- s.levels[i] = newLevelHandler(db, i)
- switch i {
- case 0:
- // Do nothing.
- case 1:
- // Level 1 probably shouldn't be too much bigger than level 0.
- s.levels[i].maxTotalSize = db.opt.LevelOneSize
- default:
- s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier)
- }
- s.cstatus.levels[i] = new(levelCompactStatus)
- }
-
- if db.opt.InMemory {
- return s, nil
- }
- // Compare the manifest against the directory: check for existent/non-existent files, and
- // remove any files not referenced by the manifest.
- if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil {
- return nil, err
- }
-
- // Some files may be deleted. Let's reload.
- var flags uint32 = y.Sync
- if db.opt.ReadOnly {
- flags |= y.ReadOnly
- }
-
- var mu sync.Mutex
- tables := make([][]*table.Table, db.opt.MaxLevels)
- var maxFileID uint64
-
- // We found that using 3 goroutines allows disk throughput to be utilized to its max. Disk
- // utilization is the main thing we should focus on while reading the data, as it is the one
- // factor that remains constant between HDD and SSD.
- throttle := y.NewThrottle(3)
-
- start := time.Now()
- var numOpened int32
- tick := time.NewTicker(3 * time.Second)
- defer tick.Stop()
-
- for fileID, tf := range mf.Tables {
- fname := table.NewFilename(fileID, db.opt.Dir)
- select {
- case <-tick.C:
- db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened),
- len(mf.Tables), time.Since(start).Round(time.Millisecond))
- default:
- }
- if err := throttle.Do(); err != nil {
- closeAllTables(tables)
- return nil, err
- }
- if fileID > maxFileID {
- maxFileID = fileID
- }
- go func(fname string, tf TableManifest) {
- var rerr error
- defer func() {
- throttle.Done(rerr)
- atomic.AddInt32(&numOpened, 1)
- }()
- fd, err := y.OpenExistingFile(fname, flags)
- if err != nil {
- rerr = errors.Wrapf(err, "Opening file: %q", fname)
- return
- }
- dk, err := db.registry.dataKey(tf.KeyID)
- if err != nil {
- rerr = errors.Wrapf(err, "Error while reading datakey")
- return
- }
- topt := buildTableOptions(db.opt)
- // Set compression from table manifest.
- topt.Compression = tf.Compression
- topt.DataKey = dk
- topt.BlockCache = db.blockCache
- topt.IndexCache = db.indexCache
- t, err := table.OpenTable(fd, topt)
- if err != nil {
- if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") {
- db.opt.Errorf(err.Error())
- db.opt.Errorf("Ignoring table %s", fd.Name())
- // Do not set rerr. We will continue without this table.
- } else {
- rerr = errors.Wrapf(err, "Opening table: %q", fname)
- }
- return
- }
-
- mu.Lock()
- tables[tf.Level] = append(tables[tf.Level], t)
- mu.Unlock()
- }(fname, tf)
- }
- if err := throttle.Finish(); err != nil {
- closeAllTables(tables)
- return nil, err
- }
- db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened),
- time.Since(start).Round(time.Millisecond))
- s.nextFileID = maxFileID + 1
- for i, tbls := range tables {
- s.levels[i].initTables(tbls)
- }
-
- // Make sure key ranges do not overlap etc.
- if err := s.validate(); err != nil {
- _ = s.cleanupLevels()
- return nil, errors.Wrap(err, "Level validation")
- }
-
- // Sync directory (because we have at least removed some files, or previously created the
- // manifest file).
- if err := syncDir(db.opt.Dir); err != nil {
- _ = s.close()
- return nil, err
- }
-
- return s, nil
-}
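The table-opening loop above bounds parallelism with y.NewThrottle(3). A self-contained sketch of the same bounded-parallelism pattern using a buffered-channel semaphore; openAll and openTable are invented names, not the vendored API:

package main

import (
	"fmt"
	"sync"
)

// openAll opens the given table files with at most 3 goroutines in flight,
// collecting the first error while letting the remaining work finish.
func openAll(names []string, openTable func(string) error) error {
	sem := make(chan struct{}, 3) // acquire by send, release by receive
	var (
		wg       sync.WaitGroup
		mu       sync.Mutex
		firstErr error
	)
	for _, name := range names {
		sem <- struct{}{}
		wg.Add(1)
		go func(name string) {
			defer func() { <-sem; wg.Done() }()
			if err := openTable(name); err != nil {
				mu.Lock()
				if firstErr == nil {
					firstErr = err
				}
				mu.Unlock()
			}
		}(name)
	}
	wg.Wait()
	return firstErr
}

func main() {
	err := openAll([]string{"000001.sst", "000002.sst"}, func(name string) error {
		fmt.Println("opening", name)
		return nil
	})
	fmt.Println("err:", err)
}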
-
-// Closes the tables, for cleanup in newLevelsController. (We Close() instead of using DecrRef()
-// because that would delete the underlying files.) We ignore errors, which is OK because tables
-// are read-only.
-func closeAllTables(tables [][]*table.Table) {
- for _, tableSlice := range tables {
- for _, table := range tableSlice {
- _ = table.Close()
- }
- }
-}
-
-func (s *levelsController) cleanupLevels() error {
- var firstErr error
- for _, l := range s.levels {
- if err := l.close(); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- return firstErr
-}
-
-// dropTree picks all tables from all levels, creates a manifest changeset,
-// applies it, and then decrements the refs of these tables, which would result
-// in their deletion.
-func (s *levelsController) dropTree() (int, error) {
- // First pick all tables, so we can create a manifest changelog.
- var all []*table.Table
- for _, l := range s.levels {
- l.RLock()
- all = append(all, l.tables...)
- l.RUnlock()
- }
- if len(all) == 0 {
- return 0, nil
- }
-
- // Generate the manifest changes.
- changes := []*pb.ManifestChange{}
- for _, table := range all {
- // Add a delete change only if the table is not in memory.
- if !table.IsInmemory {
- changes = append(changes, newDeleteChange(table.ID()))
- }
- }
- changeSet := pb.ManifestChangeSet{Changes: changes}
- if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
- return 0, err
- }
-
- // Now that manifest has been successfully written, we can delete the tables.
- for _, l := range s.levels {
- l.Lock()
- l.totalSize = 0
- l.tables = l.tables[:0]
- l.Unlock()
- }
- for _, table := range all {
- if err := table.DecrRef(); err != nil {
- return 0, err
- }
- }
- return len(all), nil
-}
-
- // dropPrefixes runs an L0->L1 compaction, and then runs a same-level compaction on the rest of
- // the levels. For the L0->L1 compaction, it runs compactions normally, but skips over all the
- // keys with the provided prefixes, as well as the internal move keys for the same prefixes.
- // For Li->Li compactions, it picks up the tables that could contain the prefixes. Tables whose
- // keys all carry one of the prefixes are dropped outright. Tables that also hold other keys
- // are run through a MergeIterator and compacted to create new tables. All the usual compaction
- // mechanics apply, i.e. level sizes and the MANIFEST are updated as in the normal flow.
-func (s *levelsController) dropPrefixes(prefixes [][]byte) error {
- // Internal move keys related to the given prefix should also be skipped.
- for _, prefix := range prefixes {
- key := make([]byte, 0, len(badgerMove)+len(prefix))
- key = append(key, badgerMove...)
- key = append(key, prefix...)
- prefixes = append(prefixes, key)
- }
-
- opt := s.kv.opt
- // Iterate the levels in reverse order: if we went from level 0 up to, say, level 3, we could
- // reach a state in which level 0 has been compacted while an older version of a key still
- // exists at a deeper level. If someone created an iterator at that point, they would see the
- // old value for that key. Iterating in reverse order ensures we drop the oldest data first,
- // so that lookups never return stale data.
- for i := len(s.levels) - 1; i >= 0; i-- {
- l := s.levels[i]
-
- l.RLock()
- if l.level == 0 {
- size := len(l.tables)
- l.RUnlock()
-
- if size > 0 {
- cp := compactionPriority{
- level: 0,
- score: 1.74,
- // A unique number greater than 1.0 does two things. Helps identify this
- // function in logs, and forces a compaction.
- dropPrefixes: prefixes,
- }
- if err := s.doCompact(174, cp); err != nil {
- opt.Warningf("While compacting level 0: %v", err)
- return nil
- }
- }
- continue
- }
-
- // Build a list of compaction tableGroups affecting all the prefixes we
- // need to drop. We need to build tableGroups that satisfy the invariant that
- // bottom tables are consecutive.
- // tableGroup contains groups of consecutive tables.
- var tableGroups [][]*table.Table
- var tableGroup []*table.Table
-
- finishGroup := func() {
- if len(tableGroup) > 0 {
- tableGroups = append(tableGroups, tableGroup)
- tableGroup = nil
- }
- }
-
- for _, table := range l.tables {
- if containsAnyPrefixes(table.Smallest(), table.Biggest(), prefixes) {
- tableGroup = append(tableGroup, table)
- } else {
- finishGroup()
- }
- }
- finishGroup()
-
- l.RUnlock()
-
- if len(tableGroups) == 0 {
- continue
- }
-
- opt.Infof("Dropping prefix at level %d (%d tableGroups)", l.level, len(tableGroups))
- for _, operation := range tableGroups {
- cd := compactDef{
- elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"),
- thisLevel: l,
- nextLevel: l,
- top: nil,
- bot: operation,
- dropPrefixes: prefixes,
- }
- if err := s.runCompactDef(l.level, cd); err != nil {
- opt.Warningf("While running compact def: %+v. Error: %v", cd, err)
- return err
- }
- }
- }
- return nil
-}
-
-func (s *levelsController) startCompact(lc *y.Closer) {
- n := s.kv.opt.NumCompactors
- lc.AddRunning(n - 1)
- for i := 0; i < n; i++ {
- // The worker with id=0 is dedicated to L0 and L1. This is not counted
- // towards the user specified NumCompactors.
- go s.runCompactor(i, lc)
- }
-}
-
-func (s *levelsController) runCompactor(id int, lc *y.Closer) {
- defer lc.Done()
-
- randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond)
- select {
- case <-randomDelay.C:
- case <-lc.HasBeenClosed():
- randomDelay.Stop()
- return
- }
-
- ticker := time.NewTicker(100 * time.Millisecond)
- defer ticker.Stop()
-
- for {
- select {
- // Can add a done channel or other stuff.
- case <-ticker.C:
- prios := s.pickCompactLevels()
- loop:
- for _, p := range prios {
- if id == 0 && p.level > 1 {
- // If I'm ID zero, I only compact L0 and L1.
- continue
- }
- if id != 0 && p.level <= 1 {
- // If I'm ID non-zero, I do NOT compact L0 and L1.
- continue
- }
- err := s.doCompact(id, p)
- switch err {
- case nil:
- break loop
- case errFillTables:
- // pass
- default:
- s.kv.opt.Warningf("While running doCompact: %v\n", err)
- }
- }
- case <-lc.HasBeenClosed():
- return
- }
- }
-}
-
-// Returns true if level zero may be compacted, without accounting for compactions that already
-// might be happening.
-func (s *levelsController) isLevel0Compactable() bool {
- return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables
-}
-
-// Returns true if the non-zero level may be compacted. delSize provides the size of the tables
-// which are currently being compacted so that we treat them as already having started being
-// compacted (because they have been, yet their size is already counted in getTotalSize).
-func (l *levelHandler) isCompactable(delSize int64) bool {
- return l.getTotalSize()-delSize >= l.maxTotalSize
-}
-
-type compactionPriority struct {
- level int
- score float64
- dropPrefixes [][]byte
-}
-
- // pickCompactLevels determines which levels to compact.
-// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction
-func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
- // This function must use identical criteria for guaranteeing compaction's progress that
- // addLevel0Table uses.
-
- // cstatus is checked to see if level 0's tables are already being compacted
- if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() {
- pri := compactionPriority{
- level: 0,
- score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables),
- }
- prios = append(prios, pri)
- }
-
- for i, l := range s.levels[1:] {
- // Don't consider those tables that are already being compacted right now.
- delSize := s.cstatus.delSize(i + 1)
-
- if l.isCompactable(delSize) {
- pri := compactionPriority{
- level: i + 1,
- score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize),
- }
- prios = append(prios, pri)
- }
- }
- // We should continue to sort the compaction priorities by score. Now that we have a dedicated
- // compactor for L0 and L1, we don't need to sort by level here.
- sort.Slice(prios, func(i, j int) bool {
- return prios[i].score > prios[j].score
- })
- return prios
-}
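A simplified model of the scoring above, ignoring the cstatus overlap check and the in-flight delSize adjustment for brevity; all names here are invented for illustration:

package main

import (
	"fmt"
	"sort"
)

type priority struct {
	level int
	score float64
}

// pickPriorities mirrors pickCompactLevels: L0 is scored by table count
// against its threshold, deeper levels by size against their budget, and
// the result is sorted by descending score.
func pickPriorities(l0Tables, l0Threshold int, sizes, budgets []int64) []priority {
	var prios []priority
	if l0Tables >= l0Threshold {
		prios = append(prios, priority{0, float64(l0Tables) / float64(l0Threshold)})
	}
	for i := range sizes {
		if sizes[i] >= budgets[i] {
			prios = append(prios, priority{i + 1, float64(sizes[i]) / float64(budgets[i])})
		}
	}
	sort.Slice(prios, func(i, j int) bool { return prios[i].score > prios[j].score })
	return prios
}

func main() {
	// L0 holds 7 tables against a threshold of 5; L1 is over budget, L2 is not.
	fmt.Println(pickPriorities(7, 5,
		[]int64{300 << 20, 2 << 30},
		[]int64{256 << 20, 2560 << 20}))
}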
-
-// checkOverlap checks if the given tables overlap with any level from the given "lev" onwards.
-func (s *levelsController) checkOverlap(tables []*table.Table, lev int) bool {
- kr := getKeyRange(tables...)
- for i, lh := range s.levels {
- if i < lev { // Skip upper levels.
- continue
- }
- lh.RLock()
- left, right := lh.overlappingTables(levelHandlerRLocked{}, kr)
- lh.RUnlock()
- if right-left > 0 {
- return true
- }
- }
- return false
-}
-
-// compactBuildTables merges topTables and botTables to form a list of new tables.
-func (s *levelsController) compactBuildTables(
- lev int, cd compactDef) ([]*table.Table, func() error, error) {
- topTables := cd.top
- botTables := cd.bot
-
- // Check overlap of the top level with the levels which are not being
- // compacted in this compaction.
- hasOverlap := s.checkOverlap(cd.allTables(), cd.nextLevel.level+1)
-
- // Try to collect stats so that we can inform value log about GC. That would help us find which
- // value log file should be GCed.
- discardStats := make(map[uint32]int64)
- updateStats := func(vs y.ValueStruct) {
- // We don't need to store/update discard stats when badger is running in Disk-less mode.
- if s.kv.opt.InMemory {
- return
- }
- if vs.Meta&bitValuePointer > 0 {
- var vp valuePointer
- vp.Decode(vs.Value)
- discardStats[vp.Fid] += int64(vp.Len)
- }
- }
-
- // Create iterators across all the tables involved first.
- var iters []y.Iterator
- switch {
- case lev == 0:
- iters = appendIteratorsReversed(iters, topTables, false)
- case len(topTables) > 0:
- y.AssertTrue(len(topTables) == 1)
- iters = []y.Iterator{topTables[0].NewIterator(false)}
- }
-
- // Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap.
- var valid []*table.Table
-
-nextTable:
- for _, table := range botTables {
- if len(cd.dropPrefixes) > 0 {
- for _, prefix := range cd.dropPrefixes {
- if bytes.HasPrefix(table.Smallest(), prefix) &&
- bytes.HasPrefix(table.Biggest(), prefix) {
- // All the keys in this table have the dropPrefix. So, this
- // table does not need to be in the iterator and can be
- // dropped immediately.
- continue nextTable
- }
- }
- }
- valid = append(valid, table)
- }
- iters = append(iters, table.NewConcatIterator(valid, false))
- it := table.NewMergeIterator(iters, false)
- defer it.Close() // Important to close the iterator to do ref counting.
-
- it.Rewind()
-
- // Pick a discard ts, so we can discard versions below this ts. We should
- // never discard any versions starting from above this timestamp, because
- // that would affect the snapshot view guarantee provided by transactions.
- discardTs := s.kv.orc.discardAtOrBelow()
-
- var numBuilds, numVersions int
- var lastKey, skipKey []byte
- var vp valuePointer
- var newTables []*table.Table
- mu := new(sync.Mutex) // Guards newTables
-
- inflightBuilders := y.NewThrottle(5)
- for it.Valid() {
- timeStart := time.Now()
- dk, err := s.kv.registry.latestDataKey()
- if err != nil {
- return nil, nil,
- y.Wrapf(err, "Error while retrieving datakey in levelsController.compactBuildTables")
- }
- bopts := buildTableOptions(s.kv.opt)
- bopts.DataKey = dk
- // Builder does not need cache but the same options are used for opening table.
- bopts.BlockCache = s.kv.blockCache
- bopts.IndexCache = s.kv.indexCache
- builder := table.NewTableBuilder(bopts)
- var numKeys, numSkips uint64
- for ; it.Valid(); it.Next() {
- // See if we need to skip the prefix.
- if len(cd.dropPrefixes) > 0 && hasAnyPrefixes(it.Key(), cd.dropPrefixes) {
- numSkips++
- updateStats(it.Value())
- continue
- }
-
- // See if we need to skip this key.
- if len(skipKey) > 0 {
- if y.SameKey(it.Key(), skipKey) {
- numSkips++
- updateStats(it.Value())
- continue
- } else {
- skipKey = skipKey[:0]
- }
- }
-
- if !y.SameKey(it.Key(), lastKey) {
- if builder.ReachedCapacity(s.kv.opt.MaxTableSize) {
- // Only break if we are on a different key, and have reached capacity. We want
- // to ensure that all versions of the key are stored in the same sstable, and
- // not divided across multiple tables at the same level.
- break
- }
- lastKey = y.SafeCopy(lastKey, it.Key())
- numVersions = 0
- }
-
- vs := it.Value()
- version := y.ParseTs(it.Key())
- // Do not discard entries inserted by merge operator. These entries will be
- // discarded once they're merged
- if version <= discardTs && vs.Meta&bitMergeEntry == 0 {
- // Keep track of the number of versions encountered for this key. Only consider the
- // versions which are below the minReadTs, otherwise, we might end up discarding the
- // only valid version for a running transaction.
- numVersions++
-
- // Keep the current version and discard all the next versions if
- // - The `discardEarlierVersions` bit is set OR
- // - We've already processed `NumVersionsToKeep` number of versions
- // (including the current item being processed)
- lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0 ||
- numVersions == s.kv.opt.NumVersionsToKeep
-
- isExpired := isDeletedOrExpired(vs.Meta, vs.ExpiresAt)
-
- if isExpired || lastValidVersion {
- // If this version of the key is deleted or expired, skip all the rest of the
- // versions. Ensure that we're only removing versions below readTs.
- skipKey = y.SafeCopy(skipKey, it.Key())
-
- switch {
- // Add the key to the table only if it has not expired.
- // We don't want to add the deleted/expired keys.
- case !isExpired && lastValidVersion:
- // Add this key. We have set skipKey, so the following key versions
- // would be skipped.
- case hasOverlap:
- // If this key range has overlap with lower levels, then keep the deletion
- // marker with the latest version, discarding the rest. We have set skipKey,
- // so the following key versions would be skipped.
- default:
- // If no overlap, we can skip all the versions, by continuing here.
- numSkips++
- updateStats(vs)
- continue // Skip adding this key.
- }
- }
- }
- numKeys++
- if vs.Meta&bitValuePointer > 0 {
- vp.Decode(vs.Value)
- }
- builder.Add(it.Key(), vs, vp.Len)
- }
- // it.Valid() held at least once in the loop above, but every key may have been skipped, so
- // the builder can still be empty; check before turning it into a table.
- s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v",
- numKeys, numSkips, time.Since(timeStart))
- if builder.Empty() {
- continue
- }
- numBuilds++
- fileID := s.reserveFileID()
- if err := inflightBuilders.Do(); err != nil {
- // Can't return from here until I decrRef all the tables that I built so far.
- break
- }
- go func(builder *table.Builder) {
- defer builder.Close()
- defer inflightBuilders.Done(err)
-
- build := func(fileID uint64) (*table.Table, error) {
- fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true)
- if err != nil {
- return nil, errors.Wrapf(err, "While opening new table: %d", fileID)
- }
-
- if _, err := fd.Write(builder.Finish()); err != nil {
- return nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)
- }
- tbl, err := table.OpenTable(fd, bopts)
- // decrRef is added below.
- return tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())
- }
-
- var tbl *table.Table
- var err error
- if s.kv.opt.InMemory {
- tbl, err = table.OpenInMemoryTable(builder.Finish(), fileID, &bopts)
- } else {
- tbl, err = build(fileID)
- }
-
- // If we couldn't build the table, return fast.
- if err != nil {
- return
- }
-
- mu.Lock()
- newTables = append(newTables, tbl)
- mu.Unlock()
- }(builder)
- }
-
- // Wait for all table builders to finish and also for newTables accumulator to finish.
- err := inflightBuilders.Finish()
- if err == nil {
- // Ensure created files' directory entries are visible. We don't mind the extra latency
- // from not doing this ASAP after all file creation has finished because this is a
- // background operation.
- err = s.kv.syncDir(s.kv.opt.Dir)
- }
-
- if err != nil {
- // An error happened. Delete all the newly created table files (by calling DecrRef
- // -- we're the only holders of a ref).
- _ = decrRefs(newTables)
- return nil, nil, errors.Wrapf(err, "while running compactions for: %+v", cd)
- }
-
- sort.Slice(newTables, func(i, j int) bool {
- return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0
- })
- s.kv.vlog.updateDiscardStats(discardStats)
- s.kv.opt.Debugf("Discard stats: %v", discardStats)
- return newTables, func() error { return decrRefs(newTables) }, nil
-}
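The version-retention logic in the loop above is dense; here is a much-simplified decision table that captures its core cases (it ignores merge-operator entries and the skipKey bookkeeping). All names are invented for illustration:

package main

import "fmt"

// keepVersion sketches the retention rule: versions newer than discardTs are
// always kept; at or below it, up to numVersionsToKeep versions survive, and
// a delete/expire marker is kept only if deeper levels may still hold older
// versions of the key (hasOverlap), so it can shadow them.
func keepVersion(version, discardTs uint64, seenBelowDiscard, numVersionsToKeep int,
	deleted, hasOverlap bool) bool {
	if version > discardTs {
		return true
	}
	if deleted {
		return hasOverlap // keep the tombstone only to shadow lower levels
	}
	return seenBelowDiscard < numVersionsToKeep
}

func main() {
	fmt.Println(keepVersion(12, 10, 0, 1, false, false)) // true: above discardTs
	fmt.Println(keepVersion(9, 10, 1, 1, false, false))  // false: quota used up
	fmt.Println(keepVersion(8, 10, 0, 1, true, true))    // true: tombstone kept for overlap
}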
-
-func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet {
- changes := []*pb.ManifestChange{}
- for _, table := range newTables {
- changes = append(changes,
- newCreateChange(table.ID(), cd.nextLevel.level, table.KeyID(), table.CompressionType()))
- }
- for _, table := range cd.top {
- // Add a delete change only if the table is not in memory.
- if !table.IsInmemory {
- changes = append(changes, newDeleteChange(table.ID()))
- }
- }
- for _, table := range cd.bot {
- changes = append(changes, newDeleteChange(table.ID()))
- }
- return pb.ManifestChangeSet{Changes: changes}
-}
-
-func hasAnyPrefixes(s []byte, listOfPrefixes [][]byte) bool {
- for _, prefix := range listOfPrefixes {
- if bytes.HasPrefix(s, prefix) {
- return true
- }
- }
-
- return false
-}
-
-func containsPrefix(smallValue, largeValue, prefix []byte) bool {
- if bytes.HasPrefix(smallValue, prefix) {
- return true
- }
- if bytes.HasPrefix(largeValue, prefix) {
- return true
- }
- if bytes.Compare(prefix, smallValue) > 0 &&
- bytes.Compare(prefix, largeValue) < 0 {
- return true
- }
-
- return false
-}
-
-func containsAnyPrefixes(smallValue, largeValue []byte, listOfPrefixes [][]byte) bool {
- for _, prefix := range listOfPrefixes {
- if containsPrefix(smallValue, largeValue, prefix) {
- return true
- }
- }
-
- return false
-}
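The subtle case in containsPrefix is the third one: neither bound starts with the prefix, yet keys carrying it can still sit strictly inside the range. A tiny self-contained demonstration:

package main

import (
	"bytes"
	"fmt"
)

// rangeHasPrefix reports whether any key in [smallest, biggest] could start
// with prefix; this mirrors containsPrefix above.
func rangeHasPrefix(smallest, biggest, prefix []byte) bool {
	if bytes.HasPrefix(smallest, prefix) || bytes.HasPrefix(biggest, prefix) {
		return true
	}
	return bytes.Compare(prefix, smallest) > 0 && bytes.Compare(prefix, biggest) < 0
}

func main() {
	// Neither bound starts with "m", but keys like "ma" fall inside the range.
	fmt.Println(rangeHasPrefix([]byte("a"), []byte("z"), []byte("m"))) // true
	fmt.Println(rangeHasPrefix([]byte("a"), []byte("c"), []byte("m"))) // false
}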
-
-type compactDef struct {
- elog trace.Trace
-
- thisLevel *levelHandler
- nextLevel *levelHandler
-
- top []*table.Table
- bot []*table.Table
-
- thisRange keyRange
- nextRange keyRange
-
- thisSize int64
-
- dropPrefixes [][]byte
-}
-
-func (cd *compactDef) lockLevels() {
- cd.thisLevel.RLock()
- cd.nextLevel.RLock()
-}
-
-func (cd *compactDef) unlockLevels() {
- cd.nextLevel.RUnlock()
- cd.thisLevel.RUnlock()
-}
-
-func (cd *compactDef) allTables() []*table.Table {
- ret := make([]*table.Table, 0, len(cd.top)+len(cd.bot))
- ret = append(ret, cd.top...)
- ret = append(ret, cd.bot...)
- return ret
-}
-
-func (s *levelsController) fillTablesL0(cd *compactDef) bool {
- cd.lockLevels()
- defer cd.unlockLevels()
-
- cd.top = make([]*table.Table, len(cd.thisLevel.tables))
- copy(cd.top, cd.thisLevel.tables)
- if len(cd.top) == 0 {
- return false
- }
- cd.thisRange = infRange
-
- kr := getKeyRange(cd.top...)
- left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr)
- cd.bot = make([]*table.Table, right-left)
- copy(cd.bot, cd.nextLevel.tables[left:right])
-
- if len(cd.bot) == 0 {
- cd.nextRange = kr
- } else {
- cd.nextRange = getKeyRange(cd.bot...)
- }
-
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- return false
- }
-
- return true
-}
-
-// sortByOverlap sorts tables in increasing order of overlap with next level.
-func (s *levelsController) sortByOverlap(tables []*table.Table, cd *compactDef) {
- if len(tables) == 0 || cd.nextLevel == nil {
- return
- }
-
- tableOverlap := make([]int, len(tables))
- for i := range tables {
- // get key range for table
- tableRange := getKeyRange(tables[i])
- // get overlap with next level
- left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, tableRange)
- tableOverlap[i] = right - left
- }
-
- sort.Slice(tables, func(i, j int) bool {
- return tableOverlap[i] < tableOverlap[j]
- })
-}
-
-func (s *levelsController) fillTables(cd *compactDef) bool {
- cd.lockLevels()
- defer cd.unlockLevels()
-
- tables := make([]*table.Table, len(cd.thisLevel.tables))
- copy(tables, cd.thisLevel.tables)
- if len(tables) == 0 {
- return false
- }
-
- // We want to pick files from the current level in order of increasing overlap with the next
- // level's tables. The idea is to first compact the file from the current level that has the
- // least overlap with the next level, which gives us lower write amplification.
- s.sortByOverlap(tables, cd)
-
- for _, t := range tables {
- cd.thisSize = t.Size()
- cd.thisRange = getKeyRange(t)
- if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) {
- continue
- }
- cd.top = []*table.Table{t}
- left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange)
-
- // Sometimes the line below (make([]*table.Table, right-left)) panics with
- // "runtime error: makeslice: len out of range". One reason this can happen is when
- // right < left. We don't know how to reproduce it as of now, so we just log it to
- // gather more context.
- if right < left {
- s.kv.opt.Errorf("right: %d is less than left: %d in overlappingTables for current "+
- "level: %d, next level: %d, key range(%s, %s)", right, left, cd.thisLevel.level,
- cd.nextLevel.level, cd.thisRange.left, cd.thisRange.right)
-
- continue
- }
-
- cd.bot = make([]*table.Table, right-left)
- copy(cd.bot, cd.nextLevel.tables[left:right])
-
- if len(cd.bot) == 0 {
- cd.bot = []*table.Table{}
- cd.nextRange = cd.thisRange
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- continue
- }
- return true
- }
- cd.nextRange = getKeyRange(cd.bot...)
-
- if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) {
- continue
- }
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- continue
- }
- return true
- }
- return false
-}
-
-func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) {
- timeStart := time.Now()
-
- thisLevel := cd.thisLevel
- nextLevel := cd.nextLevel
-
- // Tables should never be moved directly between levels; they should always be rewritten to
- // allow discarding invalid versions.
-
- newTables, decr, err := s.compactBuildTables(l, cd)
- if err != nil {
- return err
- }
- defer func() {
- // Only assign to err, if it's not already nil.
- if decErr := decr(); err == nil {
- err = decErr
- }
- }()
- changeSet := buildChangeSet(&cd, newTables)
-
- // We write to the manifest _before_ we delete files (and after we created files)
- if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
- return err
- }
-
- // See comment earlier in this function about the ordering of these ops, and the order in which
- // we access levels when reading.
- if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
- return err
- }
- if err := thisLevel.deleteTables(cd.top); err != nil {
- return err
- }
-
- // Note: For level 0, while doCompact is running, it is possible that new tables are added.
- // However, the tables are added only to the end, so it is ok to just delete the first table.
-
- s.kv.opt.Infof("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n",
- thisLevel.level, nextLevel.level, len(cd.top)+len(cd.bot),
- len(newTables), time.Since(timeStart))
- return nil
-}
-
-var errFillTables = errors.New("Unable to fill tables")
-
-// doCompact picks some table on level l and compacts it away to the next level.
-func (s *levelsController) doCompact(id int, p compactionPriority) error {
- l := p.level
- y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check.
-
- cd := compactDef{
- elog: trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"),
- thisLevel: s.levels[l],
- nextLevel: s.levels[l+1],
- dropPrefixes: p.dropPrefixes,
- }
- cd.elog.SetMaxEvents(100)
- defer cd.elog.Finish()
-
- s.kv.opt.Debugf("[Compactor: %d] Attempting to run compaction: %+v", id, p)
-
- // While picking tables to be compacted, both levels' tables are expected to
- // remain unchanged.
- if l == 0 {
- if !s.fillTablesL0(&cd) {
- return errFillTables
- }
-
- } else {
- if !s.fillTables(&cd) {
- return errFillTables
- }
- }
- defer s.cstatus.delete(cd) // Remove the ranges from compaction status.
-
- s.kv.opt.Infof("[Compactor: %d] Running compaction: %+v for level: %d\n",
- id, p, cd.thisLevel.level)
- s.cstatus.toLog(cd.elog)
- if err := s.runCompactDef(l, cd); err != nil {
- // This compaction couldn't be done successfully.
- s.kv.opt.Warningf("[Compactor: %d] LOG Compact FAILED with error: %+v: %+v", id, err, cd)
- return err
- }
-
- s.cstatus.toLog(cd.elog)
- s.kv.opt.Infof("[Compactor: %d] Compaction for level: %d DONE", id, cd.thisLevel.level)
- return nil
-}
-
-func (s *levelsController) addLevel0Table(t *table.Table) error {
- // Add table to manifest file only if it is not opened in memory. We don't want to add a table
- // to the manifest file if it exists only in memory.
- if !t.IsInmemory {
- // We update the manifest _before_ the table becomes part of a levelHandler, because at that
- // point it could get used in some compaction. This ensures the manifest file gets updated in
- // the proper order. (That means this update happens before that of some compaction which
- // deletes the table.)
- err := s.kv.manifest.addChanges([]*pb.ManifestChange{
- newCreateChange(t.ID(), 0, t.KeyID(), t.CompressionType()),
- })
- if err != nil {
- return err
- }
- }
-
- for !s.levels[0].tryAddLevel0Table(t) {
- // Stall. Make sure all levels are healthy before we unstall.
- var timeStart time.Time
- {
- s.kv.opt.Infof("STALLED STALLED STALLED: %v\n", time.Since(s.lastUnstalled))
- s.cstatus.RLock()
- for i := 0; i < s.kv.opt.MaxLevels; i++ {
- s.kv.opt.Debugf("level=%d. Status=%s Size=%d\n",
- i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize())
- }
- s.cstatus.RUnlock()
- timeStart = time.Now()
- }
- // Before we unstall, we need to make sure that level 0 is healthy. Otherwise, we
- // will very quickly fill up level 0 again.
- for i := 0; ; i++ {
- // It's crucial that this behavior replicates pickCompactLevels' behavior in
- // computing compactability in order to guarantee progress.
- // Break the loop once L0 has enough space to accommodate new tables.
- if !s.isLevel0Compactable() {
- break
- }
- time.Sleep(10 * time.Millisecond)
- if i%100 == 0 {
- prios := s.pickCompactLevels()
- s.kv.opt.Debugf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios)
- i = 0
- }
- }
- {
- s.kv.opt.Debugf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart))
- s.lastUnstalled = time.Now()
- }
- }
-
- return nil
-}
-
-func (s *levelsController) close() error {
- err := s.cleanupLevels()
- return errors.Wrap(err, "levelsController.Close")
-}
-
- // get searches the levels, from level 0 upward, and returns the found value, if any. If
- // nothing is found, it returns a zero y.ValueStruct.
-func (s *levelsController) get(key []byte, maxVs *y.ValueStruct, startLevel int) (
- y.ValueStruct, error) {
- if s.kv.IsClosed() {
- return y.ValueStruct{}, ErrDBClosed
- }
- // It's important that we iterate the levels from 0 on upward. The reason is, if we iterated
- // in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could
- // read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do
- // parallelize this, we will need to call the h.RLock() function by increasing order of level
- // number.)
- version := y.ParseTs(key)
- for _, h := range s.levels {
- // Ignore all levels below startLevel. This is useful for GC when L0 is kept in memory.
- if h.level < startLevel {
- continue
- }
- vs, err := h.get(key) // Calls h.RLock() and h.RUnlock().
- if err != nil {
- return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key)
- }
- if vs.Value == nil && vs.Meta == 0 {
- continue
- }
- if maxVs == nil || vs.Version == version {
- return vs, nil
- }
- if maxVs.Version < vs.Version {
- *maxVs = vs
- }
- }
- if maxVs != nil {
- return *maxVs, nil
- }
- return y.ValueStruct{}, nil
-}
-
-func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator {
- for i := len(th) - 1; i >= 0; i-- {
- // This will increment the reference of the table handler.
- out = append(out, th[i].NewIterator(reversed))
- }
- return out
-}
-
-// appendIterators appends iterators to an array of iterators, for merging.
-// Note: This obtains references for the table handlers. Remember to close these iterators.
-func (s *levelsController) appendIterators(
- iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
- // Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing
- // data when there's a compaction.
- for _, level := range s.levels {
- iters = level.appendIterators(iters, opt)
- }
- return iters
-}
-
-// TableInfo represents the information about a table.
-type TableInfo struct {
- ID uint64
- Level int
- Left []byte
- Right []byte
- KeyCount uint64 // Number of keys in the table
- EstimatedSz uint64
-}
-
-func (s *levelsController) getTableInfo(withKeysCount bool) (result []TableInfo) {
- for _, l := range s.levels {
- l.RLock()
- for _, t := range l.tables {
- var count uint64
- if withKeysCount {
- it := t.NewIterator(false)
- for it.Rewind(); it.Valid(); it.Next() {
- count++
- }
- it.Close()
- }
-
- info := TableInfo{
- ID: t.ID(),
- Level: l.level,
- Left: t.Smallest(),
- Right: t.Biggest(),
- KeyCount: count,
- EstimatedSz: t.EstimatedSize(),
- }
- result = append(result, info)
- }
- l.RUnlock()
- }
- sort.Slice(result, func(i, j int) bool {
- if result[i].Level != result[j].Level {
- return result[i].Level < result[j].Level
- }
- return result[i].ID < result[j].ID
- })
- return
-}
-
-// verifyChecksum verifies checksum for all tables on all levels.
-func (s *levelsController) verifyChecksum() error {
- var tables []*table.Table
- for _, l := range s.levels {
- l.RLock()
- tables = tables[:0]
- for _, t := range l.tables {
- tables = append(tables, t)
- t.IncrRef()
- }
- l.RUnlock()
-
- for _, t := range tables {
- errChkVerify := t.VerifyChecksum()
- if err := t.DecrRef(); err != nil {
- s.kv.opt.Errorf("unable to decrease reference of table: %s while "+
- "verifying checksum with error: %s", t.Filename(), err)
- }
-
- if errChkVerify != nil {
- return errChkVerify
- }
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/logger.go b/vendor/github.com/dgraph-io/badger/v2/logger.go
deleted file mode 100644
index c7b4cd6c..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/logger.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "log"
- "os"
-)
-
-// Logger is implemented by any logging system that is used for standard logs.
-type Logger interface {
- Errorf(string, ...interface{})
- Warningf(string, ...interface{})
- Infof(string, ...interface{})
- Debugf(string, ...interface{})
-}
-
-// Errorf logs an ERROR log message to the logger specified in opts or to the
-// global logger if no logger is specified in opts.
-func (opt *Options) Errorf(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Errorf(format, v...)
-}
-
-// Infof logs an INFO message to the logger specified in opts.
-func (opt *Options) Infof(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Infof(format, v...)
-}
-
-// Warningf logs a WARNING message to the logger specified in opts.
-func (opt *Options) Warningf(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Warningf(format, v...)
-}
-
-// Debugf logs a DEBUG message to the logger specified in opts.
-func (opt *Options) Debugf(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Debugf(format, v...)
-}
-
-type loggingLevel int
-
-const (
- DEBUG loggingLevel = iota
- INFO
- WARNING
- ERROR
-)
-
-type defaultLog struct {
- *log.Logger
- level loggingLevel
-}
-
-func defaultLogger(level loggingLevel) *defaultLog {
- return &defaultLog{Logger: log.New(os.Stderr, "badger ", log.LstdFlags), level: level}
-}
-
-func (l *defaultLog) Errorf(f string, v ...interface{}) {
- if l.level <= ERROR {
- l.Printf("ERROR: "+f, v...)
- }
-}
-
-func (l *defaultLog) Warningf(f string, v ...interface{}) {
- if l.level <= WARNING {
- l.Printf("WARNING: "+f, v...)
- }
-}
-
-func (l *defaultLog) Infof(f string, v ...interface{}) {
- if l.level <= INFO {
- l.Printf("INFO: "+f, v...)
- }
-}
-
-func (l *defaultLog) Debugf(f string, v ...interface{}) {
- if l.level <= DEBUG {
- l.Printf("DEBUG: "+f, v...)
- }
-}
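Options.Logger is the single hook all of the helpers above go through, so any implementation of the four-method Logger interface can be plugged in. A usage sketch, assuming the badger v2 public API (DefaultOptions and WithLogger); the stdLogger type and the path are made up:

package main

import (
	"log"
	"os"

	badger "github.com/dgraph-io/badger/v2"
)

// stdLogger adapts the standard library logger to badger's Logger interface.
type stdLogger struct{ *log.Logger }

func (l stdLogger) Errorf(f string, v ...interface{})   { l.Printf("ERROR: "+f, v...) }
func (l stdLogger) Warningf(f string, v ...interface{}) { l.Printf("WARNING: "+f, v...) }
func (l stdLogger) Infof(f string, v ...interface{})    { l.Printf("INFO: "+f, v...) }
func (l stdLogger) Debugf(f string, v ...interface{})   { l.Printf("DEBUG: "+f, v...) }

func main() {
	opts := badger.DefaultOptions("/tmp/badger-demo").
		WithLogger(stdLogger{log.New(os.Stderr, "badger ", log.LstdFlags)})
	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}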
diff --git a/vendor/github.com/dgraph-io/badger/v2/managed_db.go b/vendor/github.com/dgraph-io/badger/v2/managed_db.go
deleted file mode 100644
index 23c79884..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/managed_db.go
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-// OpenManaged returns a new DB, which allows more control over setting
-// transaction timestamps, aka managed mode.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func OpenManaged(opts Options) (*DB, error) {
- opts.managedTxns = true
- return Open(opts)
-}
-
-// NewTransactionAt follows the same logic as DB.NewTransaction(), but uses the
-// provided read timestamp.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func (db *DB) NewTransactionAt(readTs uint64, update bool) *Txn {
- if !db.opt.managedTxns {
- panic("Cannot use NewTransactionAt with managedDB=false. Use NewTransaction instead.")
- }
- txn := db.newTransaction(update, true)
- txn.readTs = readTs
- return txn
-}
-
- // NewWriteBatchAt is similar to NewWriteBatch but it allows the user to set the commit
- // timestamp. NewWriteBatchAt is supposed to be used only in managed mode.
-func (db *DB) NewWriteBatchAt(commitTs uint64) *WriteBatch {
- if !db.opt.managedTxns {
- panic("cannot use NewWriteBatchAt with managedDB=false. Use NewWriteBatch instead")
- }
-
- wb := db.newWriteBatch(true)
- wb.commitTs = commitTs
- wb.txn.commitTs = commitTs
- return wb
-}
-func (db *DB) NewManagedWriteBatch() *WriteBatch {
- if !db.opt.managedTxns {
- panic("cannot use NewManagedWriteBatch with managedDB=false. Use NewWriteBatch instead")
- }
-
- wb := db.newWriteBatch(true)
- return wb
-}
-
-// CommitAt commits the transaction, following the same logic as Commit(), but
-// at the given commit timestamp. This will panic if not used with managed transactions.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func (txn *Txn) CommitAt(commitTs uint64, callback func(error)) error {
- if !txn.db.opt.managedTxns {
- panic("Cannot use CommitAt with managedDB=false. Use Commit instead.")
- }
- txn.commitTs = commitTs
- if callback == nil {
- return txn.Commit()
- }
- txn.CommitWith(callback)
- return nil
-}
-
- // SetDiscardTs sets a timestamp at or below which any invalid or deleted versions can be
- // discarded from the LSM tree, and hence from the value log, to reclaim disk space. It can
- // only be used with managed transactions.
-func (db *DB) SetDiscardTs(ts uint64) {
- if !db.opt.managedTxns {
- panic("Cannot use SetDiscardTs with managedDB=false.")
- }
- db.orc.setDiscardTs(ts)
-}
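Putting the managed-mode API together: the caller supplies every timestamp itself. A usage sketch assuming the badger v2 public API; the path and the key/value pair are made up:

package main

import (
	"log"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	db, err := badger.OpenManaged(badger.DefaultOptions("/tmp/badger-managed"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// In managed mode the caller owns the timestamp oracle: read at ts=1,
	// commit at ts=2, then allow versions at or below ts=1 to be discarded.
	txn := db.NewTransactionAt(1, true) // readTs=1, update=true
	defer txn.Discard()
	if err := txn.Set([]byte("k"), []byte("v")); err != nil {
		log.Fatal(err)
	}
	if err := txn.CommitAt(2, nil); err != nil {
		log.Fatal(err)
	}
	db.SetDiscardTs(1)
}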
diff --git a/vendor/github.com/dgraph-io/badger/v2/manifest.go b/vendor/github.com/dgraph-io/badger/v2/manifest.go
deleted file mode 100644
index e987c12c..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/manifest.go
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bufio"
- "bytes"
- "encoding/binary"
- "fmt"
- "hash/crc32"
- "io"
- "os"
- "path/filepath"
- "sync"
-
- "github.com/dgraph-io/badger/v2/options"
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/y"
- "github.com/golang/protobuf/proto"
- "github.com/pkg/errors"
-)
-
-// Manifest represents the contents of the MANIFEST file in a Badger store.
-//
-// The MANIFEST file describes the startup state of the db -- all LSM files and what level they're
-// at.
-//
-// It consists of a sequence of ManifestChangeSet objects. Each of these is treated atomically,
-// and contains a sequence of ManifestChange's (file creations/deletions) which we use to
-// reconstruct the manifest at startup.
-type Manifest struct {
- Levels []levelManifest
- Tables map[uint64]TableManifest
-
- // Contains total number of creation and deletion changes in the manifest -- used to compute
- // whether it'd be useful to rewrite the manifest.
- Creations int
- Deletions int
-}
-
-func createManifest() Manifest {
- levels := make([]levelManifest, 0)
- return Manifest{
- Levels: levels,
- Tables: make(map[uint64]TableManifest),
- }
-}
-
-// levelManifest contains information about LSM tree levels
-// in the MANIFEST file.
-type levelManifest struct {
- Tables map[uint64]struct{} // Set of table id's
-}
-
-// TableManifest contains information about a specific table
-// in the LSM tree.
-type TableManifest struct {
- Level uint8
- KeyID uint64
- Compression options.CompressionType
-}
-
-// manifestFile holds the file pointer (and other info) about the manifest file, which is a log
-// file we append to.
-type manifestFile struct {
- fp *os.File
- directory string
- // We make this configurable so that unit tests can hit rewrite() code quickly
- deletionsRewriteThreshold int
-
- // Guards appends, which includes access to the manifest field.
- appendLock sync.Mutex
-
- // Used to track the current state of the manifest, used when rewriting.
- manifest Manifest
-
- // Used to indicate if badger was opened in InMemory mode.
- inMemory bool
-}
-
-const (
- // ManifestFilename is the filename for the manifest file.
- ManifestFilename = "MANIFEST"
- manifestRewriteFilename = "MANIFEST-REWRITE"
- manifestDeletionsRewriteThreshold = 10000
- manifestDeletionsRatio = 10
-)
-
-// asChanges returns a sequence of changes that could be used to recreate the Manifest in its
-// present state.
-func (m *Manifest) asChanges() []*pb.ManifestChange {
- changes := make([]*pb.ManifestChange, 0, len(m.Tables))
- for id, tm := range m.Tables {
- changes = append(changes, newCreateChange(id, int(tm.Level), tm.KeyID, tm.Compression))
- }
- return changes
-}
-
-func (m *Manifest) clone() Manifest {
- changeSet := pb.ManifestChangeSet{Changes: m.asChanges()}
- ret := createManifest()
- y.Check(applyChangeSet(&ret, &changeSet))
- return ret
-}
-
- // openOrCreateManifestFile opens a Badger manifest file if it exists, or creates one if it
- // doesn't exist.
-func openOrCreateManifestFile(opt Options) (
- ret *manifestFile, result Manifest, err error) {
- if opt.InMemory {
- return &manifestFile{inMemory: true}, Manifest{}, nil
- }
- return helpOpenOrCreateManifestFile(opt.Dir, opt.ReadOnly, manifestDeletionsRewriteThreshold)
-}
-
-func helpOpenOrCreateManifestFile(dir string, readOnly bool, deletionsThreshold int) (
- *manifestFile, Manifest, error) {
-
- path := filepath.Join(dir, ManifestFilename)
- var flags uint32
- if readOnly {
- flags |= y.ReadOnly
- }
- fp, err := y.OpenExistingFile(path, flags) // We explicitly sync in addChanges, outside the lock.
- if err != nil {
- if !os.IsNotExist(err) {
- return nil, Manifest{}, err
- }
- if readOnly {
- return nil, Manifest{}, fmt.Errorf("no manifest found, required for read-only db")
- }
- m := createManifest()
- fp, netCreations, err := helpRewrite(dir, &m)
- if err != nil {
- return nil, Manifest{}, err
- }
- y.AssertTrue(netCreations == 0)
- mf := &manifestFile{
- fp: fp,
- directory: dir,
- manifest: m.clone(),
- deletionsRewriteThreshold: deletionsThreshold,
- }
- return mf, m, nil
- }
-
- manifest, truncOffset, err := ReplayManifestFile(fp)
- if err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
-
- if !readOnly {
- // Truncate file so we don't have a half-written entry at the end.
- if err := fp.Truncate(truncOffset); err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
- }
- if _, err = fp.Seek(0, io.SeekEnd); err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
-
- mf := &manifestFile{
- fp: fp,
- directory: dir,
- manifest: manifest.clone(),
- deletionsRewriteThreshold: deletionsThreshold,
- }
- return mf, manifest, nil
-}
-
-func (mf *manifestFile) close() error {
- if mf.inMemory {
- return nil
- }
- return mf.fp.Close()
-}
-
- // addChanges writes a batch of changes, atomically, to the file. By "atomically" we mean
- // that when we replay the MANIFEST file, we'll either replay all the changes or none of them.
- // (The truth of this depends on the filesystem -- some might append garbage data if a system
- // crash happens at the wrong time.)
-func (mf *manifestFile) addChanges(changesParam []*pb.ManifestChange) error {
- if mf.inMemory {
- return nil
- }
- changes := pb.ManifestChangeSet{Changes: changesParam}
- buf, err := proto.Marshal(&changes)
- if err != nil {
- return err
- }
-
- // Maybe we could use O_APPEND instead (on certain file systems)
- mf.appendLock.Lock()
- if err := applyChangeSet(&mf.manifest, &changes); err != nil {
- mf.appendLock.Unlock()
- return err
- }
- // Rewrite manifest if it'd shrink by 1/10 and it's big enough to care
- if mf.manifest.Deletions > mf.deletionsRewriteThreshold &&
- mf.manifest.Deletions > manifestDeletionsRatio*(mf.manifest.Creations-mf.manifest.Deletions) {
- if err := mf.rewrite(); err != nil {
- mf.appendLock.Unlock()
- return err
- }
- } else {
- var lenCrcBuf [8]byte
- binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(buf)))
- binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(buf, y.CastagnoliCrcTable))
- buf = append(lenCrcBuf[:], buf...)
- if _, err := mf.fp.Write(buf); err != nil {
- mf.appendLock.Unlock()
- return err
- }
- }
-
- mf.appendLock.Unlock()
- return mf.fp.Sync()
-}
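Each appended manifest entry is framed as a 4-byte big-endian payload length followed by a 4-byte Castagnoli CRC32, then the marshalled ManifestChangeSet. A self-contained sketch of that framing (frame and unframe are invented names; the payload here is a placeholder, not a real proto message):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// frame prepends the 8-byte header used for every manifest entry:
// 4 bytes big-endian payload length, then 4 bytes CRC32 (Castagnoli).
func frame(payload []byte) []byte {
	var hdr [8]byte
	binary.BigEndian.PutUint32(hdr[0:4], uint32(len(payload)))
	binary.BigEndian.PutUint32(hdr[4:8], crc32.Checksum(payload, castagnoli))
	return append(hdr[:], payload...)
}

// unframe reverses frame, failing on a checksum mismatch just like replay.
func unframe(rec []byte) ([]byte, error) {
	n := binary.BigEndian.Uint32(rec[0:4])
	payload := rec[8 : 8+int(n)]
	if crc32.Checksum(payload, castagnoli) != binary.BigEndian.Uint32(rec[4:8]) {
		return nil, fmt.Errorf("manifest has checksum mismatch")
	}
	return payload, nil
}

func main() {
	rec := frame([]byte("serialized ManifestChangeSet"))
	payload, err := unframe(rec)
	fmt.Printf("%q %v\n", payload, err)
}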
-
-// Has to be 4 bytes. The value can never change, ever, anyway.
-var magicText = [4]byte{'B', 'd', 'g', 'r'}
-
-// The magic version number.
-const magicVersion = 7
-
-func helpRewrite(dir string, m *Manifest) (*os.File, int, error) {
- rewritePath := filepath.Join(dir, manifestRewriteFilename)
- // We explicitly sync.
- fp, err := y.OpenTruncFile(rewritePath, false)
- if err != nil {
- return nil, 0, err
- }
-
- buf := make([]byte, 8)
- copy(buf[0:4], magicText[:])
- binary.BigEndian.PutUint32(buf[4:8], magicVersion)
-
- netCreations := len(m.Tables)
- changes := m.asChanges()
- set := pb.ManifestChangeSet{Changes: changes}
-
- changeBuf, err := proto.Marshal(&set)
- if err != nil {
- fp.Close()
- return nil, 0, err
- }
- var lenCrcBuf [8]byte
- binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(changeBuf)))
- binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(changeBuf, y.CastagnoliCrcTable))
- buf = append(buf, lenCrcBuf[:]...)
- buf = append(buf, changeBuf...)
- if _, err := fp.Write(buf); err != nil {
- fp.Close()
- return nil, 0, err
- }
- if err := fp.Sync(); err != nil {
- fp.Close()
- return nil, 0, err
- }
-
- // On Windows, files must be closed before doing a Rename.
- if err = fp.Close(); err != nil {
- return nil, 0, err
- }
- manifestPath := filepath.Join(dir, ManifestFilename)
- if err := os.Rename(rewritePath, manifestPath); err != nil {
- return nil, 0, err
- }
- fp, err = y.OpenExistingFile(manifestPath, 0)
- if err != nil {
- return nil, 0, err
- }
- if _, err := fp.Seek(0, io.SeekEnd); err != nil {
- fp.Close()
- return nil, 0, err
- }
- if err := syncDir(dir); err != nil {
- fp.Close()
- return nil, 0, err
- }
-
- return fp, netCreations, nil
-}
-
-// Must be called while appendLock is held.
-func (mf *manifestFile) rewrite() error {
- // On Windows, files must be closed before doing a Rename.
- if err := mf.fp.Close(); err != nil {
- return err
- }
- fp, netCreations, err := helpRewrite(mf.directory, &mf.manifest)
- if err != nil {
- return err
- }
- mf.fp = fp
- mf.manifest.Creations = netCreations
- mf.manifest.Deletions = 0
-
- return nil
-}
-
-type countingReader struct {
- wrapped *bufio.Reader
- count int64
-}
-
-func (r *countingReader) Read(p []byte) (n int, err error) {
- n, err = r.wrapped.Read(p)
- r.count += int64(n)
- return
-}
-
-func (r *countingReader) ReadByte() (b byte, err error) {
- b, err = r.wrapped.ReadByte()
- if err == nil {
- r.count++
- }
- return
-}
-
-var (
- errBadMagic = errors.New("manifest has bad magic")
- errBadChecksum = errors.New("manifest has checksum mismatch")
-)
-
- // ReplayManifestFile reads the manifest file and constructs the manifest object it describes.
- // (Callers clone it when they need both an immutable and a mutable copy.)
- // It also returns the offset just past the last completely read manifest entry -- the file
- // must be truncated at that point before further appends are made (in case a partial entry
- // follows it). In normal conditions, truncOffset is the file size.
-func ReplayManifestFile(fp *os.File) (Manifest, int64, error) {
- r := countingReader{wrapped: bufio.NewReader(fp)}
-
- var magicBuf [8]byte
- if _, err := io.ReadFull(&r, magicBuf[:]); err != nil {
- return Manifest{}, 0, errBadMagic
- }
- if !bytes.Equal(magicBuf[0:4], magicText[:]) {
- return Manifest{}, 0, errBadMagic
- }
- version := y.BytesToU32(magicBuf[4:8])
- if version != magicVersion {
- return Manifest{}, 0,
- //nolint:lll
- fmt.Errorf("manifest has unsupported version: %d (we support %d).\n"+
- "Please see https://github.com/dgraph-io/badger/blob/master/README.md#i-see-manifest-has-unsupported-version-x-we-support-y-error"+
- " on how to fix this.",
- version, magicVersion)
- }
-
- stat, err := fp.Stat()
- if err != nil {
- return Manifest{}, 0, err
- }
-
- build := createManifest()
- var offset int64
- for {
- offset = r.count
- var lenCrcBuf [8]byte
- _, err := io.ReadFull(&r, lenCrcBuf[:])
- if err != nil {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- break
- }
- return Manifest{}, 0, err
- }
- length := y.BytesToU32(lenCrcBuf[0:4])
- // Sanity check to ensure we don't over-allocate memory.
- if length > uint32(stat.Size()) {
- return Manifest{}, 0, errors.Errorf(
- "Buffer length: %d greater than file size: %d. Manifest file might be corrupted",
- length, stat.Size())
- }
- var buf = make([]byte, length)
- if _, err := io.ReadFull(&r, buf); err != nil {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- break
- }
- return Manifest{}, 0, err
- }
- if crc32.Checksum(buf, y.CastagnoliCrcTable) != y.BytesToU32(lenCrcBuf[4:8]) {
- return Manifest{}, 0, errBadChecksum
- }
-
- var changeSet pb.ManifestChangeSet
- if err := proto.Unmarshal(buf, &changeSet); err != nil {
- return Manifest{}, 0, err
- }
-
- if err := applyChangeSet(&build, &changeSet); err != nil {
- return Manifest{}, 0, err
- }
- }
-
- return build, offset, nil
-}
-
-func applyManifestChange(build *Manifest, tc *pb.ManifestChange) error {
- switch tc.Op {
- case pb.ManifestChange_CREATE:
- if _, ok := build.Tables[tc.Id]; ok {
- return fmt.Errorf("MANIFEST invalid, table %d exists", tc.Id)
- }
- build.Tables[tc.Id] = TableManifest{
- Level: uint8(tc.Level),
- KeyID: tc.KeyId,
- Compression: options.CompressionType(tc.Compression),
- }
- for len(build.Levels) <= int(tc.Level) {
- build.Levels = append(build.Levels, levelManifest{make(map[uint64]struct{})})
- }
- build.Levels[tc.Level].Tables[tc.Id] = struct{}{}
- build.Creations++
- case pb.ManifestChange_DELETE:
- tm, ok := build.Tables[tc.Id]
- if !ok {
- return fmt.Errorf("MANIFEST removes non-existing table %d", tc.Id)
- }
- delete(build.Levels[tm.Level].Tables, tc.Id)
- delete(build.Tables, tc.Id)
- build.Deletions++
- default:
- return fmt.Errorf("MANIFEST file has invalid manifestChange op")
- }
- return nil
-}
-
-// This is not a "recoverable" error -- opening the KV store fails because the MANIFEST file is
-// just plain broken.
-func applyChangeSet(build *Manifest, changeSet *pb.ManifestChangeSet) error {
- for _, change := range changeSet.Changes {
- if err := applyManifestChange(build, change); err != nil {
- return err
- }
- }
- return nil
-}
-
-func newCreateChange(
- id uint64, level int, keyID uint64, c options.CompressionType) *pb.ManifestChange {
- return &pb.ManifestChange{
- Id: id,
- Op: pb.ManifestChange_CREATE,
- Level: uint32(level),
- KeyId: keyID,
- // Hard coding it, since we're supporting only AES for now.
- EncryptionAlgo: pb.EncryptionAlgo_aes,
- Compression: uint32(c),
- }
-}
-
-func newDeleteChange(id uint64) *pb.ManifestChange {
- return &pb.ManifestChange{
- Id: id,
- Op: pb.ManifestChange_DELETE,
- }
-}
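
For orientation: the deleted manifest code above implements a small append-only log. The file starts with a fixed header (magicText plus a big-endian magicVersion), and each subsequent record is a protobuf-encoded ManifestChangeSet framed by a 4-byte big-endian payload length and a 4-byte CRC32-Castagnoli checksum. Below is a self-contained sketch of just that framing, using only the standard library (appendRecord and readRecord are illustrative names, not badger APIs):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
	"io"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// appendRecord frames one payload the way the manifest does: a 4-byte
// big-endian length, a 4-byte CRC32-C of the payload, then the payload.
func appendRecord(log, payload []byte) []byte {
	var hdr [8]byte
	binary.BigEndian.PutUint32(hdr[0:4], uint32(len(payload)))
	binary.BigEndian.PutUint32(hdr[4:8], crc32.Checksum(payload, castagnoli))
	return append(append(log, hdr[:]...), payload...)
}

// readRecord reads one framed record and verifies its checksum.
func readRecord(r io.Reader) ([]byte, error) {
	var hdr [8]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return nil, err // io.EOF here means a clean end of the log
	}
	payload := make([]byte, binary.BigEndian.Uint32(hdr[0:4]))
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	if crc32.Checksum(payload, castagnoli) != binary.BigEndian.Uint32(hdr[4:8]) {
		return nil, errors.New("checksum mismatch")
	}
	return payload, nil
}

func main() {
	log := appendRecord(nil, []byte("change-set-1"))
	log = appendRecord(log, []byte("change-set-2"))
	r := bytes.NewReader(log)
	for {
		rec, err := readRecord(r)
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", rec)
	}
}
```

This framing is why ReplayManifestFile returns the offset after the last fully framed record: a torn final record can simply be truncated away before new changes are appended.
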
diff --git a/vendor/github.com/dgraph-io/badger/v2/merge.go b/vendor/github.com/dgraph-io/badger/v2/merge.go
deleted file mode 100644
index 569b297d..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/merge.go
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "sync"
- "time"
-
- "github.com/dgraph-io/badger/v2/y"
- "github.com/pkg/errors"
-)
-
-// MergeOperator represents a Badger merge operator.
-type MergeOperator struct {
- sync.RWMutex
- f MergeFunc
- db *DB
- key []byte
- closer *y.Closer
-}
-
-// MergeFunc accepts two byte slices, one representing an existing value, and
-// another representing a new value that needs to be ‘merged’ into it. MergeFunc
-// contains the logic to perform the ‘merge’ and return an updated value.
-// MergeFunc could perform operations like integer addition, list appends etc.
-// Note that the ordering of the operands is maintained.
-type MergeFunc func(existingVal, newVal []byte) []byte
-
-// GetMergeOperator creates a new MergeOperator for a given key and returns a
-// pointer to it. It also fires off a goroutine that periodically compacts the
-// key's values using the merge function, at the interval specified by dur.
-func (db *DB) GetMergeOperator(key []byte,
- f MergeFunc, dur time.Duration) *MergeOperator {
- op := &MergeOperator{
- f: f,
- db: db,
- key: key,
- closer: y.NewCloser(1),
- }
-
- go op.runCompactions(dur)
- return op
-}
-
-var errNoMerge = errors.New("No need for merge")
-
-func (op *MergeOperator) iterateAndMerge() (newVal []byte, latest uint64, err error) {
- txn := op.db.NewTransaction(false)
- defer txn.Discard()
- opt := DefaultIteratorOptions
- opt.AllVersions = true
- it := txn.NewKeyIterator(op.key, opt)
- defer it.Close()
-
- var numVersions int
- for it.Rewind(); it.Valid(); it.Next() {
- item := it.Item()
- numVersions++
- if numVersions == 1 {
- // This should be the newVal, considering this is the latest version.
- newVal, err = item.ValueCopy(newVal)
- if err != nil {
- return nil, 0, err
- }
- latest = item.Version()
- } else {
- if err := item.Value(func(oldVal []byte) error {
- // The merge should always be on the newVal considering it has the merge result of
- // the latest version. The value read should be the oldVal.
- newVal = op.f(oldVal, newVal)
- return nil
- }); err != nil {
- return nil, 0, err
- }
- }
- if item.DiscardEarlierVersions() {
- break
- }
- }
- if numVersions == 0 {
- return nil, latest, ErrKeyNotFound
- } else if numVersions == 1 {
- return newVal, latest, errNoMerge
- }
- return newVal, latest, nil
-}
-
-func (op *MergeOperator) compact() error {
- op.Lock()
- defer op.Unlock()
- val, version, err := op.iterateAndMerge()
- if err == ErrKeyNotFound || err == errNoMerge {
- return nil
- } else if err != nil {
- return err
- }
- entries := []*Entry{
- {
- Key: y.KeyWithTs(op.key, version),
- Value: val,
- meta: bitDiscardEarlierVersions,
- },
- }
- // Write value back to the DB. It is important that we do not set the bitMergeEntry bit
- // here. When compaction happens, all the older merged entries will be removed.
- return op.db.batchSetAsync(entries, func(err error) {
- if err != nil {
- op.db.opt.Errorf("failed to insert the result of merge compaction: %s", err)
- }
- })
-}
-
-func (op *MergeOperator) runCompactions(dur time.Duration) {
- ticker := time.NewTicker(dur)
- defer op.closer.Done()
- var stop bool
- for {
- select {
- case <-op.closer.HasBeenClosed():
- stop = true
- case <-ticker.C: // wait for tick
- }
- if err := op.compact(); err != nil {
- op.db.opt.Errorf("failure while running merge operation: %s", err)
- }
- if stop {
- ticker.Stop()
- break
- }
- }
-}
-
-// Add records a value in Badger which will eventually be merged by a background
-// routine into the values that were recorded by previous invocations to Add().
-func (op *MergeOperator) Add(val []byte) error {
- return op.db.Update(func(txn *Txn) error {
- return txn.SetEntry(NewEntry(op.key, val).withMergeBit())
- })
-}
-
-// Get returns the latest value for the merge operator, which is derived by
-// applying the merge function to all the values added so far.
-//
-// If Add has not been called even once, Get will return ErrKeyNotFound.
-func (op *MergeOperator) Get() ([]byte, error) {
- op.RLock()
- defer op.RUnlock()
- var existing []byte
- err := op.db.View(func(txn *Txn) (err error) {
- existing, _, err = op.iterateAndMerge()
- return err
- })
- if err == errNoMerge {
- return existing, nil
- }
- return existing, err
-}
-
-// Stop waits for any pending merge to complete and then stops the background
-// goroutine.
-func (op *MergeOperator) Stop() {
- op.closer.SignalAndWait()
-}
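
The deleted merge.go above is badger's public merge API. A hedged sketch of how it is typically driven, assuming an open Badger v2 DB (the counter encoding and the path are illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"log"
	"time"

	badger "github.com/dgraph-io/badger/v2"
)

// add is a MergeFunc that treats both operands as big-endian uint64 counters.
func add(existingVal, newVal []byte) []byte {
	u := func(b []byte) uint64 {
		if len(b) != 8 {
			return 0
		}
		return binary.BigEndian.Uint64(b)
	}
	var out [8]byte
	binary.BigEndian.PutUint64(out[:], u(existingVal)+u(newVal))
	return out[:]
}

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger-merge"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	m := db.GetMergeOperator([]byte("counter"), add, 200*time.Millisecond)
	defer m.Stop() // waits for a pending merge, then stops the goroutine

	var one [8]byte
	binary.BigEndian.PutUint64(one[:], 1)
	for i := 0; i < 3; i++ {
		if err := m.Add(one[:]); err != nil {
			log.Fatal(err)
		}
	}

	res, err := m.Get() // folds all versions with add; expect 3
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(binary.BigEndian.Uint64(res))
}
```

Note that Add only records an operand; the folding happens lazily in Get and in the periodic compaction goroutine, which is why a cheap MergeFunc and a sensible dur matter for hot keys.
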
diff --git a/vendor/github.com/dgraph-io/badger/v2/options.go b/vendor/github.com/dgraph-io/badger/v2/options.go
deleted file mode 100644
index 700c8747..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/options.go
+++ /dev/null
@@ -1,657 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "time"
-
- "github.com/dgraph-io/badger/v2/options"
- "github.com/dgraph-io/badger/v2/table"
-)
-
-// Note: If you add a new option X make sure you also add a WithX method on Options.
-
-// Options are params for creating DB object.
-//
-// This package provides DefaultOptions which contains options that should
-// work for most applications. Consider using that as a starting point before
-// customizing it for your own needs.
-//
-// Each option X is documented on the WithX method.
-type Options struct {
- // Required options.
-
- Dir string
- ValueDir string
-
- // Usually modified options.
-
- SyncWrites bool
- TableLoadingMode options.FileLoadingMode
- ValueLogLoadingMode options.FileLoadingMode
- NumVersionsToKeep int
- ReadOnly bool
- Truncate bool
- Logger Logger
- Compression options.CompressionType
- InMemory bool
-
- // Fine tuning options.
-
- MaxTableSize int64
- LevelSizeMultiplier int
- MaxLevels int
- ValueThreshold int
- NumMemtables int
- // Changing BlockSize across DB runs will not break badger. The block size is
- // read from the block index stored at the end of the table.
- BlockSize int
- BloomFalsePositive float64
- KeepL0InMemory bool
- BlockCacheSize int64
- IndexCacheSize int64
- LoadBloomsOnOpen bool
-
- NumLevelZeroTables int
- NumLevelZeroTablesStall int
-
- LevelOneSize int64
- ValueLogFileSize int64
- ValueLogMaxEntries uint32
-
- NumCompactors int
- CompactL0OnClose bool
- LogRotatesToFlush int32
- ZSTDCompressionLevel int
-
- // When set, checksum will be validated for each entry read from the value log file.
- VerifyValueChecksum bool
-
- // Encryption related options.
- EncryptionKey []byte // encryption key
- EncryptionKeyRotationDuration time.Duration // key rotation duration
-
- // BypassLockGuard will bypass the lock guard on badger. Bypassing the lock
- // guard can cause data corruption if multiple badger instances are using
- // the same directory. Use this option with caution.
- BypassLockGuard bool
-
- // ChecksumVerificationMode decides when the DB should verify checksums for SSTable blocks.
- ChecksumVerificationMode options.ChecksumVerificationMode
-
- // DetectConflicts determines whether the transactions would be checked for
- // conflicts. The transactions can be processed at a higher rate when
- // conflict detection is disabled.
- DetectConflicts bool
-
- // Transaction start and commit timestamps are managed by end-user.
- // This is only useful for databases built on top of Badger (like Dgraph).
- // Not recommended for most users.
- managedTxns bool
-
- // 4. Flags for testing purposes
- // ------------------------------
- maxBatchCount int64 // max entries in batch
- maxBatchSize int64 // max batch size in bytes
-}
-
-// DefaultOptions sets a list of recommended options for good performance.
-// Feel free to modify these to suit your needs with the WithX methods.
-func DefaultOptions(path string) Options {
- return Options{
- Dir: path,
- ValueDir: path,
- LevelOneSize: 256 << 20,
- LevelSizeMultiplier: 10,
- TableLoadingMode: options.MemoryMap,
- ValueLogLoadingMode: options.MemoryMap,
- // table.MemoryMap to mmap() the tables.
- // table.Nothing to not preload the tables.
- MaxLevels: 7,
- MaxTableSize: 64 << 20,
- NumCompactors: 2, // Run at least 2 compactors. One is dedicated for L0.
- NumLevelZeroTables: 5,
- NumLevelZeroTablesStall: 15,
- NumMemtables: 5,
- BloomFalsePositive: 0.01,
- BlockSize: 4 * 1024,
- SyncWrites: true,
- NumVersionsToKeep: 1,
- CompactL0OnClose: true,
- KeepL0InMemory: false,
- VerifyValueChecksum: false,
- Compression: options.None,
- BlockCacheSize: 0,
- IndexCacheSize: 0,
- LoadBloomsOnOpen: true,
-
- // The following benchmarks were done on a 4 KB block size (default block size). The
- // compression ratio is supposed to increase with increasing compression level, but since
- // the input to the compression algorithm is small (4 KB), we don't get a significant
- // benefit at level 3.
- // NOTE: The benchmarks are with DataDog ZSTD that requires CGO. Hence, no longer valid.
- // no_compression-16 10 502848865 ns/op 165.46 MB/s -
- // zstd_compression/level_1-16 7 739037966 ns/op 112.58 MB/s 2.93
- // zstd_compression/level_3-16 7 756950250 ns/op 109.91 MB/s 2.72
- // zstd_compression/level_15-16 1 11135686219 ns/op 7.47 MB/s 4.38
- // Benchmark code can be found in table/builder_test.go file
- ZSTDCompressionLevel: 1,
-
- // Nothing to read/write value log using standard File I/O
- // MemoryMap to mmap() the value log files
- // (2^30 - 1)*2 when mmapping < 2^31 - 1, max int32.
- // -1 so 2*ValueLogFileSize won't overflow on 32-bit systems.
- ValueLogFileSize: 1<<30 - 1,
-
- ValueLogMaxEntries: 1000000,
- ValueThreshold: 1 << 10, // 1 KB.
- Truncate: false,
- Logger: defaultLogger(INFO),
- LogRotatesToFlush: 2,
- EncryptionKey: []byte{},
- EncryptionKeyRotationDuration: 10 * 24 * time.Hour, // Default 10 days.
- DetectConflicts: true,
- }
-}
-
-func buildTableOptions(opt Options) table.Options {
- return table.Options{
- BlockSize: opt.BlockSize,
- BloomFalsePositive: opt.BloomFalsePositive,
- LoadBloomsOnOpen: opt.LoadBloomsOnOpen,
- LoadingMode: opt.TableLoadingMode,
- ChkMode: opt.ChecksumVerificationMode,
- Compression: opt.Compression,
- ZSTDCompressionLevel: opt.ZSTDCompressionLevel,
- }
-}
-
-const (
- maxValueThreshold = (1 << 20) // 1 MB
-)
-
-// LSMOnlyOptions follows from DefaultOptions, but sets a higher ValueThreshold
-// so values would be collocated with the LSM tree, with value log largely acting
-// as a write-ahead log only. These options would reduce the disk usage of value
-// log, and make Badger act more like a typical LSM tree.
-func LSMOnlyOptions(path string) Options {
- // Let's not set any other options, because they can cause issues with the
- // size of key-value pairs a user can pass to Badger. For example, if we set
- // ValueLogFileSize to 64MB, a user can't pass a value larger than that.
- // Setting ValueLogMaxEntries to 1000 can generate too many files.
- // These options are better configured on a per-usage basis than broadly here.
- // The ValueThreshold is the most important setting a user needs to change to
- // achieve a heavier usage of the LSM tree.
- // NOTE: If a user does not want to set 1MB as the ValueThreshold because
- // of performance reasons, 1KB would be a good option too, allowing
- // values smaller than 1KB to be collocated with the keys in the LSM tree.
- return DefaultOptions(path).WithValueThreshold(maxValueThreshold /* 1 MB */)
-}
-
-// WithDir returns a new Options value with Dir set to the given value.
-//
-// Dir is the path of the directory where key data will be stored.
-// If it doesn't exist, Badger will try to create it for you.
-// This is set automatically to be the path given to `DefaultOptions`.
-func (opt Options) WithDir(val string) Options {
- opt.Dir = val
- return opt
-}
-
-// WithValueDir returns a new Options value with ValueDir set to the given value.
-//
-// ValueDir is the path of the directory where value data will be stored.
-// If it doesn't exist, Badger will try to create it for you.
-// This is set automatically to be the path given to `DefaultOptions`.
-func (opt Options) WithValueDir(val string) Options {
- opt.ValueDir = val
- return opt
-}
-
-// WithLoggingLevel returns a new Options value with logging level of the
-// default logger set to the given value.
-// LoggingLevel sets the level of logging. It should be one of DEBUG, INFO,
-// WARNING or ERROR levels.
-//
-// The default value of LoggingLevel is INFO.
-func (opt Options) WithLoggingLevel(val loggingLevel) Options {
- opt.Logger = defaultLogger(val)
- return opt
-}
-
-// WithSyncWrites returns a new Options value with SyncWrites set to the given value.
-//
-// When SyncWrites is true all writes are synced to disk. Setting this to false would achieve better
-// performance, but may cause data loss in case of crash.
-//
-// The default value of SyncWrites is true.
-func (opt Options) WithSyncWrites(val bool) Options {
- opt.SyncWrites = val
- return opt
-}
-
-// WithTableLoadingMode returns a new Options value with TableLoadingMode set to the given value.
-//
-// TableLoadingMode indicates which file loading mode should be used for the LSM tree data files.
-//
-// The default value of TableLoadingMode is options.MemoryMap.
-func (opt Options) WithTableLoadingMode(val options.FileLoadingMode) Options {
- opt.TableLoadingMode = val
- return opt
-}
-
-// WithValueLogLoadingMode returns a new Options value with ValueLogLoadingMode set to the given
-// value.
-//
-// ValueLogLoadingMode indicates which file loading mode should be used for the value log data
-// files.
-//
-// The default value of ValueLogLoadingMode is options.MemoryMap.
-func (opt Options) WithValueLogLoadingMode(val options.FileLoadingMode) Options {
- opt.ValueLogLoadingMode = val
- return opt
-}
-
-// WithNumVersionsToKeep returns a new Options value with NumVersionsToKeep set to the given value.
-//
-// NumVersionsToKeep sets how many versions to keep per key at most.
-//
-// The default value of NumVersionsToKeep is 1.
-func (opt Options) WithNumVersionsToKeep(val int) Options {
- opt.NumVersionsToKeep = val
- return opt
-}
-
-// WithReadOnly returns a new Options value with ReadOnly set to the given value.
-//
-// When ReadOnly is true, the DB will be opened in read-only mode.
-// Multiple processes can open the same Badger DB.
-// Note: if the DB being opened had crashed before and has vlog data to be replayed,
-// ReadOnly will cause Open to fail with an appropriate message.
-//
-// The default value of ReadOnly is false.
-func (opt Options) WithReadOnly(val bool) Options {
- opt.ReadOnly = val
- return opt
-}
-
-// WithTruncate returns a new Options value with Truncate set to the given value.
-//
-// Truncate indicates whether value log files should be truncated to delete corrupt data, if any.
-// This option is ignored when ReadOnly is true.
-//
-// The default value of Truncate is false.
-func (opt Options) WithTruncate(val bool) Options {
- opt.Truncate = val
- return opt
-}
-
-// WithLogger returns a new Options value with Logger set to the given value.
-//
-// Logger provides a way to configure what logger each value of badger.DB uses.
-//
-// The default value of Logger writes to stderr using the log package from the Go standard library.
-func (opt Options) WithLogger(val Logger) Options {
- opt.Logger = val
- return opt
-}
-
-// WithMaxTableSize returns a new Options value with MaxTableSize set to the given value.
-//
-// MaxTableSize sets the maximum size in bytes for each LSM table or file.
-//
-// The default value of MaxTableSize is 64MB.
-func (opt Options) WithMaxTableSize(val int64) Options {
- opt.MaxTableSize = val
- return opt
-}
-
-// WithLevelSizeMultiplier returns a new Options value with LevelSizeMultiplier set to the given
-// value.
-//
-// LevelSizeMultiplier sets the ratio between the maximum sizes of contiguous levels in the LSM.
-// Once a level grows larger than this ratio allows, the compaction process will be
-// triggered.
-//
-// The default value of LevelSizeMultiplier is 10.
-func (opt Options) WithLevelSizeMultiplier(val int) Options {
- opt.LevelSizeMultiplier = val
- return opt
-}
-
-// WithMaxLevels returns a new Options value with MaxLevels set to the given value.
-//
-// Maximum number of levels of compaction allowed in the LSM.
-//
-// The default value of MaxLevels is 7.
-func (opt Options) WithMaxLevels(val int) Options {
- opt.MaxLevels = val
- return opt
-}
-
-// WithValueThreshold returns a new Options value with ValueThreshold set to the given value.
-//
-// ValueThreshold sets the threshold used to decide whether a value is stored directly in the LSM
-// tree or separately in the value log files.
-//
-// The default value of ValueThreshold is 1 KB, but LSMOnlyOptions sets it to maxValueThreshold.
-func (opt Options) WithValueThreshold(val int) Options {
- opt.ValueThreshold = val
- return opt
-}
-
-// WithNumMemtables returns a new Options value with NumMemtables set to the given value.
-//
-// NumMemtables sets the maximum number of tables to keep in memory before stalling.
-//
-// The default value of NumMemtables is 5.
-func (opt Options) WithNumMemtables(val int) Options {
- opt.NumMemtables = val
- return opt
-}
-
-// WithBloomFalsePositive returns a new Options value with BloomFalsePositive set
-// to the given value.
-//
-// BloomFalsePositive sets the false positive probability of the bloom filter in any SSTable.
-// Before reading a key from a table, the bloom filter is checked for the key's existence.
-// BloomFalsePositive can impact the DB's read performance; a lower BloomFalsePositive value
-// consumes more memory.
-//
-// The default value of BloomFalsePositive is 0.01.
-func (opt Options) WithBloomFalsePositive(val float64) Options {
- opt.BloomFalsePositive = val
- return opt
-}
-
-// WithBlockSize returns a new Options value with BlockSize set to the given value.
-//
-// BlockSize sets the size of any block in SSTable. SSTable is divided into multiple blocks
-// internally. Each block is compressed using prefix diff encoding.
-//
-// The default value of BlockSize is 4KB.
-func (opt Options) WithBlockSize(val int) Options {
- opt.BlockSize = val
- return opt
-}
-
-// WithNumLevelZeroTables returns a new Options value with NumLevelZeroTables set to the given
-// value.
-//
-// NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts.
-//
-// The default value of NumLevelZeroTables is 5.
-func (opt Options) WithNumLevelZeroTables(val int) Options {
- opt.NumLevelZeroTables = val
- return opt
-}
-
-// WithNumLevelZeroTablesStall returns a new Options value with NumLevelZeroTablesStall set to the
-// given value.
-//
-// NumLevelZeroTablesStall sets the number of Level 0 tables that, once reached, causes the DB
-// to stall until compaction succeeds.
-//
-// The default value of NumLevelZeroTablesStall is 15.
-func (opt Options) WithNumLevelZeroTablesStall(val int) Options {
- opt.NumLevelZeroTablesStall = val
- return opt
-}
-
-// WithLevelOneSize returns a new Options value with LevelOneSize set to the given value.
-//
-// LevelOneSize sets the maximum total size for Level 1.
-//
-// The default value of LevelOneSize is 256MB.
-func (opt Options) WithLevelOneSize(val int64) Options {
- opt.LevelOneSize = val
- return opt
-}
-
-// WithValueLogFileSize returns a new Options value with ValueLogFileSize set to the given value.
-//
-// ValueLogFileSize sets the maximum size of a single value log file.
-//
-// The default value of ValueLogFileSize is 1GB.
-func (opt Options) WithValueLogFileSize(val int64) Options {
- opt.ValueLogFileSize = val
- return opt
-}
-
-// WithValueLogMaxEntries returns a new Options value with ValueLogMaxEntries set to the given
-// value.
-//
-// ValueLogMaxEntries sets the approximate maximum number of entries a value log file can hold.
-// The actual limit of a value log file is whichever of ValueLogFileSize and
-// ValueLogMaxEntries is reached first.
-//
-// The default value of ValueLogMaxEntries is one million (1000000).
-func (opt Options) WithValueLogMaxEntries(val uint32) Options {
- opt.ValueLogMaxEntries = val
- return opt
-}
-
-// WithNumCompactors returns a new Options value with NumCompactors set to the given value.
-//
-// NumCompactors sets the number of compaction workers to run concurrently.
-// Setting this to zero stops compactions, which could eventually cause writes to block forever.
-//
-// The default value of NumCompactors is 2. One is dedicated just for L0.
-func (opt Options) WithNumCompactors(val int) Options {
- opt.NumCompactors = val
- return opt
-}
-
-// WithCompactL0OnClose returns a new Options value with CompactL0OnClose set to the given value.
-//
-// CompactL0OnClose determines whether Level 0 should be compacted before closing the DB.
-// This ensures that both reads and writes are efficient when the DB is opened later.
-// CompactL0OnClose is set to true if KeepL0InMemory is set to true.
-//
-// The default value of CompactL0OnClose is true.
-func (opt Options) WithCompactL0OnClose(val bool) Options {
- opt.CompactL0OnClose = val
- return opt
-}
-
-// WithLogRotatesToFlush returns a new Options value with LogRotatesToFlush set to the given value.
-//
-// LogRotatesToFlush sets the number of value log file rotations after which the Memtables are
-// flushed to disk. This is useful in write loads with fewer keys and larger values. Such a
-// workload fills up the value logs quickly while not filling up the Memtables, so on a crash
-// and restart the value log head could force the replay of a good number of value log files,
-// which can slow down startup.
-//
-// The default value of LogRotatesToFlush is 2.
-func (opt Options) WithLogRotatesToFlush(val int32) Options {
- opt.LogRotatesToFlush = val
- return opt
-}
-
-// WithEncryptionKey returns a new Options value with EncryptionKey set to the given value.
-//
-// EncryptionKey is used to encrypt the data with AES. The AES variant is chosen based on
-// the key size: for example, 16 bytes will use AES-128, 24 bytes will use AES-192, and
-// 32 bytes will use AES-256.
-func (opt Options) WithEncryptionKey(key []byte) Options {
- opt.EncryptionKey = key
- return opt
-}
-
-// WithEncryptionKeyRotationDuration returns a new Options value with the duration set to
-// the given value.
-//
-// The key registry will use this duration to create new keys: if the previously generated
-// key is older than the given duration, the key registry will create a new key.
-func (opt Options) WithEncryptionKeyRotationDuration(d time.Duration) Options {
- opt.EncryptionKeyRotationDuration = d
- return opt
-}
-
-// WithKeepL0InMemory returns a new Options value with KeepL0InMemory set to the given value.
-//
-// When KeepL0InMemory is set to true we will keep all Level 0 tables in memory. This leads to
-// better performance in writes as well as compactions. In case of DB crash, the value log replay
-// will take longer to complete since memtables and all level 0 tables will have to be recreated.
-// This option also sets CompactL0OnClose option to true.
-//
-// The default value of KeepL0InMemory is false.
-func (opt Options) WithKeepL0InMemory(val bool) Options {
- opt.KeepL0InMemory = val
- return opt
-}
-
-// WithCompression returns a new Options value with Compression set to the given value.
-//
-// When compression is enabled, every block will be compressed using the specified algorithm.
-// This option doesn't affect existing tables. Only the newly created tables will be compressed.
-//
-// The default compression algorithm used is zstd when built with Cgo. Without Cgo, the default is
-// snappy. Compression is enabled by default.
-func (opt Options) WithCompression(cType options.CompressionType) Options {
- opt.Compression = cType
- return opt
-}
-
-// WithVerifyValueChecksum returns a new Options value with VerifyValueChecksum set to
-// the given value.
-//
-// When VerifyValueChecksum is set to true, checksum will be verified for every entry read
-// from the value log. If the value is stored in SST (value size less than value threshold) then the
-// checksum validation will not be done.
-//
-// The default value of VerifyValueChecksum is false.
-func (opt Options) WithVerifyValueChecksum(val bool) Options {
- opt.VerifyValueChecksum = val
- return opt
-}
-
-// WithChecksumVerificationMode returns a new Options value with ChecksumVerificationMode set to
-// the given value.
-//
-// ChecksumVerificationMode indicates when the db should verify checksums for SSTable blocks.
-//
-// The default value of ChecksumVerificationMode is options.NoVerification.
-func (opt Options) WithChecksumVerificationMode(cvMode options.ChecksumVerificationMode) Options {
- opt.ChecksumVerificationMode = cvMode
- return opt
-}
-
-// WithBlockCacheSize returns a new Options value with BlockCacheSize set to the given value.
-//
-// This value specifies how much data the cache should hold in memory. A smaller
-// cache means lower memory consumption, but lookups and iterations will take
-// longer. It is recommended to use a cache if you're using compression or encryption.
-// If compression and encryption are both disabled, adding a cache only leads to
-// unnecessary overhead, which will affect read performance. Setting the size to
-// zero disables the cache altogether.
-//
-// Default value of BlockCacheSize is zero.
-func (opt Options) WithBlockCacheSize(size int64) Options {
- opt.BlockCacheSize = size
- return opt
-}
-
-// WithInMemory returns a new Options value with InMemory mode set to the given value.
-//
-// When badger is running in InMemory mode, everything is stored in memory. No value/sst files are
-// created. In case of a crash all data will be lost.
-func (opt Options) WithInMemory(b bool) Options {
- opt.InMemory = b
- return opt
-}
-
-// WithZSTDCompressionLevel returns a new Options value with ZSTDCompressionLevel set
-// to the given value.
-//
-// The ZSTD compression algorithm supports 20 compression levels. The higher the compression
-// level, the better the compression ratio, but the lower the performance. Lower levels
-// give better performance and higher levels give better compression ratios.
-// We recommend using ZSTD compression level 1. Any level higher than 1 seems to
-// deteriorate badger's performance.
-// The following benchmarks were done on a 4 KB block size (default block size). The compression
-// ratio is supposed to increase with increasing compression level, but since the input to the
-// compression algorithm is small (4 KB), we don't get a significant benefit at level 3. It is
-// advised to write your own benchmarks before choosing a compression algorithm or level.
-//
-// NOTE: The benchmarks are with DataDog ZSTD that requires CGO. Hence, no longer valid.
-// no_compression-16 10 502848865 ns/op 165.46 MB/s -
-// zstd_compression/level_1-16 7 739037966 ns/op 112.58 MB/s 2.93
-// zstd_compression/level_3-16 7 756950250 ns/op 109.91 MB/s 2.72
-// zstd_compression/level_15-16 1 11135686219 ns/op 7.47 MB/s 4.38
-// Benchmark code can be found in table/builder_test.go file
-func (opt Options) WithZSTDCompressionLevel(cLevel int) Options {
- opt.ZSTDCompressionLevel = cLevel
- return opt
-}
-
-// WithBypassLockGuard returns a new Options value with BypassLockGuard
-// set to the given value.
-//
-// When BypassLockGuard option is set, badger will not acquire a lock on the
-// directory. This could lead to data corruption if multiple badger instances
-// write to the same data directory. Use this option with caution.
-//
-// The default value of BypassLockGuard is false.
-func (opt Options) WithBypassLockGuard(b bool) Options {
- opt.BypassLockGuard = b
- return opt
-}
-
-// WithLoadBloomsOnOpen returns a new Options value with LoadBloomsOnOpen set to the given value.
-//
-// Badger uses bloom filters to speed up key lookups. When LoadBloomsOnOpen is set
-// to false, bloom filters will be loaded lazily and not on DB open. Set this
-// option to false to reduce the time taken to open the DB.
-//
-// The default value of LoadBloomsOnOpen is true.
-func (opt Options) WithLoadBloomsOnOpen(b bool) Options {
- opt.LoadBloomsOnOpen = b
- return opt
-}
-
-// WithIndexCacheSize returns a new Options value with IndexCacheSize set to
-// the given value.
-//
-// This value specifies how much memory should be used by table indices. These
-// indices include the block offsets and the bloom filters. Badger uses bloom
-// filters to speed up lookups. Each table has its own bloom
-// filter, and each bloom filter is approximately 5 MB.
-//
-// Zero value for IndexCacheSize means all the indices will be kept in
-// memory and the cache is disabled.
-//
-// The default value of IndexCacheSize is 0 which means all indices are kept in
-// memory.
-func (opt Options) WithIndexCacheSize(size int64) Options {
- opt.IndexCacheSize = size
- return opt
-}
-
-// WithDetectConflicts returns a new Options value with DetectConflicts set to the given value.
-//
-// The DetectConflicts option determines whether transactions are checked for
-// conflicts before committing them. When this option is set to false
-// (DetectConflicts=false), badger can process transactions at a higher rate.
-// Setting this option to false might be useful when the user application
-// deals with conflict detection and resolution itself.
-//
-// The default value of DetectConflicts is true.
-func (opt Options) WithDetectConflicts(b bool) Options {
- opt.DetectConflicts = b
- return opt
-}
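
Every WithX helper above takes Options by value and returns the modified copy, so calls chain without mutating the DefaultOptions result. A minimal sketch of the intended usage (the path and the particular overrides are illustrative, not recommendations):

```go
package main

import (
	"log"

	badger "github.com/dgraph-io/badger/v2"
	"github.com/dgraph-io/badger/v2/options"
)

func main() {
	// Start from the recommended defaults and override selectively.
	opts := badger.DefaultOptions("/tmp/badger-data").
		WithSyncWrites(false).           // trade crash durability for write throughput
		WithCompression(options.Snappy). // affects only newly created tables
		WithBlockCacheSize(64 << 20)     // a cache is recommended once compression is on

	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```
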
diff --git a/vendor/github.com/dgraph-io/badger/v2/options/options.go b/vendor/github.com/dgraph-io/badger/v2/options/options.go
deleted file mode 100644
index 564f780f..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/options/options.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package options
-
-// FileLoadingMode specifies how data in LSM table files and value log files should
-// be loaded.
-type FileLoadingMode int
-
-const (
- // FileIO indicates that files must be loaded using standard I/O
- FileIO FileLoadingMode = iota
- // LoadToRAM indicates that the file must be loaded into RAM
- LoadToRAM
- // MemoryMap indicates that the file must be memory-mapped
- MemoryMap
-)
-
-// ChecksumVerificationMode tells when the DB should verify checksums for SSTable blocks.
-type ChecksumVerificationMode int
-
-const (
- // NoVerification indicates DB should not verify checksum for SSTable blocks.
- NoVerification ChecksumVerificationMode = iota
- // OnTableRead indicates checksum should be verified while opening an SSTable.
- OnTableRead
- // OnBlockRead indicates checksum should be verified on every SSTable block read.
- OnBlockRead
- // OnTableAndBlockRead indicates checksum should be verified
- // on SSTable opening and on every block read.
- OnTableAndBlockRead
-)
-
-// CompressionType specifies how a block should be compressed.
-type CompressionType uint32
-
-const (
- // None mode indicates that a block is not compressed.
- None CompressionType = 0
- // Snappy mode indicates that a block is compressed using Snappy algorithm.
- Snappy CompressionType = 1
- // ZSTD mode indicates that a block is compressed using ZSTD algorithm.
- ZSTD CompressionType = 2
-)
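
These enums are consumed through the Options helpers shown earlier. For instance, a sketch (path illustrative) that trades CPU for early detection of on-disk corruption:

```go
package main

import (
	badger "github.com/dgraph-io/badger/v2"
	"github.com/dgraph-io/badger/v2/options"
)

func main() {
	// OnTableAndBlockRead is the strictest (and slowest) verification mode:
	// checksums are checked when an SSTable is opened and on every block read.
	opts := badger.DefaultOptions("/tmp/badger-strict").
		WithTableLoadingMode(options.MemoryMap).
		WithChecksumVerificationMode(options.OnTableAndBlockRead)
	_ = opts // pass to badger.Open as usual
}
```
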
diff --git a/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.pb.go b/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.pb.go
deleted file mode 100644
index edacec75..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.pb.go
+++ /dev/null
@@ -1,2531 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: badgerpb2.proto
-
-package pb
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type EncryptionAlgo int32
-
-const (
- EncryptionAlgo_aes EncryptionAlgo = 0
-)
-
-var EncryptionAlgo_name = map[int32]string{
- 0: "aes",
-}
-
-var EncryptionAlgo_value = map[string]int32{
- "aes": 0,
-}
-
-func (x EncryptionAlgo) String() string {
- return proto.EnumName(EncryptionAlgo_name, int32(x))
-}
-
-func (EncryptionAlgo) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{0}
-}
-
-type ManifestChange_Operation int32
-
-const (
- ManifestChange_CREATE ManifestChange_Operation = 0
- ManifestChange_DELETE ManifestChange_Operation = 1
-)
-
-var ManifestChange_Operation_name = map[int32]string{
- 0: "CREATE",
- 1: "DELETE",
-}
-
-var ManifestChange_Operation_value = map[string]int32{
- "CREATE": 0,
- "DELETE": 1,
-}
-
-func (x ManifestChange_Operation) String() string {
- return proto.EnumName(ManifestChange_Operation_name, int32(x))
-}
-
-func (ManifestChange_Operation) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{3, 0}
-}
-
-type Checksum_Algorithm int32
-
-const (
- Checksum_CRC32C Checksum_Algorithm = 0
- Checksum_XXHash64 Checksum_Algorithm = 1
-)
-
-var Checksum_Algorithm_name = map[int32]string{
- 0: "CRC32C",
- 1: "XXHash64",
-}
-
-var Checksum_Algorithm_value = map[string]int32{
- "CRC32C": 0,
- "XXHash64": 1,
-}
-
-func (x Checksum_Algorithm) String() string {
- return proto.EnumName(Checksum_Algorithm_name, int32(x))
-}
-
-func (Checksum_Algorithm) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{6, 0}
-}
-
-type KV struct {
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- UserMeta []byte `protobuf:"bytes,3,opt,name=user_meta,json=userMeta,proto3" json:"user_meta,omitempty"`
- Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
- ExpiresAt uint64 `protobuf:"varint,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"`
- Meta []byte `protobuf:"bytes,6,opt,name=meta,proto3" json:"meta,omitempty"`
- // Stream id is used to identify which stream the KV came from.
- StreamId uint32 `protobuf:"varint,10,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"`
- // Stream done is used to indicate end of stream.
- StreamDone bool `protobuf:"varint,11,opt,name=stream_done,json=streamDone,proto3" json:"stream_done,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *KV) Reset() { *m = KV{} }
-func (m *KV) String() string { return proto.CompactTextString(m) }
-func (*KV) ProtoMessage() {}
-func (*KV) Descriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{0}
-}
-func (m *KV) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KV.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KV) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KV.Merge(m, src)
-}
-func (m *KV) XXX_Size() int {
- return m.Size()
-}
-func (m *KV) XXX_DiscardUnknown() {
- xxx_messageInfo_KV.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KV proto.InternalMessageInfo
-
-func (m *KV) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *KV) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *KV) GetUserMeta() []byte {
- if m != nil {
- return m.UserMeta
- }
- return nil
-}
-
-func (m *KV) GetVersion() uint64 {
- if m != nil {
- return m.Version
- }
- return 0
-}
-
-func (m *KV) GetExpiresAt() uint64 {
- if m != nil {
- return m.ExpiresAt
- }
- return 0
-}
-
-func (m *KV) GetMeta() []byte {
- if m != nil {
- return m.Meta
- }
- return nil
-}
-
-func (m *KV) GetStreamId() uint32 {
- if m != nil {
- return m.StreamId
- }
- return 0
-}
-
-func (m *KV) GetStreamDone() bool {
- if m != nil {
- return m.StreamDone
- }
- return false
-}
-
-type KVList struct {
- Kv []*KV `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *KVList) Reset() { *m = KVList{} }
-func (m *KVList) String() string { return proto.CompactTextString(m) }
-func (*KVList) ProtoMessage() {}
-func (*KVList) Descriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{1}
-}
-func (m *KVList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KVList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KVList.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KVList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KVList.Merge(m, src)
-}
-func (m *KVList) XXX_Size() int {
- return m.Size()
-}
-func (m *KVList) XXX_DiscardUnknown() {
- xxx_messageInfo_KVList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KVList proto.InternalMessageInfo
-
-func (m *KVList) GetKv() []*KV {
- if m != nil {
- return m.Kv
- }
- return nil
-}
-
-type ManifestChangeSet struct {
- // A set of changes that are applied atomically.
- Changes []*ManifestChange `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ManifestChangeSet) Reset() { *m = ManifestChangeSet{} }
-func (m *ManifestChangeSet) String() string { return proto.CompactTextString(m) }
-func (*ManifestChangeSet) ProtoMessage() {}
-func (*ManifestChangeSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{2}
-}
-func (m *ManifestChangeSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ManifestChangeSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ManifestChangeSet.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ManifestChangeSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ManifestChangeSet.Merge(m, src)
-}
-func (m *ManifestChangeSet) XXX_Size() int {
- return m.Size()
-}
-func (m *ManifestChangeSet) XXX_DiscardUnknown() {
- xxx_messageInfo_ManifestChangeSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ManifestChangeSet proto.InternalMessageInfo
-
-func (m *ManifestChangeSet) GetChanges() []*ManifestChange {
- if m != nil {
- return m.Changes
- }
- return nil
-}
-
-type ManifestChange struct {
- Id uint64 `protobuf:"varint,1,opt,name=Id,proto3" json:"Id,omitempty"`
- Op ManifestChange_Operation `protobuf:"varint,2,opt,name=Op,proto3,enum=badgerpb2.ManifestChange_Operation" json:"Op,omitempty"`
- Level uint32 `protobuf:"varint,3,opt,name=Level,proto3" json:"Level,omitempty"`
- KeyId uint64 `protobuf:"varint,4,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"`
- EncryptionAlgo EncryptionAlgo `protobuf:"varint,5,opt,name=encryption_algo,json=encryptionAlgo,proto3,enum=badgerpb2.EncryptionAlgo" json:"encryption_algo,omitempty"`
- Compression uint32 `protobuf:"varint,6,opt,name=compression,proto3" json:"compression,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ManifestChange) Reset() { *m = ManifestChange{} }
-func (m *ManifestChange) String() string { return proto.CompactTextString(m) }
-func (*ManifestChange) ProtoMessage() {}
-func (*ManifestChange) Descriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{3}
-}
-func (m *ManifestChange) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ManifestChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ManifestChange.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ManifestChange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ManifestChange.Merge(m, src)
-}
-func (m *ManifestChange) XXX_Size() int {
- return m.Size()
-}
-func (m *ManifestChange) XXX_DiscardUnknown() {
- xxx_messageInfo_ManifestChange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ManifestChange proto.InternalMessageInfo
-
-func (m *ManifestChange) GetId() uint64 {
- if m != nil {
- return m.Id
- }
- return 0
-}
-
-func (m *ManifestChange) GetOp() ManifestChange_Operation {
- if m != nil {
- return m.Op
- }
- return ManifestChange_CREATE
-}
-
-func (m *ManifestChange) GetLevel() uint32 {
- if m != nil {
- return m.Level
- }
- return 0
-}
-
-func (m *ManifestChange) GetKeyId() uint64 {
- if m != nil {
- return m.KeyId
- }
- return 0
-}
-
-func (m *ManifestChange) GetEncryptionAlgo() EncryptionAlgo {
- if m != nil {
- return m.EncryptionAlgo
- }
- return EncryptionAlgo_aes
-}
-
-func (m *ManifestChange) GetCompression() uint32 {
- if m != nil {
- return m.Compression
- }
- return 0
-}
-
-type BlockOffset struct {
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
- Len uint32 `protobuf:"varint,3,opt,name=len,proto3" json:"len,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BlockOffset) Reset() { *m = BlockOffset{} }
-func (m *BlockOffset) String() string { return proto.CompactTextString(m) }
-func (*BlockOffset) ProtoMessage() {}
-func (*BlockOffset) Descriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{4}
-}
-func (m *BlockOffset) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *BlockOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_BlockOffset.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *BlockOffset) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BlockOffset.Merge(m, src)
-}
-func (m *BlockOffset) XXX_Size() int {
- return m.Size()
-}
-func (m *BlockOffset) XXX_DiscardUnknown() {
- xxx_messageInfo_BlockOffset.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BlockOffset proto.InternalMessageInfo
-
-func (m *BlockOffset) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *BlockOffset) GetOffset() uint32 {
- if m != nil {
- return m.Offset
- }
- return 0
-}
-
-func (m *BlockOffset) GetLen() uint32 {
- if m != nil {
- return m.Len
- }
- return 0
-}
-
-type TableIndex struct {
- Offsets []*BlockOffset `protobuf:"bytes,1,rep,name=offsets,proto3" json:"offsets,omitempty"`
- BloomFilter []byte `protobuf:"bytes,2,opt,name=bloom_filter,json=bloomFilter,proto3" json:"bloom_filter,omitempty"`
- EstimatedSize uint64 `protobuf:"varint,3,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TableIndex) Reset() { *m = TableIndex{} }
-func (m *TableIndex) String() string { return proto.CompactTextString(m) }
-func (*TableIndex) ProtoMessage() {}
-func (*TableIndex) Descriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{5}
-}
-func (m *TableIndex) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TableIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_TableIndex.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *TableIndex) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TableIndex.Merge(m, src)
-}
-func (m *TableIndex) XXX_Size() int {
- return m.Size()
-}
-func (m *TableIndex) XXX_DiscardUnknown() {
- xxx_messageInfo_TableIndex.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TableIndex proto.InternalMessageInfo
-
-func (m *TableIndex) GetOffsets() []*BlockOffset {
- if m != nil {
- return m.Offsets
- }
- return nil
-}
-
-func (m *TableIndex) GetBloomFilter() []byte {
- if m != nil {
- return m.BloomFilter
- }
- return nil
-}
-
-func (m *TableIndex) GetEstimatedSize() uint64 {
- if m != nil {
- return m.EstimatedSize
- }
- return 0
-}
-
-type Checksum struct {
- Algo Checksum_Algorithm `protobuf:"varint,1,opt,name=algo,proto3,enum=badgerpb2.Checksum_Algorithm" json:"algo,omitempty"`
- Sum uint64 `protobuf:"varint,2,opt,name=sum,proto3" json:"sum,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Checksum) Reset() { *m = Checksum{} }
-func (m *Checksum) String() string { return proto.CompactTextString(m) }
-func (*Checksum) ProtoMessage() {}
-func (*Checksum) Descriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{6}
-}
-func (m *Checksum) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Checksum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Checksum.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Checksum) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Checksum.Merge(m, src)
-}
-func (m *Checksum) XXX_Size() int {
- return m.Size()
-}
-func (m *Checksum) XXX_DiscardUnknown() {
- xxx_messageInfo_Checksum.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Checksum proto.InternalMessageInfo
-
-func (m *Checksum) GetAlgo() Checksum_Algorithm {
- if m != nil {
- return m.Algo
- }
- return Checksum_CRC32C
-}
-
-func (m *Checksum) GetSum() uint64 {
- if m != nil {
- return m.Sum
- }
- return 0
-}
-
-type DataKey struct {
- KeyId uint64 `protobuf:"varint,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"`
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
- Iv []byte `protobuf:"bytes,3,opt,name=iv,proto3" json:"iv,omitempty"`
- CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DataKey) Reset() { *m = DataKey{} }
-func (m *DataKey) String() string { return proto.CompactTextString(m) }
-func (*DataKey) ProtoMessage() {}
-func (*DataKey) Descriptor() ([]byte, []int) {
- return fileDescriptor_e63e84f9f0d3998c, []int{7}
-}
-func (m *DataKey) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DataKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DataKey.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DataKey) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DataKey.Merge(m, src)
-}
-func (m *DataKey) XXX_Size() int {
- return m.Size()
-}
-func (m *DataKey) XXX_DiscardUnknown() {
- xxx_messageInfo_DataKey.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DataKey proto.InternalMessageInfo
-
-func (m *DataKey) GetKeyId() uint64 {
- if m != nil {
- return m.KeyId
- }
- return 0
-}
-
-func (m *DataKey) GetData() []byte {
- if m != nil {
- return m.Data
- }
- return nil
-}
-
-func (m *DataKey) GetIv() []byte {
- if m != nil {
- return m.Iv
- }
- return nil
-}
-
-func (m *DataKey) GetCreatedAt() int64 {
- if m != nil {
- return m.CreatedAt
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("badgerpb2.EncryptionAlgo", EncryptionAlgo_name, EncryptionAlgo_value)
- proto.RegisterEnum("badgerpb2.ManifestChange_Operation", ManifestChange_Operation_name, ManifestChange_Operation_value)
- proto.RegisterEnum("badgerpb2.Checksum_Algorithm", Checksum_Algorithm_name, Checksum_Algorithm_value)
- proto.RegisterType((*KV)(nil), "badgerpb2.KV")
- proto.RegisterType((*KVList)(nil), "badgerpb2.KVList")
- proto.RegisterType((*ManifestChangeSet)(nil), "badgerpb2.ManifestChangeSet")
- proto.RegisterType((*ManifestChange)(nil), "badgerpb2.ManifestChange")
- proto.RegisterType((*BlockOffset)(nil), "badgerpb2.BlockOffset")
- proto.RegisterType((*TableIndex)(nil), "badgerpb2.TableIndex")
- proto.RegisterType((*Checksum)(nil), "badgerpb2.Checksum")
- proto.RegisterType((*DataKey)(nil), "badgerpb2.DataKey")
-}
-
-func init() { proto.RegisterFile("badgerpb2.proto", fileDescriptor_e63e84f9f0d3998c) }
-
-var fileDescriptor_e63e84f9f0d3998c = []byte{
- // 689 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0xcd, 0x6e, 0xda, 0x40,
- 0x10, 0xc6, 0xc6, 0xe1, 0x67, 0x08, 0x84, 0xae, 0xda, 0xc8, 0x51, 0x15, 0x4a, 0x1c, 0x45, 0x45,
- 0x95, 0x0a, 0x2d, 0x54, 0xbd, 0x13, 0x42, 0x15, 0x44, 0x22, 0xa4, 0x4d, 0x14, 0x45, 0xbd, 0xa0,
- 0xc5, 0x1e, 0xc0, 0xc2, 0x7f, 0xf2, 0x2e, 0x56, 0xc8, 0x13, 0xf4, 0xd2, 0x7b, 0x1f, 0xa9, 0xc7,
- 0x1e, 0xfa, 0x00, 0x55, 0xfa, 0x22, 0x95, 0xd7, 0x86, 0x82, 0xd4, 0xde, 0x66, 0xbe, 0xf9, 0x76,
- 0x67, 0xbf, 0x6f, 0xc6, 0x86, 0x83, 0x09, 0xb3, 0x66, 0x18, 0x06, 0x93, 0x76, 0x33, 0x08, 0x7d,
- 0xe1, 0x93, 0xe2, 0x06, 0x30, 0x7e, 0x2a, 0xa0, 0x0e, 0xef, 0x48, 0x15, 0xb2, 0x0b, 0x5c, 0xe9,
- 0x4a, 0x5d, 0x69, 0xec, 0xd3, 0x38, 0x24, 0xcf, 0x61, 0x2f, 0x62, 0xce, 0x12, 0x75, 0x55, 0x62,
- 0x49, 0x42, 0x5e, 0x42, 0x71, 0xc9, 0x31, 0x1c, 0xbb, 0x28, 0x98, 0x9e, 0x95, 0x95, 0x42, 0x0c,
- 0x5c, 0xa3, 0x60, 0x44, 0x87, 0x7c, 0x84, 0x21, 0xb7, 0x7d, 0x4f, 0xd7, 0xea, 0x4a, 0x43, 0xa3,
- 0xeb, 0x94, 0x1c, 0x03, 0xe0, 0x43, 0x60, 0x87, 0xc8, 0xc7, 0x4c, 0xe8, 0x7b, 0xb2, 0x58, 0x4c,
- 0x91, 0xae, 0x20, 0x04, 0x34, 0x79, 0x61, 0x4e, 0x5e, 0x28, 0xe3, 0xb8, 0x13, 0x17, 0x21, 0x32,
- 0x77, 0x6c, 0x5b, 0x3a, 0xd4, 0x95, 0x46, 0x99, 0x16, 0x12, 0x60, 0x60, 0x91, 0x57, 0x50, 0x4a,
- 0x8b, 0x96, 0xef, 0xa1, 0x5e, 0xaa, 0x2b, 0x8d, 0x02, 0x85, 0x04, 0xba, 0xf0, 0x3d, 0x34, 0x5e,
- 0x43, 0x6e, 0x78, 0x77, 0x65, 0x73, 0x41, 0x8e, 0x41, 0x5d, 0x44, 0xba, 0x52, 0xcf, 0x36, 0x4a,
- 0xed, 0x72, 0xf3, 0xaf, 0x13, 0xc3, 0x3b, 0xaa, 0x2e, 0x22, 0xe3, 0x12, 0x9e, 0x5d, 0x33, 0xcf,
- 0x9e, 0x22, 0x17, 0xbd, 0x39, 0xf3, 0x66, 0x78, 0x83, 0x82, 0x74, 0x20, 0x6f, 0xca, 0x84, 0xa7,
- 0x07, 0x8f, 0xb6, 0x0e, 0xee, 0xd2, 0xe9, 0x9a, 0x69, 0x7c, 0x55, 0xa1, 0xb2, 0x5b, 0x23, 0x15,
- 0x50, 0x07, 0x96, 0x34, 0x55, 0xa3, 0xea, 0xc0, 0x22, 0x1d, 0x50, 0x47, 0x81, 0x34, 0xb4, 0xd2,
- 0x3e, 0xfd, 0xef, 0x95, 0xcd, 0x51, 0x80, 0x21, 0x13, 0xb6, 0xef, 0x51, 0x75, 0x14, 0xc4, 0x83,
- 0xb8, 0xc2, 0x08, 0x1d, 0x69, 0x77, 0x99, 0x26, 0x09, 0x79, 0x01, 0xb9, 0x05, 0xae, 0x62, 0x6f,
- 0x12, 0xab, 0xf7, 0x16, 0xb8, 0x1a, 0x58, 0xe4, 0x1c, 0x0e, 0xd0, 0x33, 0xc3, 0x55, 0x10, 0x1f,
- 0x1f, 0x33, 0x67, 0xe6, 0x4b, 0xb7, 0x2b, 0x3b, 0x0a, 0xfa, 0x1b, 0x46, 0xd7, 0x99, 0xf9, 0xb4,
- 0x82, 0x3b, 0x39, 0xa9, 0x43, 0xc9, 0xf4, 0xdd, 0x20, 0x44, 0x2e, 0x47, 0x99, 0x93, 0x6d, 0xb7,
- 0x21, 0xe3, 0x14, 0x8a, 0x9b, 0x37, 0x12, 0x80, 0x5c, 0x8f, 0xf6, 0xbb, 0xb7, 0xfd, 0x6a, 0x26,
- 0x8e, 0x2f, 0xfa, 0x57, 0xfd, 0xdb, 0x7e, 0x55, 0x31, 0x06, 0x50, 0x3a, 0x77, 0x7c, 0x73, 0x31,
- 0x9a, 0x4e, 0x39, 0x8a, 0x7f, 0x6c, 0xd8, 0x21, 0xe4, 0x7c, 0x59, 0x93, 0x8e, 0x94, 0x69, 0x9a,
- 0xc5, 0x4c, 0x07, 0xbd, 0x54, 0x6e, 0x1c, 0x1a, 0x5f, 0x14, 0x80, 0x5b, 0x36, 0x71, 0x70, 0xe0,
- 0x59, 0xf8, 0x40, 0xde, 0x41, 0x3e, 0xa1, 0xae, 0xc7, 0x73, 0xb8, 0x25, 0x6e, 0xab, 0x27, 0x5d,
- 0xd3, 0xc8, 0x09, 0xec, 0x4f, 0x1c, 0xdf, 0x77, 0xc7, 0x53, 0xdb, 0x11, 0x18, 0xa6, 0x3b, 0x5d,
- 0x92, 0xd8, 0x27, 0x09, 0x91, 0x33, 0xa8, 0x20, 0x17, 0xb6, 0xcb, 0x04, 0x5a, 0x63, 0x6e, 0x3f,
- 0xa2, 0x7c, 0x80, 0x46, 0xcb, 0x1b, 0xf4, 0xc6, 0x7e, 0x44, 0x23, 0x82, 0x42, 0x6f, 0x8e, 0xe6,
- 0x82, 0x2f, 0x5d, 0xf2, 0x1e, 0x34, 0xe9, 0xb0, 0x22, 0x1d, 0x3e, 0xde, 0x7a, 0xc4, 0x9a, 0xd2,
- 0x8c, 0x0d, 0x0d, 0x6d, 0x31, 0x77, 0xa9, 0xa4, 0xc6, 0xda, 0xf8, 0xd2, 0x95, 0xfd, 0x35, 0x1a,
- 0x87, 0xc6, 0x19, 0x14, 0x37, 0xa4, 0xc4, 0xcb, 0x5e, 0xa7, 0xdd, 0xab, 0x66, 0xc8, 0x3e, 0x14,
- 0xee, 0xef, 0x2f, 0x19, 0x9f, 0x7f, 0xfc, 0x50, 0x55, 0x0c, 0x13, 0xf2, 0x17, 0x4c, 0xb0, 0x21,
- 0xae, 0xb6, 0x46, 0xaf, 0x6c, 0x8f, 0x9e, 0x80, 0x66, 0x31, 0xc1, 0x52, 0x6d, 0x32, 0x8e, 0x17,
- 0xd0, 0x8e, 0xd2, 0xef, 0x54, 0xb5, 0xa3, 0xf8, 0x3b, 0x34, 0x43, 0x94, 0x12, 0x99, 0x90, 0x9b,
- 0x93, 0xa5, 0xc5, 0x14, 0xe9, 0x8a, 0x37, 0x47, 0x50, 0xd9, 0xdd, 0x0d, 0x92, 0x87, 0x2c, 0x43,
- 0x5e, 0xcd, 0x9c, 0x77, 0xbe, 0x3f, 0xd5, 0x94, 0x1f, 0x4f, 0x35, 0xe5, 0xd7, 0x53, 0x4d, 0xf9,
- 0xf6, 0xbb, 0x96, 0xf9, 0x7c, 0x32, 0xb3, 0xc5, 0x7c, 0x39, 0x69, 0x9a, 0xbe, 0xdb, 0xb2, 0x66,
- 0x21, 0x0b, 0xe6, 0x6f, 0x6d, 0xbf, 0x95, 0x78, 0xd0, 0x8a, 0xda, 0xad, 0x60, 0x32, 0xc9, 0xc9,
- 0xdf, 0x4d, 0xe7, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x88, 0x22, 0x82, 0x98, 0x81, 0x04, 0x00,
- 0x00,
-}
-
-func (m *KV) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KV) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KV) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.StreamDone {
- i--
- if m.StreamDone {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x58
- }
- if m.StreamId != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.StreamId))
- i--
- dAtA[i] = 0x50
- }
- if len(m.Meta) > 0 {
- i -= len(m.Meta)
- copy(dAtA[i:], m.Meta)
- i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Meta)))
- i--
- dAtA[i] = 0x32
- }
- if m.ExpiresAt != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.ExpiresAt))
- i--
- dAtA[i] = 0x28
- }
- if m.Version != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Version))
- i--
- dAtA[i] = 0x20
- }
- if len(m.UserMeta) > 0 {
- i -= len(m.UserMeta)
- copy(dAtA[i:], m.UserMeta)
- i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.UserMeta)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Value) > 0 {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *KVList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KVList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KVList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Kv) > 0 {
- for iNdEx := len(m.Kv) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Kv[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintBadgerpb2(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ManifestChangeSet) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ManifestChangeSet) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ManifestChangeSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Changes) > 0 {
- for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Changes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintBadgerpb2(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ManifestChange) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ManifestChange) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ManifestChange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Compression != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Compression))
- i--
- dAtA[i] = 0x30
- }
- if m.EncryptionAlgo != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.EncryptionAlgo))
- i--
- dAtA[i] = 0x28
- }
- if m.KeyId != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.KeyId))
- i--
- dAtA[i] = 0x20
- }
- if m.Level != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Level))
- i--
- dAtA[i] = 0x18
- }
- if m.Op != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Op))
- i--
- dAtA[i] = 0x10
- }
- if m.Id != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Id))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *BlockOffset) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *BlockOffset) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *BlockOffset) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Len != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Len))
- i--
- dAtA[i] = 0x18
- }
- if m.Offset != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Offset))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *TableIndex) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *TableIndex) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TableIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.EstimatedSize != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.EstimatedSize))
- i--
- dAtA[i] = 0x18
- }
- if len(m.BloomFilter) > 0 {
- i -= len(m.BloomFilter)
- copy(dAtA[i:], m.BloomFilter)
- i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.BloomFilter)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Offsets) > 0 {
- for iNdEx := len(m.Offsets) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Offsets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintBadgerpb2(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Checksum) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Checksum) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Checksum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Sum != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Sum))
- i--
- dAtA[i] = 0x10
- }
- if m.Algo != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Algo))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DataKey) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DataKey) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DataKey) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.CreatedAt != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.CreatedAt))
- i--
- dAtA[i] = 0x20
- }
- if len(m.Iv) > 0 {
- i -= len(m.Iv)
- copy(dAtA[i:], m.Iv)
- i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Iv)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Data) > 0 {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Data)))
- i--
- dAtA[i] = 0x12
- }
- if m.KeyId != 0 {
- i = encodeVarintBadgerpb2(dAtA, i, uint64(m.KeyId))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintBadgerpb2(dAtA []byte, offset int, v uint64) int {
- offset -= sovBadgerpb2(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *KV) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- l = len(m.Value)
- if l > 0 {
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- l = len(m.UserMeta)
- if l > 0 {
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- if m.Version != 0 {
- n += 1 + sovBadgerpb2(uint64(m.Version))
- }
- if m.ExpiresAt != 0 {
- n += 1 + sovBadgerpb2(uint64(m.ExpiresAt))
- }
- l = len(m.Meta)
- if l > 0 {
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- if m.StreamId != 0 {
- n += 1 + sovBadgerpb2(uint64(m.StreamId))
- }
- if m.StreamDone {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *KVList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Kv) > 0 {
- for _, e := range m.Kv {
- l = e.Size()
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ManifestChangeSet) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Changes) > 0 {
- for _, e := range m.Changes {
- l = e.Size()
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ManifestChange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Id != 0 {
- n += 1 + sovBadgerpb2(uint64(m.Id))
- }
- if m.Op != 0 {
- n += 1 + sovBadgerpb2(uint64(m.Op))
- }
- if m.Level != 0 {
- n += 1 + sovBadgerpb2(uint64(m.Level))
- }
- if m.KeyId != 0 {
- n += 1 + sovBadgerpb2(uint64(m.KeyId))
- }
- if m.EncryptionAlgo != 0 {
- n += 1 + sovBadgerpb2(uint64(m.EncryptionAlgo))
- }
- if m.Compression != 0 {
- n += 1 + sovBadgerpb2(uint64(m.Compression))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *BlockOffset) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- if m.Offset != 0 {
- n += 1 + sovBadgerpb2(uint64(m.Offset))
- }
- if m.Len != 0 {
- n += 1 + sovBadgerpb2(uint64(m.Len))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *TableIndex) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Offsets) > 0 {
- for _, e := range m.Offsets {
- l = e.Size()
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- }
- l = len(m.BloomFilter)
- if l > 0 {
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- if m.EstimatedSize != 0 {
- n += 1 + sovBadgerpb2(uint64(m.EstimatedSize))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Checksum) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Algo != 0 {
- n += 1 + sovBadgerpb2(uint64(m.Algo))
- }
- if m.Sum != 0 {
- n += 1 + sovBadgerpb2(uint64(m.Sum))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DataKey) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.KeyId != 0 {
- n += 1 + sovBadgerpb2(uint64(m.KeyId))
- }
- l = len(m.Data)
- if l > 0 {
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- l = len(m.Iv)
- if l > 0 {
- n += 1 + l + sovBadgerpb2(uint64(l))
- }
- if m.CreatedAt != 0 {
- n += 1 + sovBadgerpb2(uint64(m.CreatedAt))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovBadgerpb2(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozBadgerpb2(x uint64) (n int) {
- return sovBadgerpb2(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *KV) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KV: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
- if m.Value == nil {
- m.Value = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field UserMeta", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.UserMeta = append(m.UserMeta[:0], dAtA[iNdEx:postIndex]...)
- if m.UserMeta == nil {
- m.UserMeta = []byte{}
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- m.Version = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Version |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType)
- }
- m.ExpiresAt = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ExpiresAt |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Meta = append(m.Meta[:0], dAtA[iNdEx:postIndex]...)
- if m.Meta == nil {
- m.Meta = []byte{}
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType)
- }
- m.StreamId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StreamId |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StreamDone", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.StreamDone = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipBadgerpb2(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *KVList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KVList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KVList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Kv = append(m.Kv, &KV{})
- if err := m.Kv[len(m.Kv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipBadgerpb2(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ManifestChangeSet) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ManifestChangeSet: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ManifestChangeSet: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Changes = append(m.Changes, &ManifestChange{})
- if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipBadgerpb2(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ManifestChange) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ManifestChange: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ManifestChange: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- m.Id = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Id |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType)
- }
- m.Op = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Op |= ManifestChange_Operation(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
- }
- m.Level = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Level |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field KeyId", wireType)
- }
- m.KeyId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.KeyId |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field EncryptionAlgo", wireType)
- }
- m.EncryptionAlgo = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.EncryptionAlgo |= EncryptionAlgo(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Compression", wireType)
- }
- m.Compression = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Compression |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipBadgerpb2(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *BlockOffset) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: BlockOffset: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: BlockOffset: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
- }
- m.Offset = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Offset |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Len", wireType)
- }
- m.Len = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Len |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipBadgerpb2(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *TableIndex) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: TableIndex: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: TableIndex: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Offsets", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Offsets = append(m.Offsets, &BlockOffset{})
- if err := m.Offsets[len(m.Offsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field BloomFilter", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.BloomFilter = append(m.BloomFilter[:0], dAtA[iNdEx:postIndex]...)
- if m.BloomFilter == nil {
- m.BloomFilter = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field EstimatedSize", wireType)
- }
- m.EstimatedSize = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.EstimatedSize |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipBadgerpb2(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Checksum) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Checksum: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Checksum: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Algo", wireType)
- }
- m.Algo = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Algo |= Checksum_Algorithm(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
- }
- m.Sum = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Sum |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipBadgerpb2(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DataKey) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DataKey: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DataKey: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field KeyId", wireType)
- }
- m.KeyId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.KeyId |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Iv", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Iv = append(m.Iv[:0], dAtA[iNdEx:postIndex]...)
- if m.Iv == nil {
- m.Iv = []byte{}
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
- }
- m.CreatedAt = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CreatedAt |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipBadgerpb2(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthBadgerpb2
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipBadgerpb2(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowBadgerpb2
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthBadgerpb2
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupBadgerpb2
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthBadgerpb2
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthBadgerpb2 = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowBadgerpb2 = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupBadgerpb2 = fmt.Errorf("proto: unexpected end of group")
-)
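
For context on the generated code removed above: its marshalers fill buffers backwards, and everything bottoms out in the two varint helpers `encodeVarintBadgerpb2` and `sovBadgerpb2`. A minimal standalone sketch of that base-128 encoding, with illustrative names (not part of this diff):

```
package main

import (
	"fmt"
	"math/bits"
)

// sov returns how many bytes the varint encoding of x occupies.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint writes x so that it ends at offset and returns the new start,
// matching how the deleted MarshalToSizedBuffer methods walk backwards.
func encodeVarint(buf []byte, offset int, x uint64) int {
	offset -= sov(x)
	base := offset
	for x >= 1<<7 {
		buf[offset] = uint8(x&0x7f | 0x80) // low 7 bits, continuation bit set
		x >>= 7
		offset++
	}
	buf[offset] = uint8(x)
	return base
}

func main() {
	buf := make([]byte, 10)
	start := encodeVarint(buf, len(buf), 300)
	fmt.Printf("% x\n", buf[start:]) // prints: ac 02
}
```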
diff --git a/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.proto b/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.proto
deleted file mode 100644
index 00fad8c3..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.proto
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Use gen.sh in this directory to generate .pb.go files.
-syntax = "proto3";
-
-package badgerpb2;
-
-option go_package = "github.com/dgraph-io/badger/v2/pb";
-
-message KV {
- bytes key = 1;
- bytes value = 2;
- bytes user_meta = 3;
- uint64 version = 4;
- uint64 expires_at = 5;
- bytes meta = 6;
-
- // Stream id is used to identify which stream the KV came from.
- uint32 stream_id = 10;
- // Stream done is used to indicate end of stream.
- bool stream_done = 11;
-}
-
-message KVList {
- repeated KV kv = 1;
-}
-
-message ManifestChangeSet {
- // A set of changes that are applied atomically.
- repeated ManifestChange changes = 1;
-}
-
-enum EncryptionAlgo {
- aes = 0;
-}
-
-message ManifestChange {
- uint64 Id = 1; // Table ID.
- enum Operation {
- CREATE = 0;
- DELETE = 1;
- }
- Operation Op = 2;
- uint32 Level = 3; // Only used for CREATE.
- uint64 key_id = 4;
- EncryptionAlgo encryption_algo = 5;
- uint32 compression = 6; // Only used for CREATE Op.
-}
-
-message BlockOffset {
- bytes key = 1;
- uint32 offset = 2;
- uint32 len = 3;
-}
-
-message TableIndex {
- repeated BlockOffset offsets = 1;
- bytes bloom_filter = 2;
- uint64 estimated_size = 3;
-}
-
-message Checksum {
- enum Algorithm {
- CRC32C = 0;
- XXHash64 = 1;
- }
- Algorithm algo = 1; // For storing type of Checksum algorithm used
- uint64 sum = 2;
-}
-
-message DataKey {
- uint64 key_id = 1;
- bytes data = 2;
- bytes iv = 3;
- int64 created_at = 4;
-}
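
The single-byte tags hard-coded in the deleted marshaler (0xa, 0x12, 0x50, 0x58, and so on) follow directly from this schema: a protobuf tag is the field number shifted left by three, OR'd with the wire type (0 for varints, 2 for length-delimited fields). A quick sanity check, assuming nothing beyond the schema above:

```
package main

import "fmt"

// tag computes a protobuf field tag byte: field number << 3 | wire type.
func tag(fieldNum, wireType int) int { return fieldNum<<3 | wireType }

func main() {
	fmt.Printf("%#x\n", tag(1, 2))  // 0xa:  KV.key, length-delimited
	fmt.Printf("%#x\n", tag(10, 0)) // 0x50: KV.stream_id, varint
	fmt.Printf("%#x\n", tag(11, 0)) // 0x58: KV.stream_done, varint
}
```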
diff --git a/vendor/github.com/dgraph-io/badger/v2/pb/gen.sh b/vendor/github.com/dgraph-io/badger/v2/pb/gen.sh
deleted file mode 100644
index 0b017692..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/pb/gen.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-# Run this script from its directory, so that badgerpb2.proto is where it's expected to
-# be.
-
-# You might need to go get -v github.com/gogo/protobuf/...
-
-protoc --gofast_out=plugins=grpc:. --gofast_opt=paths=source_relative -I=. badgerpb2.proto
diff --git a/vendor/github.com/dgraph-io/badger/v2/publisher.go b/vendor/github.com/dgraph-io/badger/v2/publisher.go
deleted file mode 100644
index bc5c6e8c..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/publisher.go
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "sync"
-
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/trie"
- "github.com/dgraph-io/badger/v2/y"
-)
-
-type subscriber struct {
- prefixes [][]byte
- sendCh chan<- *pb.KVList
- subCloser *y.Closer
-}
-
-type publisher struct {
- sync.Mutex
- pubCh chan requests
- subscribers map[uint64]subscriber
- nextID uint64
- indexer *trie.Trie
-}
-
-func newPublisher() *publisher {
- return &publisher{
- pubCh: make(chan requests, 1000),
- subscribers: make(map[uint64]subscriber),
- nextID: 0,
- indexer: trie.NewTrie(),
- }
-}
-
-func (p *publisher) listenForUpdates(c *y.Closer) {
- defer func() {
- p.cleanSubscribers()
- c.Done()
- }()
- slurp := func(batch requests) {
- for {
- select {
- case reqs := <-p.pubCh:
- batch = append(batch, reqs...)
- default:
- p.publishUpdates(batch)
- return
- }
- }
- }
- for {
- select {
- case <-c.HasBeenClosed():
- return
- case reqs := <-p.pubCh:
- slurp(reqs)
- }
- }
-}
-
-func (p *publisher) publishUpdates(reqs requests) {
- p.Lock()
- defer func() {
- p.Unlock()
- // Release all the requests.
- reqs.DecrRef()
- }()
- batchedUpdates := make(map[uint64]*pb.KVList)
- for _, req := range reqs {
- for _, e := range req.Entries {
- ids := p.indexer.Get(e.Key)
- if len(ids) > 0 {
- k := y.SafeCopy(nil, e.Key)
- kv := &pb.KV{
- Key: y.ParseKey(k),
- Value: y.SafeCopy(nil, e.Value),
- Meta: []byte{e.UserMeta},
- ExpiresAt: e.ExpiresAt,
- Version: y.ParseTs(k),
- }
- for id := range ids {
- if _, ok := batchedUpdates[id]; !ok {
- batchedUpdates[id] = &pb.KVList{}
- }
- batchedUpdates[id].Kv = append(batchedUpdates[id].Kv, kv)
- }
- }
- }
- }
-
- for id, kvs := range batchedUpdates {
- p.subscribers[id].sendCh <- kvs
- }
-}
-
-func (p *publisher) newSubscriber(c *y.Closer, prefixes ...[]byte) (<-chan *pb.KVList, uint64) {
- p.Lock()
- defer p.Unlock()
- ch := make(chan *pb.KVList, 1000)
- id := p.nextID
- // Increment next ID.
- p.nextID++
- p.subscribers[id] = subscriber{
- prefixes: prefixes,
- sendCh: ch,
- subCloser: c,
- }
- for _, prefix := range prefixes {
- p.indexer.Add(prefix, id)
- }
- return ch, id
-}
-
-// cleanSubscribers stops all the subscribers. Ideally, it should be called while closing the DB.
-func (p *publisher) cleanSubscribers() {
- p.Lock()
- defer p.Unlock()
- for id, s := range p.subscribers {
- for _, prefix := range s.prefixes {
- p.indexer.Delete(prefix, id)
- }
- delete(p.subscribers, id)
- s.subCloser.SignalAndWait()
- }
-}
-
-func (p *publisher) deleteSubscriber(id uint64) {
- p.Lock()
- defer p.Unlock()
- if s, ok := p.subscribers[id]; ok {
- for _, prefix := range s.prefixes {
- p.indexer.Delete(prefix, id)
- }
- }
- delete(p.subscribers, id)
-}
-
-func (p *publisher) sendUpdates(reqs requests) {
- if p.noOfSubscribers() != 0 {
- reqs.IncrRef()
- p.pubCh <- reqs
- }
-}
-
-func (p *publisher) noOfSubscribers() int {
- p.Lock()
- defer p.Unlock()
- return len(p.subscribers)
-}
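
The publisher deleted above pairs a prefix trie with buffered per-subscriber channels: every committed entry is fanned out to each subscriber whose registered prefix matches the key. A minimal sketch of that fan-out idea, using a plain slice scan instead of badger's trie (all names here are illustrative):

```
package main

import (
	"bytes"
	"fmt"
)

type subscriberSketch struct {
	prefix []byte
	sendCh chan []byte
}

// publish delivers key to every subscriber whose prefix matches it.
func publish(subs []subscriberSketch, key []byte) {
	for _, s := range subs {
		if bytes.HasPrefix(key, s.prefix) {
			s.sendCh <- key
		}
	}
}

func main() {
	s := subscriberSketch{prefix: []byte("user/"), sendCh: make(chan []byte, 1)}
	publish([]subscriberSketch{s}, []byte("user/42"))
	fmt.Printf("%s\n", <-s.sendCh) // prints: user/42
}
```

Badger's trie makes the prefix lookup proportional to the key length rather than the subscriber count, which matters once there are many subscribers.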
diff --git a/vendor/github.com/dgraph-io/badger/v2/skl/README.md b/vendor/github.com/dgraph-io/badger/v2/skl/README.md
deleted file mode 100644
index e22e4590..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/skl/README.md
+++ /dev/null
@@ -1,113 +0,0 @@
-This skiplist performs much better than `skiplist` and `slist` in the read/write benchmarks below.
-
-```
-BenchmarkReadWrite/frac_0-8 3000000 537 ns/op
-BenchmarkReadWrite/frac_1-8 3000000 503 ns/op
-BenchmarkReadWrite/frac_2-8 3000000 492 ns/op
-BenchmarkReadWrite/frac_3-8 3000000 475 ns/op
-BenchmarkReadWrite/frac_4-8 3000000 440 ns/op
-BenchmarkReadWrite/frac_5-8 5000000 442 ns/op
-BenchmarkReadWrite/frac_6-8 5000000 380 ns/op
-BenchmarkReadWrite/frac_7-8 5000000 338 ns/op
-BenchmarkReadWrite/frac_8-8 5000000 294 ns/op
-BenchmarkReadWrite/frac_9-8 10000000 268 ns/op
-BenchmarkReadWrite/frac_10-8 100000000 26.3 ns/op
-```
-
-And even better than a simple map with read-write lock:
-
-```
-BenchmarkReadWriteMap/frac_0-8 2000000 774 ns/op
-BenchmarkReadWriteMap/frac_1-8 2000000 647 ns/op
-BenchmarkReadWriteMap/frac_2-8 3000000 605 ns/op
-BenchmarkReadWriteMap/frac_3-8 3000000 603 ns/op
-BenchmarkReadWriteMap/frac_4-8 3000000 556 ns/op
-BenchmarkReadWriteMap/frac_5-8 3000000 472 ns/op
-BenchmarkReadWriteMap/frac_6-8 3000000 476 ns/op
-BenchmarkReadWriteMap/frac_7-8 3000000 457 ns/op
-BenchmarkReadWriteMap/frac_8-8 5000000 444 ns/op
-BenchmarkReadWriteMap/frac_9-8 5000000 361 ns/op
-BenchmarkReadWriteMap/frac_10-8 10000000 212 ns/op
-```
-
-# Node Pooling
-
-Command used
-
-```
-rm -Rf tmp && /usr/bin/time -l ./populate -keys_mil 10
-```
-
-For pprof results, we run without using /usr/bin/time. Representative runs are shown below.
-
-Results seem to vary quite a bit between runs.
-
-## Before node pooling
-
-```
-1311.53MB of 1338.69MB total (97.97%)
-Dropped 30 nodes (cum <= 6.69MB)
-Showing top 10 nodes out of 37 (cum >= 12.50MB)
- flat flat% sum% cum cum%
- 523.04MB 39.07% 39.07% 523.04MB 39.07% github.com/dgraph-io/badger/skl.(*Skiplist).Put
- 184.51MB 13.78% 52.85% 184.51MB 13.78% runtime.stringtoslicebyte
- 166.01MB 12.40% 65.25% 689.04MB 51.47% github.com/dgraph-io/badger/mem.(*Table).Put
- 165MB 12.33% 77.58% 165MB 12.33% runtime.convT2E
- 116.92MB 8.73% 86.31% 116.92MB 8.73% bytes.makeSlice
- 62.50MB 4.67% 90.98% 62.50MB 4.67% main.newValue
- 34.50MB 2.58% 93.56% 34.50MB 2.58% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV
- 25.50MB 1.90% 95.46% 100.06MB 7.47% github.com/dgraph-io/badger/y.(*MergeIterator).Next
- 21.06MB 1.57% 97.04% 21.06MB 1.57% github.com/dgraph-io/badger/table.(*Table).read
- 12.50MB 0.93% 97.97% 12.50MB 0.93% github.com/dgraph-io/badger/table.header.Encode
-
- 128.31 real 329.37 user 17.11 sys
-3355660288 maximum resident set size
- 0 average shared memory size
- 0 average unshared data size
- 0 average unshared stack size
- 2203080 page reclaims
- 764 page faults
- 0 swaps
- 275 block input operations
- 76 block output operations
- 0 messages sent
- 0 messages received
- 0 signals received
- 49173 voluntary context switches
- 599922 involuntary context switches
-```
-
-## After node pooling
-
-```
-1963.13MB of 2026.09MB total (96.89%)
-Dropped 29 nodes (cum <= 10.13MB)
-Showing top 10 nodes out of 41 (cum >= 185.62MB)
- flat flat% sum% cum cum%
- 658.05MB 32.48% 32.48% 658.05MB 32.48% github.com/dgraph-io/badger/skl.glob..func1
- 297.51MB 14.68% 47.16% 297.51MB 14.68% runtime.convT2E
- 257.51MB 12.71% 59.87% 257.51MB 12.71% runtime.stringtoslicebyte
- 249.01MB 12.29% 72.16% 1007.06MB 49.70% github.com/dgraph-io/badger/mem.(*Table).Put
- 142.43MB 7.03% 79.19% 142.43MB 7.03% bytes.makeSlice
- 100MB 4.94% 84.13% 758.05MB 37.41% github.com/dgraph-io/badger/skl.newNode
- 99.50MB 4.91% 89.04% 99.50MB 4.91% main.newValue
- 75MB 3.70% 92.74% 75MB 3.70% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV
- 44.62MB 2.20% 94.94% 44.62MB 2.20% github.com/dgraph-io/badger/table.(*Table).read
- 39.50MB 1.95% 96.89% 185.62MB 9.16% github.com/dgraph-io/badger/y.(*MergeIterator).Next
-
- 135.58 real 374.29 user 17.65 sys
-3740614656 maximum resident set size
- 0 average shared memory size
- 0 average unshared data size
- 0 average unshared stack size
- 2276566 page reclaims
- 770 page faults
- 0 swaps
- 128 block input operations
- 90 block output operations
- 0 messages sent
- 0 messages received
- 0 signals received
- 46434 voluntary context switches
- 597049 involuntary context switches
-```
diff --git a/vendor/github.com/dgraph-io/badger/v2/skl/arena.go b/vendor/github.com/dgraph-io/badger/v2/skl/arena.go
deleted file mode 100644
index 9267b158..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/skl/arena.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package skl
-
-import (
- "sync/atomic"
- "unsafe"
-
- "github.com/dgraph-io/badger/v2/y"
-)
-
-const (
- offsetSize = int(unsafe.Sizeof(uint32(0)))
-
- // Always align nodes on 64-bit boundaries, even on 32-bit architectures,
- // so that the node.value field is 64-bit aligned. This is necessary because
- // node.getValueOffset uses atomic.LoadUint64, which expects its input
- // pointer to be 64-bit aligned.
- nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1
-)
-
-// Arena should be lock-free.
-type Arena struct {
- n uint32
- buf []byte
-}
-
-// newArena returns a new arena.
-func newArena(n int64) *Arena {
- // Don't store data at position 0 in order to reserve offset=0 as a kind
- // of nil pointer.
- out := &Arena{
- n: 1,
- buf: make([]byte, n),
- }
- return out
-}
-
-func (s *Arena) size() int64 {
- return int64(atomic.LoadUint32(&s.n))
-}
-
-func (s *Arena) reset() {
- atomic.StoreUint32(&s.n, 0)
-}
-
-// putNode allocates a node in the arena. The node is aligned on a pointer-sized
-// boundary. The arena offset of the node is returned.
-func (s *Arena) putNode(height int) uint32 {
- // Compute the amount of the tower that will never be used, since the height
- // is less than maxHeight.
- unusedSize := (maxHeight - height) * offsetSize
-
- // Pad the allocation with enough bytes to ensure pointer alignment.
- l := uint32(MaxNodeSize - unusedSize + nodeAlign)
- n := atomic.AddUint32(&s.n, l)
- y.AssertTruef(int(n) <= len(s.buf),
- "Arena too small, toWrite:%d newTotal:%d limit:%d",
- l, n, len(s.buf))
-
- // Return the aligned offset.
- m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign)
- return m
-}
-
-// putVal will *copy* v into the arena. To make better use of this, reuse your
-// input val buffer. It returns an offset into buf. The caller is responsible for
-// remembering the size of val. We could also store this size inside the arena,
-// but the encoding and decoding would incur some overhead.
-func (s *Arena) putVal(v y.ValueStruct) uint32 {
- l := uint32(v.EncodedSize())
- n := atomic.AddUint32(&s.n, l)
- y.AssertTruef(int(n) <= len(s.buf),
- "Arena too small, toWrite:%d newTotal:%d limit:%d",
- l, n, len(s.buf))
- m := n - l
- v.Encode(s.buf[m:])
- return m
-}
-
-func (s *Arena) putKey(key []byte) uint32 {
- l := uint32(len(key))
- n := atomic.AddUint32(&s.n, l)
- y.AssertTruef(int(n) <= len(s.buf),
- "Arena too small, toWrite:%d newTotal:%d limit:%d",
- l, n, len(s.buf))
- m := n - l
- y.AssertTrue(len(key) == copy(s.buf[m:n], key))
- return m
-}
-
-// getNode returns a pointer to the node located at offset. If the offset is
-// zero, then the nil node pointer is returned.
-func (s *Arena) getNode(offset uint32) *node {
- if offset == 0 {
- return nil
- }
-
- return (*node)(unsafe.Pointer(&s.buf[offset]))
-}
-
-// getKey returns byte slice at offset.
-func (s *Arena) getKey(offset uint32, size uint16) []byte {
- return s.buf[offset : offset+uint32(size)]
-}
-
-// getVal returns byte slice at offset. The given size should be just the value
-// size and should NOT include the meta bytes.
-func (s *Arena) getVal(offset uint32, size uint32) (ret y.ValueStruct) {
- ret.Decode(s.buf[offset : offset+size])
- return
-}
-
-// getNodeOffset returns the offset of node in the arena. If the node pointer is
-// nil, then the zero offset is returned.
-func (s *Arena) getNodeOffset(nd *node) uint32 {
- if nd == nil {
- return 0
- }
-
- return uint32(uintptr(unsafe.Pointer(nd)) - uintptr(unsafe.Pointer(&s.buf[0])))
-}
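
The one subtle piece of arithmetic in the deleted arena is the alignment mask in putNode: adding nodeAlign (7) to the raw offset and clearing the low bits rounds it up to the next 8-byte boundary, which is what keeps node.value safe for atomic.LoadUint64 even on 32-bit platforms. A standalone check of that trick:

```
package main

import "fmt"

// alignUp rounds off up to the next multiple of align (a power of two),
// the same (off + align - 1) &^ (align - 1) trick putNode uses with align = 8.
func alignUp(off, align uint32) uint32 {
	return (off + align - 1) &^ (align - 1)
}

func main() {
	for _, off := range []uint32{1, 8, 13} {
		fmt.Println(off, "->", alignUp(off, 8)) // 1 -> 8, 8 -> 8, 13 -> 16
	}
}
```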
diff --git a/vendor/github.com/dgraph-io/badger/v2/skl/skl.go b/vendor/github.com/dgraph-io/badger/v2/skl/skl.go
deleted file mode 100644
index 43694f14..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/skl/skl.go
+++ /dev/null
@@ -1,517 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-Adapted from RocksDB inline skiplist.
-
-Key differences:
-- No optimization for sequential inserts (no "prev").
-- No custom comparator.
-- Support overwrites. This requires care when we see the same key when inserting.
- For RocksDB or LevelDB, overwrites are implemented as a newer sequence number in the key, so
- there is no need for values. We don't intend to support versioning. In-place updates of values
- would be more efficient.
-- We discard all non-concurrent code.
-- We do not support Splices. This simplifies the code a lot.
-- No AllocateNode or other pointer arithmetic.
-- We combine the findLessThan, findGreaterOrEqual, etc into one function.
-*/
-
-package skl
-
-import (
- "math"
- "sync/atomic"
- "unsafe"
-
- "github.com/dgraph-io/badger/v2/y"
- "github.com/dgraph-io/ristretto/z"
-)
-
-const (
- maxHeight = 20
- heightIncrease = math.MaxUint32 / 3
-)
-
-// MaxNodeSize is the memory footprint of a node of maximum height.
-const MaxNodeSize = int(unsafe.Sizeof(node{}))
-
-type node struct {
- // Multiple parts of the value are encoded as a single uint64 so that it
- // can be atomically loaded and stored:
- // value offset: uint32 (bits 0-31)
- // value size : uint16 (bits 32-63)
- value uint64
-
- // A byte slice is 24 bytes. We are trying to save space here.
- keyOffset uint32 // Immutable. No need to lock to access key.
- keySize uint16 // Immutable. No need to lock to access key.
-
- // Height of the tower.
- height uint16
-
- // Most nodes do not need to use the full height of the tower, since the
- // probability of each successive level decreases exponentially. Because
- // these elements are never accessed, they do not need to be allocated.
- // Therefore, when a node is allocated in the arena, its memory footprint
- // is deliberately truncated to not include unneeded tower elements.
- //
- // All accesses to elements should use CAS operations, with no need to lock.
- tower [maxHeight]uint32
-}
-
-// Skiplist maps keys to values (in memory)
-type Skiplist struct {
- height int32 // Current height. 1 <= height <= kMaxHeight. CAS.
- head *node
- ref int32
- arena *Arena
-}
-
-// IncrRef increases the refcount
-func (s *Skiplist) IncrRef() {
- atomic.AddInt32(&s.ref, 1)
-}
-
-// DecrRef decrements the refcount, deallocating the Skiplist when done using it
-func (s *Skiplist) DecrRef() {
- newRef := atomic.AddInt32(&s.ref, -1)
- if newRef > 0 {
- return
- }
-
- s.arena.reset()
- // Indicate we are closed. Good for testing. Also, lets GC reclaim memory. Race condition
- // here would suggest we are accessing skiplist when we are supposed to have no reference!
- s.arena = nil
- // Since the head references the arena's buf, as long as the head is kept around
- // GC can't release the buf.
- s.head = nil
-}
-
-func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node {
- // The base level is already allocated in the node struct.
- offset := arena.putNode(height)
- node := arena.getNode(offset)
- node.keyOffset = arena.putKey(key)
- node.keySize = uint16(len(key))
- node.height = uint16(height)
- node.value = encodeValue(arena.putVal(v), v.EncodedSize())
- return node
-}
-
-func encodeValue(valOffset uint32, valSize uint32) uint64 {
- return uint64(valSize)<<32 | uint64(valOffset)
-}
-
-func decodeValue(value uint64) (valOffset uint32, valSize uint32) {
- valOffset = uint32(value)
- valSize = uint32(value >> 32)
- return
-}
-
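
Packing the value offset and size into a single uint64 is what lets readers load both fields with one atomic operation. A quick standalone round trip of the same bit layout:

```go
package main

import "fmt"

// Same layout as above: size in the high 32 bits, offset in the low 32 bits.
func encodeValue(valOffset, valSize uint32) uint64 {
	return uint64(valSize)<<32 | uint64(valOffset)
}

func decodeValue(value uint64) (valOffset, valSize uint32) {
	return uint32(value), uint32(value >> 32)
}

func main() {
	v := encodeValue(4096, 128)
	off, sz := decodeValue(v)
	fmt.Println(off, sz) // 4096 128
}
```
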
-// NewSkiplist makes a new empty skiplist, with a given arena size
-func NewSkiplist(arenaSize int64) *Skiplist {
- arena := newArena(arenaSize)
- head := newNode(arena, nil, y.ValueStruct{}, maxHeight)
- return &Skiplist{
- height: 1,
- head: head,
- arena: arena,
- ref: 1,
- }
-}
-
-func (s *node) getValueOffset() (uint32, uint32) {
- value := atomic.LoadUint64(&s.value)
- return decodeValue(value)
-}
-
-func (s *node) key(arena *Arena) []byte {
- return arena.getKey(s.keyOffset, s.keySize)
-}
-
-func (s *node) setValue(arena *Arena, v y.ValueStruct) {
- valOffset := arena.putVal(v)
- value := encodeValue(valOffset, v.EncodedSize())
- atomic.StoreUint64(&s.value, value)
-}
-
-func (s *node) getNextOffset(h int) uint32 {
- return atomic.LoadUint32(&s.tower[h])
-}
-
-func (s *node) casNextOffset(h int, old, val uint32) bool {
- return atomic.CompareAndSwapUint32(&s.tower[h], old, val)
-}
-
-// Returns true if key is strictly > n.key.
-// If n is nil, this is an "end" marker and we return false.
-//func (s *Skiplist) keyIsAfterNode(key []byte, n *node) bool {
-// y.AssertTrue(n != s.head)
-// return n != nil && y.CompareKeys(key, n.key) > 0
-//}
-
-func (s *Skiplist) randomHeight() int {
- h := 1
- for h < maxHeight && z.FastRand() <= heightIncrease {
- h++
- }
- return h
-}
-
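
randomHeight draws a tower height from a geometric distribution: each extra level is kept with probability heightIncrease/2^32, roughly 1/3. A standalone simulation (substituting math/rand for z.FastRand) makes the decay visible:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
)

const (
	maxHeight      = 20
	heightIncrease = math.MaxUint32 / 3
)

// randomHeight mirrors the vendored logic, with math/rand standing in
// for z.FastRand.
func randomHeight(r *rand.Rand) int {
	h := 1
	for h < maxHeight && r.Uint32() <= heightIncrease {
		h++
	}
	return h
}

func main() {
	r := rand.New(rand.NewSource(1))
	counts := make([]int, maxHeight+1)
	for i := 0; i < 1_000_000; i++ {
		counts[randomHeight(r)]++
	}
	// Each level should hold roughly one third of the count below it.
	for h := 1; h <= 5; h++ {
		fmt.Printf("height %d: %d\n", h, counts[h])
	}
}
```
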
-func (s *Skiplist) getNext(nd *node, height int) *node {
- return s.arena.getNode(nd.getNextOffset(height))
-}
-
-// findNear finds the node near to key.
-// If less=true, it finds rightmost node such that node.key < key (if allowEqual=false) or
-// node.key <= key (if allowEqual=true).
-// If less=false, it finds leftmost node such that node.key > key (if allowEqual=false) or
-// node.key >= key (if allowEqual=true).
-// Returns the node found. The bool returned is true if the node has key equal to given key.
-func (s *Skiplist) findNear(key []byte, less bool, allowEqual bool) (*node, bool) {
- x := s.head
- level := int(s.getHeight() - 1)
- for {
- // Assume x.key < key.
- next := s.getNext(x, level)
- if next == nil {
- // x.key < key < END OF LIST
- if level > 0 {
- // Can descend further to iterate closer to the end.
- level--
- continue
- }
- // Level=0. Cannot descend further. Let's return something that makes sense.
- if !less {
- return nil, false
- }
- // Try to return x. Make sure it is not a head node.
- if x == s.head {
- return nil, false
- }
- return x, false
- }
-
- nextKey := next.key(s.arena)
- cmp := y.CompareKeys(key, nextKey)
- if cmp > 0 {
- // x.key < next.key < key. We can continue to move right.
- x = next
- continue
- }
- if cmp == 0 {
- // x.key < key == next.key.
- if allowEqual {
- return next, true
- }
- if !less {
-    // We want >, so go to base level to grab the next bigger node.
- return s.getNext(next, 0), false
- }
- // We want <. If not base level, we should go closer in the next level.
- if level > 0 {
- level--
- continue
- }
- // On base level. Return x.
- if x == s.head {
- return nil, false
- }
- return x, false
- }
-  // cmp < 0. In other words, x.key < key < next.key.
- if level > 0 {
- level--
- continue
- }
- // At base level. Need to return something.
- if !less {
- return next, false
- }
- // Try to return x. Make sure it is not a head node.
- if x == s.head {
- return nil, false
- }
- return x, false
- }
-}
-
-// findSpliceForLevel returns (outBefore, outAfter) with outBefore.key <= key <= outAfter.key.
-// The input "before" tells us where to start looking.
-// If we found a node with the same key, then we return outBefore = outAfter.
-// Otherwise, outBefore.key < key < outAfter.key.
-func (s *Skiplist) findSpliceForLevel(key []byte, before *node, level int) (*node, *node) {
- for {
- // Assume before.key < key.
- next := s.getNext(before, level)
- if next == nil {
- return before, next
- }
- nextKey := next.key(s.arena)
- cmp := y.CompareKeys(key, nextKey)
- if cmp == 0 {
- // Equality case.
- return next, next
- }
- if cmp < 0 {
- // before.key < key < next.key. We are done for this level.
- return before, next
- }
- before = next // Keep moving right on this level.
- }
-}
-
-func (s *Skiplist) getHeight() int32 {
- return atomic.LoadInt32(&s.height)
-}
-
-// Put inserts the key-value pair.
-func (s *Skiplist) Put(key []byte, v y.ValueStruct) {
- // Since we allow overwrite, we may not need to create a new node. We might not even need to
- // increase the height. Let's defer these actions.
-
- listHeight := s.getHeight()
- var prev [maxHeight + 1]*node
- var next [maxHeight + 1]*node
- prev[listHeight] = s.head
- next[listHeight] = nil
- for i := int(listHeight) - 1; i >= 0; i-- {
- // Use higher level to speed up for current level.
- prev[i], next[i] = s.findSpliceForLevel(key, prev[i+1], i)
- if prev[i] == next[i] {
- prev[i].setValue(s.arena, v)
- return
- }
- }
-
- // We do need to create a new node.
- height := s.randomHeight()
- x := newNode(s.arena, key, v, height)
-
- // Try to increase s.height via CAS.
- listHeight = s.getHeight()
- for height > int(listHeight) {
- if atomic.CompareAndSwapInt32(&s.height, listHeight, int32(height)) {
- // Successfully increased skiplist.height.
- break
- }
- listHeight = s.getHeight()
- }
-
- // We always insert from the base level and up. After a node is added at the base level, we cannot
- // create a node in the level above first, because a search there would have discovered the base-level node.
- for i := 0; i < height; i++ {
- for {
- if prev[i] == nil {
- y.AssertTrue(i > 1) // This cannot happen in base level.
- // We haven't computed prev, next for this level because height exceeds old listHeight.
- // For these levels, we expect the lists to be sparse, so we can just search from head.
- prev[i], next[i] = s.findSpliceForLevel(key, s.head, i)
- // Someone adds the exact same key before we are able to do so. This can only happen on
- // the base level. But we know we are not on the base level.
- y.AssertTrue(prev[i] != next[i])
- }
- nextOffset := s.arena.getNodeOffset(next[i])
- x.tower[i] = nextOffset
- if prev[i].casNextOffset(i, nextOffset, s.arena.getNodeOffset(x)) {
- // Managed to insert x between prev[i] and next[i]. Go to the next level.
- break
- }
- // CAS failed. We need to recompute prev and next.
- // It is unlikely to be helpful to try to use a different level as we redo the search,
- // because it is unlikely that lots of nodes are inserted between prev[i] and next[i].
- prev[i], next[i] = s.findSpliceForLevel(key, prev[i], i)
- if prev[i] == next[i] {
- y.AssertTruef(i == 0, "Equality can happen only on base level: %d", i)
- prev[i].setValue(s.arena, v)
- return
- }
- }
- }
-}
-
-// Empty returns if the Skiplist is empty.
-func (s *Skiplist) Empty() bool {
- return s.findLast() == nil
-}
-
-// findLast returns the last element. If head (empty list), we return nil. All the find functions
-// will NEVER return the head nodes.
-func (s *Skiplist) findLast() *node {
- n := s.head
- level := int(s.getHeight()) - 1
- for {
- next := s.getNext(n, level)
- if next != nil {
- n = next
- continue
- }
- if level == 0 {
- if n == s.head {
- return nil
- }
- return n
- }
- level--
- }
-}
-
-// Get gets the value associated with the key. It returns a valid value if it finds equal or earlier
-// version of the same key.
-func (s *Skiplist) Get(key []byte) y.ValueStruct {
- n, _ := s.findNear(key, false, true) // findGreaterOrEqual.
- if n == nil {
- return y.ValueStruct{}
- }
-
- nextKey := s.arena.getKey(n.keyOffset, n.keySize)
- if !y.SameKey(key, nextKey) {
- return y.ValueStruct{}
- }
-
- valOffset, valSize := n.getValueOffset()
- vs := s.arena.getVal(valOffset, valSize)
- vs.Version = y.ParseTs(nextKey)
- return vs
-}
-
-// NewIterator returns a skiplist iterator. You have to Close() the iterator.
-func (s *Skiplist) NewIterator() *Iterator {
- s.IncrRef()
- return &Iterator{list: s}
-}
-
-// MemSize returns the size of the Skiplist in terms of how much memory is used within its internal
-// arena.
-func (s *Skiplist) MemSize() int64 { return s.arena.size() }
-
-// Iterator is an iterator over skiplist object. For new objects, you just
-// need to initialize Iterator.list.
-type Iterator struct {
- list *Skiplist
- n *node
-}
-
-// Close frees the resources held by the iterator
-func (s *Iterator) Close() error {
- s.list.DecrRef()
- return nil
-}
-
-// Valid returns true iff the iterator is positioned at a valid node.
-func (s *Iterator) Valid() bool { return s.n != nil }
-
-// Key returns the key at the current position.
-func (s *Iterator) Key() []byte {
- return s.list.arena.getKey(s.n.keyOffset, s.n.keySize)
-}
-
-// Value returns value.
-func (s *Iterator) Value() y.ValueStruct {
- valOffset, valSize := s.n.getValueOffset()
- return s.list.arena.getVal(valOffset, valSize)
-}
-
-// Next advances to the next position.
-func (s *Iterator) Next() {
- y.AssertTrue(s.Valid())
- s.n = s.list.getNext(s.n, 0)
-}
-
-// Prev advances to the previous position.
-func (s *Iterator) Prev() {
- y.AssertTrue(s.Valid())
- s.n, _ = s.list.findNear(s.Key(), true, false) // find <. No equality allowed.
-}
-
-// Seek advances to the first entry with a key >= target.
-func (s *Iterator) Seek(target []byte) {
- s.n, _ = s.list.findNear(target, false, true) // find >=.
-}
-
-// SeekForPrev finds an entry with key <= target.
-func (s *Iterator) SeekForPrev(target []byte) {
- s.n, _ = s.list.findNear(target, true, true) // find <=.
-}
-
-// SeekToFirst seeks position at the first entry in list.
-// Final state of iterator is Valid() iff list is not empty.
-func (s *Iterator) SeekToFirst() {
- s.n = s.list.getNext(s.list.head, 0)
-}
-
-// SeekToLast seeks position at the last entry in list.
-// Final state of iterator is Valid() iff list is not empty.
-func (s *Iterator) SeekToLast() {
- s.n = s.list.findLast()
-}
-
-// UniIterator is a unidirectional memtable iterator. It is a thin wrapper around
-// Iterator. We like to keep Iterator as before, because it is more powerful and
-// we might support bidirectional iterators in the future.
-type UniIterator struct {
- iter *Iterator
- reversed bool
-}
-
-// NewUniIterator returns a UniIterator.
-func (s *Skiplist) NewUniIterator(reversed bool) *UniIterator {
- return &UniIterator{
- iter: s.NewIterator(),
- reversed: reversed,
- }
-}
-
-// Next implements y.Interface
-func (s *UniIterator) Next() {
- if !s.reversed {
- s.iter.Next()
- } else {
- s.iter.Prev()
- }
-}
-
-// Rewind implements y.Interface
-func (s *UniIterator) Rewind() {
- if !s.reversed {
- s.iter.SeekToFirst()
- } else {
- s.iter.SeekToLast()
- }
-}
-
-// Seek implements y.Interface
-func (s *UniIterator) Seek(key []byte) {
- if !s.reversed {
- s.iter.Seek(key)
- } else {
- s.iter.SeekForPrev(key)
- }
-}
-
-// Key implements y.Interface
-func (s *UniIterator) Key() []byte { return s.iter.Key() }
-
-// Value implements y.Interface
-func (s *UniIterator) Value() y.ValueStruct { return s.iter.Value() }
-
-// Valid implements y.Interface
-func (s *UniIterator) Valid() bool { return s.iter.Valid() }
-
-// Close implements y.Interface (and frees up the iter's resources)
-func (s *UniIterator) Close() error { return s.iter.Close() }
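
End to end, the deleted skiplist exposes Put, Get, and iteration. A hypothetical usage sketch against the vendored skl and y packages (keys must carry a timestamp suffix, hence y.KeyWithTs):

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v2/skl"
	"github.com/dgraph-io/badger/v2/y"
)

func main() {
	list := skl.NewSkiplist(1 << 20) // 1MB arena
	defer list.DecrRef()

	// Keys are stored with a timestamp suffix; Get and the comparators rely on it.
	list.Put(y.KeyWithTs([]byte("answer"), 1), y.ValueStruct{Value: []byte("42")})

	vs := list.Get(y.KeyWithTs([]byte("answer"), 1))
	fmt.Printf("%s\n", vs.Value) // 42

	it := list.NewIterator()
	defer it.Close()
	for it.SeekToFirst(); it.Valid(); it.Next() {
		fmt.Printf("%s => %s\n", y.ParseKey(it.Key()), it.Value().Value)
	}
}
```
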
diff --git a/vendor/github.com/dgraph-io/badger/v2/stream.go b/vendor/github.com/dgraph-io/badger/v2/stream.go
deleted file mode 100644
index e238bcd3..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/stream.go
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * Copyright 2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "context"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/y"
- humanize "github.com/dustin/go-humanize"
- "github.com/golang/protobuf/proto"
-)
-
-const pageSize = 4 << 20 // 4MB
-
-// maxStreamSize is the maximum allowed size of a stream batch. This is a soft limit,
-// as a single list that is still over the limit will have to be sent as is, since it
-// cannot be split further. This limit prevents the framework from creating batches
-// so big that sending them causes issues (e.g. running into the max gRPC message size limit).
-var maxStreamSize = uint64(100 << 20) // 100MB
-
-// Stream provides a framework to concurrently iterate over a snapshot of Badger, pick up
-// key-values, batch them up and call Send. Stream does concurrent iteration over many smaller key
-// ranges. It does NOT send keys in lexicographical sorted order. To get keys in sorted
-// order, use Iterator.
-type Stream struct {
- // Prefix to only iterate over certain range of keys. If set to nil (default), Stream would
- // iterate over the entire DB.
- Prefix []byte
-
- // Number of goroutines to use for iterating over key ranges. Defaults to 16.
- NumGo int
-
- // Badger would produce log entries in Infof to indicate the progress of Stream. LogPrefix can
- // be used to help differentiate them from other activities. Default is "Badger.Stream".
- LogPrefix string
-
- // ChooseKey is invoked each time a new key is encountered. Note that this is not called
- // on every version of the value, only the first encountered version (i.e. the highest version
- // of the value a key has). ChooseKey can be left nil to select all keys.
- //
- // Note: Calls to ChooseKey are concurrent.
- ChooseKey func(item *Item) bool
-
- // KeyToList, similar to ChooseKey, is only invoked on the highest version of the value. It
- // is up to the caller to iterate over the versions and generate zero, one or more KVs. It
- // is expected that the user would advance the iterator to go through the versions of the
- // values. However, the user MUST immediately return from this function on the first encounter
- // with a mismatching key. See example usage in ToList function. Can be left nil to use ToList
- // function by default.
- //
- // Note: Calls to KeyToList are concurrent.
- KeyToList func(key []byte, itr *Iterator) (*pb.KVList, error)
-
- // This is the method where Stream sends the final output. All calls to Send are done by a
- // single goroutine, i.e. logic within Send method can expect single threaded execution.
- Send func(*pb.KVList) error
-
- readTs uint64
- db *DB
- rangeCh chan keyRange
- kvChan chan *pb.KVList
- nextStreamId uint32
-}
-
-// ToList is a default implementation of KeyToList. It picks up all valid versions of the key,
-// skipping over deleted or expired keys.
-func (st *Stream) ToList(key []byte, itr *Iterator) (*pb.KVList, error) {
- list := &pb.KVList{}
- for ; itr.Valid(); itr.Next() {
- item := itr.Item()
- if item.IsDeletedOrExpired() {
- break
- }
- if !bytes.Equal(key, item.Key()) {
- // Break out on the first encounter with another key.
- break
- }
-
- valCopy, err := item.ValueCopy(nil)
- if err != nil {
- return nil, err
- }
- kv := &pb.KV{
- Key: item.KeyCopy(nil),
- Value: valCopy,
- UserMeta: []byte{item.UserMeta()},
- Version: item.Version(),
- ExpiresAt: item.ExpiresAt(),
- }
- list.Kv = append(list.Kv, kv)
- if st.db.opt.NumVersionsToKeep == 1 {
- break
- }
-
- if item.DiscardEarlierVersions() {
- break
- }
- }
- return list, nil
-}
-
-// keyRange is [start, end), including start, excluding end. Do ensure that the start,
-// end byte slices are owned by keyRange struct.
-func (st *Stream) produceRanges(ctx context.Context) {
- splits := st.db.KeySplits(st.Prefix)
-
- // We don't need to create more key ranges than NumGo goroutines. This way, we will have limited
- // number of "streams" coming out, which then helps limit the memory used by SSWriter.
- {
- pickEvery := int(math.Floor(float64(len(splits)) / float64(st.NumGo)))
- if pickEvery < 1 {
- pickEvery = 1
- }
- filtered := splits[:0]
- for i, split := range splits {
- if (i+1)%pickEvery == 0 {
- filtered = append(filtered, split)
- }
- }
- splits = filtered
- }
-
- start := y.SafeCopy(nil, st.Prefix)
- for _, key := range splits {
- st.rangeCh <- keyRange{left: start, right: y.SafeCopy(nil, []byte(key))}
- start = y.SafeCopy(nil, []byte(key))
- }
- // Edge case: prefix is empty and no splits exist. In that case, we should have at least one
- // keyRange output.
- st.rangeCh <- keyRange{left: start}
- close(st.rangeCh)
-}
-
-// produceKVs picks up ranges from rangeCh, generates KV lists and sends them to kvChan.
-func (st *Stream) produceKVs(ctx context.Context, threadId int) error {
- var size int
- var txn *Txn
- if st.readTs > 0 {
- txn = st.db.NewTransactionAt(st.readTs, false)
- } else {
- txn = st.db.NewTransaction(false)
- }
- defer txn.Discard()
-
- iterate := func(kr keyRange) error {
- iterOpts := DefaultIteratorOptions
- iterOpts.AllVersions = true
- iterOpts.Prefix = st.Prefix
- iterOpts.PrefetchValues = false
- itr := txn.NewIterator(iterOpts)
- itr.ThreadId = threadId
- defer itr.Close()
-
- // This unique stream id is used to identify all the keys from this iteration.
- streamId := atomic.AddUint32(&st.nextStreamId, 1)
-
- outList := new(pb.KVList)
-
- sendIt := func() error {
- select {
- case st.kvChan <- outList:
- case <-ctx.Done():
- return ctx.Err()
- }
- outList = new(pb.KVList)
- size = 0
- return nil
- }
- var prevKey []byte
- for itr.Seek(kr.left); itr.Valid(); {
- // it.Valid would only return true for keys with the provided Prefix in iterOpts.
- item := itr.Item()
- if bytes.Equal(item.Key(), prevKey) {
- itr.Next()
- continue
- }
- prevKey = append(prevKey[:0], item.Key()...)
-
- // Check if we reached the end of the key range.
- if len(kr.right) > 0 && bytes.Compare(item.Key(), kr.right) >= 0 {
- break
- }
- // Check if we should pick this key.
- if st.ChooseKey != nil && !st.ChooseKey(item) {
- continue
- }
-
- // Now convert to key value.
- list, err := st.KeyToList(item.KeyCopy(nil), itr)
- if err != nil {
- return err
- }
- if list == nil || len(list.Kv) == 0 {
- continue
- }
- for _, kv := range list.Kv {
- size += proto.Size(kv)
- kv.StreamId = streamId
- outList.Kv = append(outList.Kv, kv)
-
- if size < pageSize {
- continue
- }
- if err := sendIt(); err != nil {
- return err
- }
- }
- }
- if len(outList.Kv) > 0 {
- // TODO: Think of a way to indicate that a stream is over.
- if err := sendIt(); err != nil {
- return err
- }
- }
- return nil
- }
-
- for {
- select {
- case kr, ok := <-st.rangeCh:
- if !ok {
- // Done with the keys.
- return nil
- }
- if err := iterate(kr); err != nil {
- return err
- }
- case <-ctx.Done():
- return ctx.Err()
- }
- }
-}
-
-func (st *Stream) streamKVs(ctx context.Context) error {
- var count int
- var bytesSent uint64
- t := time.NewTicker(time.Second)
- defer t.Stop()
- now := time.Now()
-
- sendBatch := func(batch *pb.KVList) error {
- sz := uint64(proto.Size(batch))
- bytesSent += sz
- count += len(batch.Kv)
- t := time.Now()
- if err := st.Send(batch); err != nil {
- return err
- }
- st.db.opt.Infof("%s Created batch of size: %s in %s.\n",
- st.LogPrefix, humanize.Bytes(sz), time.Since(t))
- return nil
- }
-
- slurp := func(batch *pb.KVList) error {
- loop:
- for {
- // Send the batch immediately if it already exceeds the maximum allowed size.
- // If the size of the batch exceeds maxStreamSize, break from the loop to
- // avoid creating a batch that is so big that certain limits are reached.
- sz := uint64(proto.Size(batch))
- if sz > maxStreamSize {
- break loop
- }
- select {
- case kvs, ok := <-st.kvChan:
- if !ok {
- break loop
- }
- y.AssertTrue(kvs != nil)
- batch.Kv = append(batch.Kv, kvs.Kv...)
- default:
- break loop
- }
- }
- return sendBatch(batch)
- }
-
-outer:
- for {
- var batch *pb.KVList
- select {
- case <-ctx.Done():
- return ctx.Err()
-
- case <-t.C:
- dur := time.Since(now)
- durSec := uint64(dur.Seconds())
- if durSec == 0 {
- continue
- }
- speed := bytesSent / durSec
- st.db.opt.Infof("%s Time elapsed: %s, bytes sent: %s, speed: %s/sec\n", st.LogPrefix,
- y.FixedDuration(dur), humanize.Bytes(bytesSent), humanize.Bytes(speed))
-
- case kvs, ok := <-st.kvChan:
- if !ok {
- break outer
- }
- y.AssertTrue(kvs != nil)
- batch = kvs
-
- // Otherwise, slurp more keys into this batch.
- if err := slurp(batch); err != nil {
- return err
- }
- }
- }
-
- st.db.opt.Infof("%s Sent %d keys\n", st.LogPrefix, count)
- return nil
-}
-
-// Orchestrate runs Stream. It picks up ranges from the SSTables, then runs NumGo number of
-// goroutines to iterate over these ranges and batch up KVs in lists. It concurrently runs a single
-// goroutine to pick these lists, batch them up further and send to Output.Send. Orchestrate also
-// spits logs out to Infof, using provided LogPrefix. Note that all calls to Output.Send
-// are serial. In case any of these steps encounter an error, Orchestrate would stop execution and
-// return that error. Orchestrate can be called multiple times, but in serial order.
-func (st *Stream) Orchestrate(ctx context.Context) error {
- st.rangeCh = make(chan keyRange, 3) // Contains keys for posting lists.
-
- // kvChan should only have a small capacity to ensure that we don't buffer up too much data if
- // sending is slow. Page size is set to 4MB, which is used to lazily cap the size of each
- // KVList. To get 128MB buffer, we can set the channel size to 32.
- st.kvChan = make(chan *pb.KVList, 32)
-
- if st.KeyToList == nil {
- st.KeyToList = st.ToList
- }
-
- // Picks up ranges from Badger, and sends them to rangeCh.
- go st.produceRanges(ctx)
-
- errCh := make(chan error, 1) // Stores the first error from produceKVs.
- var wg sync.WaitGroup
- for i := 0; i < st.NumGo; i++ {
- wg.Add(1)
-
- go func(threadId int) {
- defer wg.Done()
- // Picks up ranges from rangeCh, generates KV lists, and sends them to kvChan.
- if err := st.produceKVs(ctx, threadId); err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }(i)
- }
-
- // Pick up key-values from kvChan and send to stream.
- kvErr := make(chan error, 1)
- go func() {
- // Picks up KV lists from kvChan, and sends them to Output.
- kvErr <- st.streamKVs(ctx)
- }()
- wg.Wait() // Wait for produceKVs to be over.
- close(st.kvChan) // Now we can close kvChan.
-
- select {
- case err := <-errCh: // Check error from produceKVs.
- return err
- default:
- }
-
- // Wait for key streaming to be over.
- err := <-kvErr
- return err
-}
-
-func (db *DB) newStream() *Stream {
- return &Stream{db: db, NumGo: 16, LogPrefix: "Badger.Stream"}
-}
-
-// NewStream creates a new Stream.
-func (db *DB) NewStream() *Stream {
- if db.opt.managedTxns {
- panic("This API can not be called in managed mode.")
- }
- return db.newStream()
-}
-
-// NewStreamAt creates a new Stream at a particular timestamp. Should only be used with managed DB.
-func (db *DB) NewStreamAt(readTs uint64) *Stream {
- if !db.opt.managedTxns {
- panic("This API can only be called in managed mode.")
- }
- stream := db.newStream()
- stream.readTs = readTs
- return stream
-}
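
For reference, a hypothetical driver of the deleted Stream API: set Prefix and Send, then let Orchestrate fan out the iteration (the path and key prefix below are made up):

```go
package main

import (
	"context"
	"log"

	badger "github.com/dgraph-io/badger/v2"
	"github.com/dgraph-io/badger/v2/pb"
)

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/stream-demo"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	stream := db.NewStream()
	stream.Prefix = []byte("user:")    // restrict iteration to one key range
	stream.LogPrefix = "Backup.Stream" // tags the progress log lines
	stream.Send = func(list *pb.KVList) error {
		// Send is called from a single goroutine, so sequential writes are safe here.
		log.Printf("received batch with %d kvs", len(list.Kv))
		return nil
	}
	if err := stream.Orchestrate(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```
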
diff --git a/vendor/github.com/dgraph-io/badger/v2/stream_writer.go b/vendor/github.com/dgraph-io/badger/v2/stream_writer.go
deleted file mode 100644
index 38ffba22..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/stream_writer.go
+++ /dev/null
@@ -1,485 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "encoding/hex"
- "fmt"
- "math"
- "sync"
-
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/table"
- "github.com/dgraph-io/badger/v2/y"
- humanize "github.com/dustin/go-humanize"
- "github.com/pkg/errors"
-)
-
-const headStreamId uint32 = math.MaxUint32
-
-// StreamWriter is used to write data coming from multiple streams. The streams must not have any
-// overlapping key ranges. Within each stream, the keys must be sorted. Badger Stream framework is
-// capable of generating such an output. So, this StreamWriter can be used at the other end to build
-// BadgerDB at a much faster pace by writing SSTables (and value logs) directly to LSM tree levels
-// without causing any compactions at all. This is way faster than using batched writer or using
-// transactions, but only applicable in situations where the keys are pre-sorted and the DB is being
-// bootstrapped. Existing data would get deleted when using this writer. So, this is only useful
-// when restoring from backup or replicating DB across servers.
-//
-// StreamWriter should not be called on in-use DB instances. It is designed only to bootstrap new
-// DBs.
-type StreamWriter struct {
- writeLock sync.Mutex
- db *DB
- done func()
- throttle *y.Throttle
- maxVersion uint64
- writers map[uint32]*sortedWriter
- maxHead valuePointer
-}
-
-// NewStreamWriter creates a StreamWriter. Right after creating StreamWriter, Prepare must be
-// called. The memory usage of a StreamWriter is directly proportional to the number of streams
-// possible. So, efforts must be made to keep the number of streams low. Stream framework would
-// typically use 16 goroutines and hence create 16 streams.
-func (db *DB) NewStreamWriter() *StreamWriter {
- return &StreamWriter{
- db: db,
- // throttle shouldn't make much difference. Memory consumption is based on the number of
- // concurrent streams being processed.
- throttle: y.NewThrottle(16),
- writers: make(map[uint32]*sortedWriter),
- }
-}
-
-// Prepare should be called before writing any entry to StreamWriter. It deletes all data present in
-// existing DB, stops compactions and any writes being done by other means. Be very careful when
-// calling Prepare, because it could result in permanent data loss. Not calling Prepare would result
-// in a corrupt Badger instance.
-func (sw *StreamWriter) Prepare() error {
- sw.writeLock.Lock()
- defer sw.writeLock.Unlock()
-
- var err error
- sw.done, err = sw.db.dropAll()
- return err
-}
-
-// Write writes KVList to DB. Each KV within the list contains the stream id which StreamWriter
-// would use to demux the writes. Write is thread safe and can be called concurrently by multiple
-// goroutines.
-func (sw *StreamWriter) Write(kvs *pb.KVList) error {
- if len(kvs.GetKv()) == 0 {
- return nil
- }
-
- // closedStreams keeps track of all streams which are going to be marked as done. We are
- // keeping track of all streams so that we can close them at the end, after inserting all
- // the valid kvs.
- closedStreams := make(map[uint32]struct{})
- streamReqs := make(map[uint32]*request)
- for _, kv := range kvs.Kv {
- if kv.StreamDone {
- closedStreams[kv.StreamId] = struct{}{}
- continue
- }
-
- // Panic if some kv comes after stream has been marked as closed.
- if _, ok := closedStreams[kv.StreamId]; ok {
- panic(fmt.Sprintf("write performed on closed stream: %d", kv.StreamId))
- }
-
- var meta, userMeta byte
- if len(kv.Meta) > 0 {
- meta = kv.Meta[0]
- }
- if len(kv.UserMeta) > 0 {
- userMeta = kv.UserMeta[0]
- }
- if sw.maxVersion < kv.Version {
- sw.maxVersion = kv.Version
- }
- e := &Entry{
- Key: y.KeyWithTs(kv.Key, kv.Version),
- Value: kv.Value,
- UserMeta: userMeta,
- ExpiresAt: kv.ExpiresAt,
- meta: meta,
- }
- // If the value can be collocated with the key in LSM tree, we can skip
- // writing the value to value log.
- e.skipVlog = sw.db.shouldWriteValueToLSM(*e)
- req := streamReqs[kv.StreamId]
- if req == nil {
- req = &request{}
- streamReqs[kv.StreamId] = req
- }
- req.Entries = append(req.Entries, e)
- }
- all := make([]*request, 0, len(streamReqs))
- for _, req := range streamReqs {
- all = append(all, req)
- }
-
- sw.writeLock.Lock()
- defer sw.writeLock.Unlock()
-
- // We are writing all requests to the vlog even if some requests belong to already closed
- // streams. This is safe because writing to the sorted writer, which will be nil for a
- // closed stream, panics. At restart, the stream writer will drop all the data in the
- // Prepare function.
- if err := sw.db.vlog.write(all); err != nil {
- return err
- }
-
- for streamID, req := range streamReqs {
- writer, ok := sw.writers[streamID]
- if !ok {
- var err error
- writer, err = sw.newWriter(streamID)
- if err != nil {
- return errors.Wrapf(err, "failed to create writer with ID %d", streamID)
- }
- sw.writers[streamID] = writer
- }
-
- if writer == nil {
- panic(fmt.Sprintf("write performed on closed stream: %d", streamID))
- }
-
- writer.reqCh <- req
- }
-
- // Now we can close any streams if required. We will make writer for
- // the closed streams as nil.
- for streamId := range closedStreams {
- writer, ok := sw.writers[streamId]
- if !ok {
- sw.db.opt.Logger.Warningf("Trying to close stream: %d, but no sorted "+
- "writer found for it", streamId)
- continue
- }
-
- writer.closer.SignalAndWait()
- if err := writer.Done(); err != nil {
- return err
- }
-
- if sw.maxHead.Less(writer.head) {
- sw.maxHead = writer.head
- }
-
- sw.writers[streamId] = nil
- }
- return nil
-}
-
-// Flush is called once we are done writing all the entries. It syncs DB directories. It also
-// updates Oracle with maxVersion found in all entries (if DB is not managed).
-func (sw *StreamWriter) Flush() error {
- sw.writeLock.Lock()
- defer sw.writeLock.Unlock()
-
- defer sw.done()
-
- for _, writer := range sw.writers {
- if writer != nil {
- writer.closer.SignalAndWait()
- }
- }
-
- for _, writer := range sw.writers {
- if writer == nil {
- continue
- }
- if err := writer.Done(); err != nil {
- return err
- }
- if sw.maxHead.Less(writer.head) {
- sw.maxHead = writer.head
- }
- }
-
- // Encode and write the value log head into a new table.
- data := sw.maxHead.Encode()
- headWriter, err := sw.newWriter(headStreamId)
- if err != nil {
- return errors.Wrap(err, "failed to create head writer")
- }
- if err := headWriter.Add(
- y.KeyWithTs(head, sw.maxVersion),
- y.ValueStruct{Value: data}); err != nil {
- return err
- }
-
- headWriter.closer.SignalAndWait()
-
- if err := headWriter.Done(); err != nil {
- return err
- }
-
- if !sw.db.opt.managedTxns {
- if sw.db.orc != nil {
- sw.db.orc.Stop()
- }
- sw.db.orc = newOracle(sw.db.opt)
- sw.db.orc.nextTxnTs = sw.maxVersion
- sw.db.orc.txnMark.Done(sw.maxVersion)
- sw.db.orc.readMark.Done(sw.maxVersion)
- sw.db.orc.incrementNextTs()
- }
-
- // Wait for all files to be written.
- if err := sw.throttle.Finish(); err != nil {
- return err
- }
-
- // Sort tables at the end.
- for _, l := range sw.db.lc.levels {
- l.sortTables()
- }
-
- // Now sync the directories, so all the files are registered.
- if sw.db.opt.ValueDir != sw.db.opt.Dir {
- if err := sw.db.syncDir(sw.db.opt.ValueDir); err != nil {
- return err
- }
- }
- if err := sw.db.syncDir(sw.db.opt.Dir); err != nil {
- return err
- }
- return sw.db.lc.validate()
-}
-
-type sortedWriter struct {
- db *DB
- throttle *y.Throttle
-
- builder *table.Builder
- lastKey []byte
- streamID uint32
- reqCh chan *request
- head valuePointer
- // Have separate closer for each writer, as it can be closed at any time.
- closer *y.Closer
-}
-
-func (sw *StreamWriter) newWriter(streamID uint32) (*sortedWriter, error) {
- dk, err := sw.db.registry.latestDataKey()
- if err != nil {
- return nil, err
- }
-
- bopts := buildTableOptions(sw.db.opt)
- bopts.DataKey = dk
- w := &sortedWriter{
- db: sw.db,
- streamID: streamID,
- throttle: sw.throttle,
- builder: table.NewTableBuilder(bopts),
- reqCh: make(chan *request, 3),
- closer: y.NewCloser(1),
- }
-
- go w.handleRequests()
- return w, nil
-}
-
-func (w *sortedWriter) handleRequests() {
- defer w.closer.Done()
-
- process := func(req *request) {
- for i, e := range req.Entries {
- // If badger is running in InMemory mode, len(req.Ptrs) == 0.
- if i < len(req.Ptrs) {
- vptr := req.Ptrs[i]
- if !vptr.IsZero() {
- y.AssertTrue(w.head.Less(vptr))
- w.head = vptr
- }
- }
- var vs y.ValueStruct
- if e.skipVlog {
- vs = y.ValueStruct{
- Value: e.Value,
- Meta: e.meta,
- UserMeta: e.UserMeta,
- ExpiresAt: e.ExpiresAt,
- }
- } else {
- vptr := req.Ptrs[i]
- vs = y.ValueStruct{
- Value: vptr.Encode(),
- Meta: e.meta | bitValuePointer,
- UserMeta: e.UserMeta,
- ExpiresAt: e.ExpiresAt,
- }
- }
- if err := w.Add(e.Key, vs); err != nil {
- panic(err)
- }
- }
- }
-
- for {
- select {
- case req := <-w.reqCh:
- process(req)
- case <-w.closer.HasBeenClosed():
- close(w.reqCh)
- for req := range w.reqCh {
- process(req)
- }
- return
- }
- }
-}
-
-// Add adds key and vs to sortedWriter.
-func (w *sortedWriter) Add(key []byte, vs y.ValueStruct) error {
- if len(w.lastKey) > 0 && y.CompareKeys(key, w.lastKey) <= 0 {
- return errors.Errorf("keys not in sorted order (last key: %s, key: %s)",
- hex.Dump(w.lastKey), hex.Dump(key))
- }
-
- sameKey := y.SameKey(key, w.lastKey)
- // Same keys should go into the same SSTable.
- if !sameKey && w.builder.ReachedCapacity(w.db.opt.MaxTableSize) {
- if err := w.send(false); err != nil {
- return err
- }
- }
-
- w.lastKey = y.SafeCopy(w.lastKey, key)
- var vp valuePointer
- if vs.Meta&bitValuePointer > 0 {
- vp.Decode(vs.Value)
- }
- w.builder.Add(key, vs, vp.Len)
- return nil
-}
-
-func (w *sortedWriter) send(done bool) error {
- if err := w.throttle.Do(); err != nil {
- return err
- }
- go func(builder *table.Builder) {
- err := w.createTable(builder)
- w.throttle.Done(err)
- }(w.builder)
- // If done is true, this indicates we can close the writer.
- // No need to allocate underlying TableBuilder now.
- if done {
- w.builder = nil
- return nil
- }
-
- dk, err := w.db.registry.latestDataKey()
- if err != nil {
-  return y.Wrapf(err, "Error while retrieving datakey in sortedWriter.send")
- }
- bopts := buildTableOptions(w.db.opt)
- bopts.DataKey = dk
- w.builder = table.NewTableBuilder(bopts)
- return nil
-}
-
-// Done is called once we are done writing all keys and valueStructs
-// to sortedWriter. It completes writing current SST to disk.
-func (w *sortedWriter) Done() error {
- if w.builder.Empty() {
- // Assign builder as nil, so that underlying memory can be garbage collected.
- w.builder = nil
- return nil
- }
-
- return w.send(true)
-}
-
-func (w *sortedWriter) createTable(builder *table.Builder) error {
- data := builder.Finish()
- if len(data) == 0 {
- return nil
- }
- fileID := w.db.lc.reserveFileID()
- opts := buildTableOptions(w.db.opt)
- opts.DataKey = builder.DataKey()
- opts.BlockCache = w.db.blockCache
- opts.IndexCache = w.db.indexCache
- var tbl *table.Table
- if w.db.opt.InMemory {
- var err error
- if tbl, err = table.OpenInMemoryTable(data, fileID, &opts); err != nil {
- return err
- }
- } else {
- fd, err := y.CreateSyncedFile(table.NewFilename(fileID, w.db.opt.Dir), true)
- if err != nil {
- return err
- }
- if _, err := fd.Write(data); err != nil {
- return err
- }
- if tbl, err = table.OpenTable(fd, opts); err != nil {
- return err
- }
- }
- lc := w.db.lc
-
- var lhandler *levelHandler
- // We should start the levels from 1, because we need level 0 to set the !badger!head key. We
- // cannot mix up this key with other keys from the DB, otherwise we would introduce a range
- // overlap violation.
- y.AssertTrue(len(lc.levels) > 1)
- for _, l := range lc.levels[1:] {
- ratio := float64(l.getTotalSize()) / float64(l.maxTotalSize)
- if ratio < 1.0 {
- lhandler = l
- break
- }
- }
- if lhandler == nil {
- // If we're exceeding the size of the lowest level, shove it in the lowest level. Can't do
- // better than that.
- lhandler = lc.levels[len(lc.levels)-1]
- }
- if w.streamID == headStreamId {
- // This is a special !badger!head key. We should store it at level 0, separate from all the
- // other keys to avoid an overlap.
- lhandler = lc.levels[0]
- }
- // Now that table can be opened successfully, let's add this to the MANIFEST.
- change := &pb.ManifestChange{
- Id: tbl.ID(),
- KeyId: tbl.KeyID(),
- Op: pb.ManifestChange_CREATE,
- Level: uint32(lhandler.level),
- Compression: uint32(tbl.CompressionType()),
- }
- if err := w.db.manifest.addChanges([]*pb.ManifestChange{change}); err != nil {
- return err
- }
-
- // We are not calling lhandler.replaceTables() here, as it sorts tables on every addition.
- // We can sort all tables only once during Flush() call.
- lhandler.addTable(tbl)
-
- // Release the ref held by OpenTable.
- _ = tbl.DecrRef()
- w.db.opt.Infof("Table created: %d at level: %d for stream: %d. Size: %s\n",
- fileID, lhandler.level, w.streamID, humanize.Bytes(uint64(tbl.Size())))
- return nil
-}
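
The consuming side pairs with the Stream framework above. A sketch of bootstrapping a fresh DB through the deleted StreamWriter (Prepare, any number of Write calls, then Flush), assuming batches arrive on a channel:

```go
package main

import (
	"log"

	badger "github.com/dgraph-io/badger/v2"
	"github.com/dgraph-io/badger/v2/pb"
)

// restore bulk-loads pre-sorted, non-overlapping streams into an empty DB.
func restore(db *badger.DB, batches <-chan *pb.KVList) error {
	sw := db.NewStreamWriter()
	if err := sw.Prepare(); err != nil { // caution: drops all existing data
		return err
	}
	for kvs := range batches {
		if err := sw.Write(kvs); err != nil {
			return err
		}
	}
	return sw.Flush() // finalizes SSTs and syncs directories
}

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/restore-demo"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	batches := make(chan *pb.KVList)
	close(batches) // empty restore, just to exercise the path
	if err := restore(db, batches); err != nil {
		log.Fatal(err)
	}
}
```
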
diff --git a/vendor/github.com/dgraph-io/badger/v2/structs.go b/vendor/github.com/dgraph-io/badger/v2/structs.go
deleted file mode 100644
index 469cdc48..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/structs.go
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "encoding/binary"
- "fmt"
- "time"
- "unsafe"
-)
-
-type valuePointer struct {
- Fid uint32
- Len uint32
- Offset uint32
-}
-
-const vptrSize = unsafe.Sizeof(valuePointer{})
-
-func (p valuePointer) Less(o valuePointer) bool {
- if p.Fid != o.Fid {
- return p.Fid < o.Fid
- }
- if p.Offset != o.Offset {
- return p.Offset < o.Offset
- }
- return p.Len < o.Len
-}
-
-func (p valuePointer) IsZero() bool {
- return p.Fid == 0 && p.Offset == 0 && p.Len == 0
-}
-
-// Encode encodes Pointer into byte buffer.
-func (p valuePointer) Encode() []byte {
- b := make([]byte, vptrSize)
- // Copy over the content from p to b.
- *(*valuePointer)(unsafe.Pointer(&b[0])) = p
- return b
-}
-
-// Decode decodes the value pointer from the provided byte buffer.
-func (p *valuePointer) Decode(b []byte) {
- // Copy over data from b into p. Using *p=unsafe.pointer(...) leads to
- // pointer alignment issues. See https://github.com/dgraph-io/badger/issues/1096
- // and comment https://github.com/dgraph-io/badger/pull/1097#pullrequestreview-307361714
- copy(((*[vptrSize]byte)(unsafe.Pointer(p))[:]), b[:vptrSize])
-}
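
The asymmetry between Encode and Decode is deliberate: Encode writes the struct straight into the byte buffer through a cast, while Decode copies byte-by-byte to sidestep the alignment issue referenced above. A standalone round trip of the same trick, using a copy of the struct layout:

```go
package main

import (
	"fmt"
	"unsafe"
)

type valuePointer struct {
	Fid    uint32
	Len    uint32
	Offset uint32
}

const vptrSize = unsafe.Sizeof(valuePointer{}) // 12 bytes

func main() {
	p := valuePointer{Fid: 7, Len: 512, Offset: 4096}

	// Encode: reinterpret the start of b as a valuePointer and assign into it.
	b := make([]byte, vptrSize)
	*(*valuePointer)(unsafe.Pointer(&b[0])) = p

	// Decode: a byte-wise copy avoids alignment problems on the destination.
	var q valuePointer
	copy(((*[vptrSize]byte)(unsafe.Pointer(&q)))[:], b[:vptrSize])

	fmt.Println(q == p) // true
}
```
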
-
-// header is used in value log as a header before Entry.
-type header struct {
- klen uint32
- vlen uint32
- expiresAt uint64
- meta byte
- userMeta byte
-}
-
-const (
- // Maximum possible size of the header. The maximum size of the header struct is 18 bytes,
- // but the maximum size of the varint-encoded header is 21 bytes.
- maxHeaderSize = 21
-)
-
-// Encode encodes the header into []byte. The provided []byte should be at least 5 bytes. The
-// function will panic if out []byte isn't large enough to hold all the values.
-// The encoded header looks like
-// +------+----------+------------+--------------+-----------+
-// | Meta | UserMeta | Key Length | Value Length | ExpiresAt |
-// +------+----------+------------+--------------+-----------+
-func (h header) Encode(out []byte) int {
- out[0], out[1] = h.meta, h.userMeta
- index := 2
- index += binary.PutUvarint(out[index:], uint64(h.klen))
- index += binary.PutUvarint(out[index:], uint64(h.vlen))
- index += binary.PutUvarint(out[index:], h.expiresAt)
- return index
-}
-
-// Decode decodes the given header from the provided byte slice.
-// Returns the number of bytes read.
-func (h *header) Decode(buf []byte) int {
- h.meta, h.userMeta = buf[0], buf[1]
- index := 2
- klen, count := binary.Uvarint(buf[index:])
- h.klen = uint32(klen)
- index += count
- vlen, count := binary.Uvarint(buf[index:])
- h.vlen = uint32(vlen)
- index += count
- h.expiresAt, count = binary.Uvarint(buf[index:])
- return index + count
-}
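
Because klen, vlen, and expiresAt are varint-encoded, the header is variable-width (up to maxHeaderSize = 21 bytes); small keys and values shrink it. A standalone round trip using the same layout:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

type header struct {
	klen, vlen     uint32
	expiresAt      uint64
	meta, userMeta byte
}

// encode matches the layout above: meta, userMeta, then three uvarints.
func (h header) encode(out []byte) int {
	out[0], out[1] = h.meta, h.userMeta
	index := 2
	index += binary.PutUvarint(out[index:], uint64(h.klen))
	index += binary.PutUvarint(out[index:], uint64(h.vlen))
	index += binary.PutUvarint(out[index:], h.expiresAt)
	return index
}

func main() {
	buf := make([]byte, 21) // maxHeaderSize
	h := header{klen: 5, vlen: 300, expiresAt: 1700000000, meta: 1}
	n := h.encode(buf)
	fmt.Printf("encoded in %d bytes: %x\n", n, buf[:n])

	// Decode it back, field by field.
	var g header
	g.meta, g.userMeta = buf[0], buf[1]
	i := 2
	k, c := binary.Uvarint(buf[i:])
	g.klen, i = uint32(k), i+c
	v, c := binary.Uvarint(buf[i:])
	g.vlen, i = uint32(v), i+c
	g.expiresAt, _ = binary.Uvarint(buf[i:])
	fmt.Println(g == h) // true
}
```
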
-
-// DecodeFrom reads the header from the hashReader.
-// Returns the number of bytes read.
-func (h *header) DecodeFrom(reader *hashReader) (int, error) {
- var err error
- h.meta, err = reader.ReadByte()
- if err != nil {
- return 0, err
- }
- h.userMeta, err = reader.ReadByte()
- if err != nil {
- return 0, err
- }
- klen, err := binary.ReadUvarint(reader)
- if err != nil {
- return 0, err
- }
- h.klen = uint32(klen)
- vlen, err := binary.ReadUvarint(reader)
- if err != nil {
- return 0, err
- }
- h.vlen = uint32(vlen)
- h.expiresAt, err = binary.ReadUvarint(reader)
- if err != nil {
- return 0, err
- }
- return reader.bytesRead, nil
-}
-
-// Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by
-// the user to set data.
-type Entry struct {
- Key []byte
- Value []byte
- UserMeta byte
- ExpiresAt uint64 // time.Unix
- meta byte
- version uint64
-
- // Fields maintained internally.
- offset uint32
- skipVlog bool
- hlen int // Length of the header.
-}
-
-func (e *Entry) estimateSize(threshold int) int {
- if len(e.Value) < threshold {
- return len(e.Key) + len(e.Value) + 2 // Meta, UserMeta
- }
- return len(e.Key) + 12 + 2 // 12 for ValuePointer, 2 for metas.
-}
-
-func (e Entry) print(prefix string) {
- fmt.Printf("%s Key: %s Meta: %d UserMeta: %d Offset: %d len(val)=%d",
- prefix, e.Key, e.meta, e.UserMeta, e.offset, len(e.Value))
-}
-
-// NewEntry creates a new entry with key and value passed in args. This newly created entry can be
-// set in a transaction by calling txn.SetEntry(). All other properties of Entry can be set by
-// calling WithMeta, WithDiscard, WithTTL methods on it.
-// This function keeps references to key and value, hence users must
-// not modify them until the end of the transaction.
-func NewEntry(key, value []byte) *Entry {
- return &Entry{
- Key: key,
- Value: value,
- }
-}
-
-// WithMeta adds meta data to Entry e. This byte is stored alongside the key
-// and can be used as an aid to interpret the value or store other contextual
-// bits corresponding to the key-value pair of entry.
-func (e *Entry) WithMeta(meta byte) *Entry {
- e.UserMeta = meta
- return e
-}
-
-// WithDiscard adds a marker to Entry e. This means all the previous versions of the key (of the
-// Entry) will be eligible for garbage collection.
-// This method is only useful if you have set a higher limit for options.NumVersionsToKeep. The
-// default setting is 1, in which case, this function doesn't add any more benefit. If however, you
-// have a higher setting for NumVersionsToKeep (in Dgraph, we set it to infinity), you can use this
-// method to indicate that all the older versions can be discarded and removed during compactions.
-func (e *Entry) WithDiscard() *Entry {
- e.meta = bitDiscardEarlierVersions
- return e
-}
-
-// WithTTL adds time to live duration to Entry e. Entry stored with a TTL would automatically expire
-// after the time has elapsed, and will be eligible for garbage collection.
-func (e *Entry) WithTTL(dur time.Duration) *Entry {
- e.ExpiresAt = uint64(time.Now().Add(dur).Unix())
- return e
-}
-
-// withMergeBit sets merge bit in entry's metadata. This
-// function is called by MergeOperator's Add method.
-func (e *Entry) withMergeBit() *Entry {
- e.meta = bitMergeEntry
- return e
-}
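
A hypothetical write path exercising the Entry builders above inside a transaction (the path and key are made up):

```go
package main

import (
	"log"
	"time"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/entry-demo"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(txn *badger.Txn) error {
		e := badger.NewEntry([]byte("session:42"), []byte("alice")).
			WithMeta(0x01).           // one user byte stored alongside the key
			WithTTL(10 * time.Minute) // expires, then becomes GC-eligible
		return txn.SetEntry(e)
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
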
diff --git a/vendor/github.com/dgraph-io/badger/v2/table/README.md b/vendor/github.com/dgraph-io/badger/v2/table/README.md
deleted file mode 100644
index 19276079..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/table/README.md
+++ /dev/null
@@ -1,108 +0,0 @@
-Size of table is 123,217,667 bytes for all benchmarks.
-
-# BenchmarkRead
-```
-$ go test -bench ^BenchmarkRead$ -run ^$ -count 3
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger/table
-BenchmarkRead-16 10 154074944 ns/op
-BenchmarkRead-16 10 154340411 ns/op
-BenchmarkRead-16 10 151914489 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 22.467s
-```
-
-Size of table is 123,217,667 bytes, which is ~118MB.
-
-The rate is ~762MB/s using LoadToRAM (when table is in RAM).
-
-To read a 64MB table, this would take ~0.084s, which is negligible.
-
-# BenchmarkReadAndBuild
-```
-$ go test -bench BenchmarkReadAndBuild -run ^$ -count 3
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger/table
-BenchmarkReadAndBuild-16 1 1026755231 ns/op
-BenchmarkReadAndBuild-16 1 1009543316 ns/op
-BenchmarkReadAndBuild-16 1 1039920546 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 12.081s
-```
-
-The rate is ~123MB/s. To build a 64MB table, this would take ~0.56s. Note that this
-does NOT include the flushing of the table to disk. All we are doing above is
-reading one table (which is in RAM) and writing one table in memory.
-
-The table building alone therefore takes 0.56 - 0.084 ≈ 0.48s.
-
-# BenchmarkReadMerged
-Below, we merge 5 tables. The total size remains unchanged at ~122M.
-
-```
-$ go test -bench ReadMerged -run ^$ -count 3
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger/table
-BenchmarkReadMerged-16 2 977588975 ns/op
-BenchmarkReadMerged-16 2 982140738 ns/op
-BenchmarkReadMerged-16 2 962046017 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 27.433s
-```
-
-The rate is ~120MB/s. To read a 64MB table using merge iterator, this would take ~0.53s.
-
-# BenchmarkRandomRead
-
-```
-go test -bench BenchmarkRandomRead$ -run ^$ -count 3
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger/table
-BenchmarkRandomRead-16 500000 2645 ns/op
-BenchmarkRandomRead-16 500000 2648 ns/op
-BenchmarkRandomRead-16 500000 2614 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 50.850s
-```
-For random read benchmarking, we are randomly reading a key and verifying its value.
-
-# DB Open benchmark
-1. Create badger DB with 2 billion key-value pairs (about 380GB of data)
-```
-badger fill -m 2000 --dir="/tmp/data" --sorted
-```
-2. Clear buffers and swap memory
-```
-free -mh && sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && sudo swapoff -a && sudo swapon -a && free -mh
-```
-Also flush disk buffers
-```
-blockdev --flushbufs /dev/nvme0n1p4
-```
-3. Run the benchmark
-```
-go test -run=^$ github.com/dgraph-io/badger -bench ^BenchmarkDBOpen$ -benchdir="/tmp/data" -v
-
-badger 2019/06/04 17:15:56 INFO: 126 tables out of 1028 opened in 3.017s
-badger 2019/06/04 17:15:59 INFO: 257 tables out of 1028 opened in 6.014s
-badger 2019/06/04 17:16:02 INFO: 387 tables out of 1028 opened in 9.017s
-badger 2019/06/04 17:16:05 INFO: 516 tables out of 1028 opened in 12.025s
-badger 2019/06/04 17:16:08 INFO: 645 tables out of 1028 opened in 15.013s
-badger 2019/06/04 17:16:11 INFO: 775 tables out of 1028 opened in 18.008s
-badger 2019/06/04 17:16:14 INFO: 906 tables out of 1028 opened in 21.003s
-badger 2019/06/04 17:16:17 INFO: All 1028 tables opened in 23.851s
-badger 2019/06/04 17:16:17 INFO: Replaying file id: 1998 at offset: 332000
-badger 2019/06/04 17:16:17 INFO: Replay took: 9.81µs
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger
-BenchmarkDBOpen-16 1 23930082140 ns/op
-PASS
-ok github.com/dgraph-io/badger 24.076s
-
-```
-It takes about 23.851s to open a DB with 2 billion sorted key-value entries.
diff --git a/vendor/github.com/dgraph-io/badger/v2/table/builder.go b/vendor/github.com/dgraph-io/badger/v2/table/builder.go
deleted file mode 100644
index 0e22ee1e..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/table/builder.go
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "bytes"
- "crypto/aes"
- "math"
- "unsafe"
-
- "github.com/dgryski/go-farm"
- "github.com/golang/protobuf/proto"
- "github.com/golang/snappy"
- "github.com/pkg/errors"
-
- "github.com/dgraph-io/badger/v2/options"
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/y"
- "github.com/dgraph-io/ristretto/z"
-)
-
-func newBuffer(sz int) *bytes.Buffer {
- b := new(bytes.Buffer)
- b.Grow(sz)
- return b
-}
-
-type header struct {
- overlap uint16 // Overlap with base key.
- diff uint16 // Length of the diff.
-}
-
-const headerSize = uint16(unsafe.Sizeof(header{}))
-
-// Encode encodes the header.
-func (h header) Encode() []byte {
- var b [4]byte
- *(*header)(unsafe.Pointer(&b[0])) = h
- return b[:]
-}
-
-// Decode decodes the header.
-func (h *header) Decode(buf []byte) {
- // Copy over data from buf into h. Using *h=unsafe.pointer(...) leads to
- // pointer alignment issues. See https://github.com/dgraph-io/badger/issues/1096
- // and comment https://github.com/dgraph-io/badger/pull/1097#pullrequestreview-307361714
- copy(((*[headerSize]byte)(unsafe.Pointer(h))[:]), buf[:headerSize])
-}
-
-// Builder is used in building a table.
-type Builder struct {
- // Typically tens or hundreds of meg. This is for one single file.
- buf *bytes.Buffer
-
- baseKey []byte // Base key for the current block.
- baseOffset uint32 // Offset for the current block.
- entryOffsets []uint32 // Offsets of entries present in current block.
- tableIndex *pb.TableIndex
- keyHashes []uint64 // Used for building the bloomfilter.
- opt *Options
-}
-
-// NewTableBuilder makes a new TableBuilder.
-func NewTableBuilder(opts Options) *Builder {
- return &Builder{
- buf: newBuffer(1 << 20),
- tableIndex: &pb.TableIndex{},
- keyHashes: make([]uint64, 0, 1024), // Avoid some malloc calls.
- opt: &opts,
- }
-}
-
-// Close closes the TableBuilder.
-func (b *Builder) Close() {}
-
-// Empty returns whether it's empty.
-func (b *Builder) Empty() bool { return b.buf.Len() == 0 }
-
-// keyDiff returns a suffix of newKey that is different from b.baseKey.
-func (b *Builder) keyDiff(newKey []byte) []byte {
- var i int
- for i = 0; i < len(newKey) && i < len(b.baseKey); i++ {
- if newKey[i] != b.baseKey[i] {
- break
- }
- }
- return newKey[i:]
-}
-
-func (b *Builder) addHelper(key []byte, v y.ValueStruct, vpLen uint64) {
- b.keyHashes = append(b.keyHashes, farm.Fingerprint64(y.ParseKey(key)))
-
- // diffKey stores the difference of key with baseKey.
- var diffKey []byte
- if len(b.baseKey) == 0 {
- // Make a copy. Builder should not keep references. Otherwise, caller has to be very careful
- // and will have to make copies of keys every time they add to builder, which is even worse.
- b.baseKey = append(b.baseKey[:0], key...)
- diffKey = key
- } else {
- diffKey = b.keyDiff(key)
- }
-
- y.AssertTrue(len(key)-len(diffKey) <= math.MaxUint16)
- y.AssertTrue(len(diffKey) <= math.MaxUint16)
-
- h := header{
- overlap: uint16(len(key) - len(diffKey)),
- diff: uint16(len(diffKey)),
- }
-
- // store current entry's offset
- y.AssertTrue(uint32(b.buf.Len()) < math.MaxUint32)
- b.entryOffsets = append(b.entryOffsets, uint32(b.buf.Len())-b.baseOffset)
-
- // Layout: header, diffKey, value.
- b.buf.Write(h.Encode())
- b.buf.Write(diffKey) // We only need to store the key difference.
-
- v.EncodeTo(b.buf)
- // Size of KV on SST.
- sstSz := uint64(uint32(headerSize) + uint32(len(diffKey)) + v.EncodedSize())
- // Total estimated size = size on SST + size on vlog (length of value pointer).
- b.tableIndex.EstimatedSize += (sstSz + vpLen)
-}
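
The overlap/diff header is plain prefix compression against the block's base key; only the differing suffix of each key is written. A standalone illustration of the saving:

```go
package main

import "fmt"

// keyDiff returns the suffix of newKey that differs from baseKey,
// mirroring Builder.keyDiff above.
func keyDiff(baseKey, newKey []byte) []byte {
	i := 0
	for i < len(newKey) && i < len(baseKey) && newKey[i] == baseKey[i] {
		i++
	}
	return newKey[i:]
}

func main() {
	base := []byte("user:1000:name")
	key := []byte("user:1000:email")
	diff := keyDiff(base, key)
	overlap := len(key) - len(diff)
	// Stored per entry: header{overlap: 10, diff: 5} plus "email",
	// instead of the full 15-byte key.
	fmt.Printf("overlap=%d diff=%q\n", overlap, diff)
}
```
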
-
-/*
-Structure of Block.
-+-------------------+---------------------+--------------------+--------------+------------------+
-| Entry1 | Entry2 | Entry3 | Entry4 | Entry5 |
-+-------------------+---------------------+--------------------+--------------+------------------+
-| Entry6 | ... | ... | ... | EntryN |
-+-------------------+---------------------+--------------------+--------------+------------------+
-| Block Meta(contains list of offsets used| Block Meta Size | Block | Checksum Size |
-| to perform binary search in the block) | (4 Bytes) | Checksum | (4 Bytes) |
-+-----------------------------------------+--------------------+--------------+------------------+
-*/
-// In case the data is encrypted, the "IV" is added to the end of the block.
-func (b *Builder) finishBlock() {
- b.buf.Write(y.U32SliceToBytes(b.entryOffsets))
- b.buf.Write(y.U32ToBytes(uint32(len(b.entryOffsets))))
-
- blockBuf := b.buf.Bytes()[b.baseOffset:] // Store checksum for current block.
- b.writeChecksum(blockBuf)
-
- // Compress the block.
- if b.opt.Compression != options.None {
- var err error
- // TODO: Find a way to reuse buffers. Current implementation creates a
- // new buffer for each compressData call.
- blockBuf, err = b.compressData(b.buf.Bytes()[b.baseOffset:])
- y.Check(err)
- // Truncate already written data.
- b.buf.Truncate(int(b.baseOffset))
- // Write compressed data.
- b.buf.Write(blockBuf)
- }
- if b.shouldEncrypt() {
- block := b.buf.Bytes()[b.baseOffset:]
- eBlock, err := b.encrypt(block)
- y.Check(y.Wrapf(err, "Error while encrypting block in table builder."))
- // We're rewriting the block, after encrypting.
- b.buf.Truncate(int(b.baseOffset))
- b.buf.Write(eBlock)
- }
-
- // TODO(Ashish):Add padding: If we want to make block as multiple of OS pages, we can
- // implement padding. This might be useful while using direct I/O.
-
- // Add key to the block index
- bo := &pb.BlockOffset{
- Key: y.Copy(b.baseKey),
- Offset: b.baseOffset,
- Len: uint32(b.buf.Len()) - b.baseOffset,
- }
- b.tableIndex.Offsets = append(b.tableIndex.Offsets, bo)
-}
-
-func (b *Builder) shouldFinishBlock(key []byte, value y.ValueStruct) bool {
- // If there are no entries so far, return false.
- if len(b.entryOffsets) <= 0 {
- return false
- }
-
- // Integer overflow check for statements below.
- y.AssertTrue((uint32(len(b.entryOffsets))+1)*4+4+8+4 < math.MaxUint32)
- // We should also include the current entry in the size, hence the +1 to len(b.entryOffsets).
- entriesOffsetsSize := uint32((len(b.entryOffsets)+1)*4 +
- 4 + // size of list
- 8 + // Sum64 in checksum proto
- 4) // checksum length
- estimatedSize := uint32(b.buf.Len()) - b.baseOffset + uint32(6 /*header size for entry*/) +
- uint32(len(key)) + uint32(value.EncodedSize()) + entriesOffsetsSize
-
- if b.shouldEncrypt() {
- // IV is added at the end of the block, while encrypting.
- // So, size of IV is added to estimatedSize.
- estimatedSize += aes.BlockSize
- }
- return estimatedSize > uint32(b.opt.BlockSize)
-}
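-
-// Editor's note (not part of the original source), a worked example of the
-// estimate above: with 99 entries already in the block, entriesOffsetsSize =
-// (99+1)*4 + 4 + 8 + 4 = 416 bytes of trailer, so the incoming entry is only
-// admitted if the block data plus its header, key, value, and this trailer
-// still fit within opt.BlockSize.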
-
-// Add adds a key-value pair to the table being built, finishing the current block first if it is full.
-func (b *Builder) Add(key []byte, value y.ValueStruct, valueLen uint32) {
- if b.shouldFinishBlock(key, value) {
- b.finishBlock()
-		// Start a new block and initialize it.
- b.baseKey = []byte{}
- y.AssertTrue(uint32(b.buf.Len()) < math.MaxUint32)
- b.baseOffset = uint32(b.buf.Len())
- b.entryOffsets = b.entryOffsets[:0]
- }
- b.addHelper(key, value, uint64(valueLen))
-}
-
-// TODO: Look into why the estimated size and the final size can differ. I suspect it is
-// because of the Write(empty, empty) at the end; the diff can vary.
-
-// ReachedCapacity reports whether the table's estimated final size exceeds cap.
-func (b *Builder) ReachedCapacity(cap int64) bool {
- blocksSize := b.buf.Len() + // length of current buffer
- len(b.entryOffsets)*4 + // all entry offsets size
- 4 + // count of all entry offsets
- 8 + // checksum bytes
- 4 // checksum length
- estimateSz := blocksSize +
- 4 + // Index length
- 5*(len(b.tableIndex.Offsets)) // approximate index size
-
- return int64(estimateSz) > cap
-}
-
-// Finish finishes the table by appending the index.
-/*
-The table structure looks like
-+---------+------------+-----------+---------------+
-| Block 1 | Block 2 | Block 3 | Block 4 |
-+---------+------------+-----------+---------------+
-| Block 5 | Block 6 | Block ... | Block N |
-+---------+------------+-----------+---------------+
-| Index | Index Size | Checksum | Checksum Size |
-+---------+------------+-----------+---------------+
-*/
-// In case the data is encrypted, the "IV" is added to the end of the index.
-func (b *Builder) Finish() []byte {
- bf := z.NewBloomFilter(float64(len(b.keyHashes)), b.opt.BloomFalsePositive)
- for _, h := range b.keyHashes {
- bf.Add(h)
- }
- // Add bloom filter to the index.
- b.tableIndex.BloomFilter = bf.JSONMarshal()
-
- b.finishBlock() // This will never start a new block.
-
- index, err := proto.Marshal(b.tableIndex)
- y.Check(err)
-
- if b.shouldEncrypt() {
- index, err = b.encrypt(index)
- y.Check(err)
- }
-	// Write the index to the file.
- n, err := b.buf.Write(index)
- y.Check(err)
-
- y.AssertTrue(uint32(n) < math.MaxUint32)
- // Write index size.
- _, err = b.buf.Write(y.U32ToBytes(uint32(n)))
- y.Check(err)
-
- b.writeChecksum(index)
- return b.buf.Bytes()
-}
-
-func (b *Builder) writeChecksum(data []byte) {
- // Build checksum for the index.
- checksum := pb.Checksum{
- // TODO: The checksum type should be configurable from the
- // options.
- // We chose to use CRC32 as the default option because
- // it performed better compared to xxHash64.
- // See the BenchmarkChecksum in table_test.go file
- // Size => 1024 B 2048 B
- // CRC32 => 63.7 ns/op 112 ns/op
- // xxHash64 => 87.5 ns/op 158 ns/op
- Sum: y.CalculateChecksum(data, pb.Checksum_CRC32C),
- Algo: pb.Checksum_CRC32C,
- }
-
- // Write checksum to the file.
- chksum, err := proto.Marshal(&checksum)
- y.Check(err)
- n, err := b.buf.Write(chksum)
- y.Check(err)
-
- y.AssertTrue(uint32(n) < math.MaxUint32)
- // Write checksum size.
- _, err = b.buf.Write(y.U32ToBytes(uint32(n)))
- y.Check(err)
-}
-
-// DataKey returns datakey of the builder.
-func (b *Builder) DataKey() *pb.DataKey {
- return b.opt.DataKey
-}
-
-// encrypt encrypts the given data and appends the IV to the end of the encrypted data.
-// It should be called only after shouldEncrypt has returned true.
-func (b *Builder) encrypt(data []byte) ([]byte, error) {
- iv, err := y.GenerateIV()
- if err != nil {
- return data, y.Wrapf(err, "Error while generating IV in Builder.encrypt")
- }
- data, err = y.XORBlock(data, b.DataKey().Data, iv)
- if err != nil {
- return data, y.Wrapf(err, "Error while encrypting in Builder.encrypt")
- }
- data = append(data, iv...)
- return data, nil
-}
-
-// shouldEncrypt reports whether the data should be encrypted.
-// We encrypt only if a data key exists.
-func (b *Builder) shouldEncrypt() bool {
- return b.opt.DataKey != nil
-}
-
-// compressData compresses the given data.
-func (b *Builder) compressData(data []byte) ([]byte, error) {
- switch b.opt.Compression {
- case options.None:
- return data, nil
- case options.Snappy:
- return snappy.Encode(nil, data), nil
- case options.ZSTD:
- return y.ZSTDCompress(nil, data, b.opt.ZSTDCompressionLevel)
- }
- return nil, errors.New("Unsupported compression type")
-}
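-
-// Editor's illustration (not part of the original source): a minimal sketch of
-// the Builder flow. It assumes the package's NewTableBuilder(opts) constructor
-// and that keys arrive in sorted order with timestamps appended via y.KeyWithTs:
-//
-//	b := NewTableBuilder(opts)
-//	b.Add(y.KeyWithTs([]byte("k1"), 1), y.ValueStruct{Value: []byte("v1")}, 0)
-//	b.Add(y.KeyWithTs([]byte("k2"), 1), y.ValueStruct{Value: []byte("v2")}, 0)
-//	sst := b.Finish() // blocks, then index, then checksum, per the layout above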
diff --git a/vendor/github.com/dgraph-io/badger/v2/table/iterator.go b/vendor/github.com/dgraph-io/badger/v2/table/iterator.go
deleted file mode 100644
index 8f46fe1b..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/table/iterator.go
+++ /dev/null
@@ -1,524 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "bytes"
- "io"
- "sort"
-
- "github.com/dgraph-io/badger/v2/y"
- "github.com/pkg/errors"
-)
-
-type blockIterator struct {
- data []byte
- idx int // Idx of the entry inside a block
- err error
- baseKey []byte
- key []byte
- val []byte
- entryOffsets []uint32
-
- // prevOverlap stores the overlap of the previous key with the base key.
- // This avoids unnecessary copy of base key when the overlap is same for multiple keys.
- prevOverlap uint16
-}
-
-func (itr *blockIterator) setBlock(b *block) {
- itr.err = nil
- itr.idx = 0
- itr.baseKey = itr.baseKey[:0]
- itr.prevOverlap = 0
- itr.key = itr.key[:0]
- itr.val = itr.val[:0]
- // Drop the index from the block. We don't need it anymore.
- itr.data = b.data[:b.entriesIndexStart]
- itr.entryOffsets = b.entryOffsets
-}
-
-// setIdx sets the iterator to the entry at index i and sets its key and value.
-func (itr *blockIterator) setIdx(i int) {
- itr.idx = i
- if i >= len(itr.entryOffsets) || i < 0 {
- itr.err = io.EOF
- return
- }
- itr.err = nil
- startOffset := int(itr.entryOffsets[i])
-
- // Set base key.
- if len(itr.baseKey) == 0 {
- var baseHeader header
- baseHeader.Decode(itr.data)
- itr.baseKey = itr.data[headerSize : headerSize+baseHeader.diff]
- }
- var endOffset int
- // idx points to the last entry in the block.
- if itr.idx+1 == len(itr.entryOffsets) {
- endOffset = len(itr.data)
- } else {
-		// idx points to some entry other than the last one in the block.
- // EndOffset of the current entry is the start offset of the next entry.
- endOffset = int(itr.entryOffsets[itr.idx+1])
- }
-
- entryData := itr.data[startOffset:endOffset]
- var h header
- h.Decode(entryData)
-	// The header stores the lengths of the key's overlap with, and difference from, the base key.
-	// If the previous key had the same or a larger overlap, that shared part of itr.key is already
-	// correct; if its overlap was smaller, we copy over just the missing portion of the base key.
- if h.overlap > itr.prevOverlap {
- itr.key = append(itr.key[:itr.prevOverlap], itr.baseKey[itr.prevOverlap:h.overlap]...)
- }
- itr.prevOverlap = h.overlap
- valueOff := headerSize + h.diff
- diffKey := entryData[headerSize:valueOff]
- itr.key = append(itr.key[:h.overlap], diffKey...)
- itr.val = entryData[valueOff:]
-}
-
-func (itr *blockIterator) Valid() bool {
- return itr != nil && itr.err == nil
-}
-
-func (itr *blockIterator) Error() error {
- return itr.err
-}
-
-func (itr *blockIterator) Close() {}
-
-var (
- origin = 0
- current = 1
-)
-
-// seek brings us to the first block element that is >= input key.
-func (itr *blockIterator) seek(key []byte, whence int) {
- itr.err = nil
- startIndex := 0 // This tells from which index we should start binary search.
-
- switch whence {
- case origin:
- // We don't need to do anything. startIndex is already at 0
- case current:
- startIndex = itr.idx
- }
-
- foundEntryIdx := sort.Search(len(itr.entryOffsets), func(idx int) bool {
- // If idx is less than start index then just return false.
- if idx < startIndex {
- return false
- }
- itr.setIdx(idx)
- return y.CompareKeys(itr.key, key) >= 0
- })
- itr.setIdx(foundEntryIdx)
-}
-
-// seekToFirst brings us to the first element.
-func (itr *blockIterator) seekToFirst() {
- itr.setIdx(0)
-}
-
-// seekToLast brings us to the last element.
-func (itr *blockIterator) seekToLast() {
- itr.setIdx(len(itr.entryOffsets) - 1)
-}
-
-func (itr *blockIterator) next() {
- itr.setIdx(itr.idx + 1)
-}
-
-func (itr *blockIterator) prev() {
- itr.setIdx(itr.idx - 1)
-}
-
-// Iterator is an iterator for a Table.
-type Iterator struct {
- t *Table
- bpos int
- bi blockIterator
- err error
-
- // Internally, Iterator is bidirectional. However, we only expose the
- // unidirectional functionality for now.
- reversed bool
-}
-
-// NewIterator returns a new iterator of the Table
-func (t *Table) NewIterator(reversed bool) *Iterator {
- t.IncrRef() // Important.
- ti := &Iterator{t: t, reversed: reversed}
- ti.next()
- return ti
-}
-
-// Close closes the iterator (and it must be called).
-func (itr *Iterator) Close() error {
- return itr.t.DecrRef()
-}
-
-func (itr *Iterator) reset() {
- itr.bpos = 0
- itr.err = nil
-}
-
-// Valid follows the y.Iterator interface
-func (itr *Iterator) Valid() bool {
- return itr.err == nil
-}
-
-func (itr *Iterator) seekToFirst() {
- numBlocks := itr.t.noOfBlocks
- if numBlocks == 0 {
- itr.err = io.EOF
- return
- }
- itr.bpos = 0
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi.setBlock(block)
- itr.bi.seekToFirst()
- itr.err = itr.bi.Error()
-}
-
-func (itr *Iterator) seekToLast() {
- numBlocks := itr.t.noOfBlocks
- if numBlocks == 0 {
- itr.err = io.EOF
- return
- }
- itr.bpos = numBlocks - 1
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi.setBlock(block)
- itr.bi.seekToLast()
- itr.err = itr.bi.Error()
-}
-
-func (itr *Iterator) seekHelper(blockIdx int, key []byte) {
- itr.bpos = blockIdx
- block, err := itr.t.block(blockIdx)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi.setBlock(block)
- itr.bi.seek(key, origin)
- itr.err = itr.bi.Error()
-}
-
-// seekFrom brings us to a key that is >= input key.
-func (itr *Iterator) seekFrom(key []byte, whence int) {
- itr.err = nil
- switch whence {
- case origin:
- itr.reset()
- case current:
- }
-
- idx := sort.Search(itr.t.noOfBlocks, func(idx int) bool {
- ko := itr.t.blockOffsets()[idx]
- return y.CompareKeys(ko.Key, key) > 0
- })
- if idx == 0 {
- // The smallest key in our table is already strictly > key. We can return that.
- // This is like a SeekToFirst.
- itr.seekHelper(0, key)
- return
- }
-
- // block[idx].smallest is > key.
- // Since idx>0, we know block[idx-1].smallest is <= key.
- // There are two cases.
- // 1) Everything in block[idx-1] is strictly < key. In this case, we should go to the first
- // element of block[idx].
- // 2) Some element in block[idx-1] is >= key. We should go to that element.
- itr.seekHelper(idx-1, key)
- if itr.err == io.EOF {
- // Case 1. Need to visit block[idx].
- if idx == itr.t.noOfBlocks {
-			// If idx == itr.t.noOfBlocks, the input key is greater than ANY element of the table.
-			// There's nothing we can do. Valid() should return false as we seek to the end of the table.
- return
- }
- // Since block[idx].smallest is > key. This is essentially a block[idx].SeekToFirst.
- itr.seekHelper(idx, key)
- }
- // Case 2: No need to do anything. We already did the seek in block[idx-1].
-}
-
-// seek will reset iterator and seek to >= key.
-func (itr *Iterator) seek(key []byte) {
- itr.seekFrom(key, origin)
-}
-
-// seekForPrev will reset iterator and seek to <= key.
-func (itr *Iterator) seekForPrev(key []byte) {
- // TODO: Optimize this. We shouldn't have to take a Prev step.
- itr.seekFrom(key, origin)
- if !bytes.Equal(itr.Key(), key) {
- itr.prev()
- }
-}
-
-func (itr *Iterator) next() {
- itr.err = nil
-
- if itr.bpos >= itr.t.noOfBlocks {
- itr.err = io.EOF
- return
- }
-
- if len(itr.bi.data) == 0 {
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi.setBlock(block)
- itr.bi.seekToFirst()
- itr.err = itr.bi.Error()
- return
- }
-
- itr.bi.next()
- if !itr.bi.Valid() {
- itr.bpos++
- itr.bi.data = nil
- itr.next()
- return
- }
-}
-
-func (itr *Iterator) prev() {
- itr.err = nil
- if itr.bpos < 0 {
- itr.err = io.EOF
- return
- }
-
- if len(itr.bi.data) == 0 {
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi.setBlock(block)
- itr.bi.seekToLast()
- itr.err = itr.bi.Error()
- return
- }
-
- itr.bi.prev()
- if !itr.bi.Valid() {
- itr.bpos--
- itr.bi.data = nil
- itr.prev()
- return
- }
-}
-
-// Key follows the y.Iterator interface.
-// Returns the key with timestamp.
-func (itr *Iterator) Key() []byte {
- return itr.bi.key
-}
-
-// Value follows the y.Iterator interface
-func (itr *Iterator) Value() (ret y.ValueStruct) {
- ret.Decode(itr.bi.val)
- return
-}
-
-// ValueCopy copies the current value and returns it as decoded
-// ValueStruct.
-func (itr *Iterator) ValueCopy() (ret y.ValueStruct) {
- dst := y.Copy(itr.bi.val)
- ret.Decode(dst)
- return
-}
-
-// Next follows the y.Iterator interface
-func (itr *Iterator) Next() {
- if !itr.reversed {
- itr.next()
- } else {
- itr.prev()
- }
-}
-
-// Rewind follows the y.Iterator interface
-func (itr *Iterator) Rewind() {
- if !itr.reversed {
- itr.seekToFirst()
- } else {
- itr.seekToLast()
- }
-}
-
-// Seek follows the y.Iterator interface
-func (itr *Iterator) Seek(key []byte) {
- if !itr.reversed {
- itr.seek(key)
- } else {
- itr.seekForPrev(key)
- }
-}
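-
-// Editor's illustration (not part of the original source): a typical forward
-// scan over an open *Table t using the methods above:
-//
-//	it := t.NewIterator(false) // false => ascending order
-//	defer it.Close()
-//	for it.Rewind(); it.Valid(); it.Next() {
-//		k, v := it.Key(), it.Value()
-//		_, _ = k, v // process the entry
-//	}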
-
-// ConcatIterator concatenates the sequences defined by several iterators. (It only works with
-// TableIterators, probably just because it's faster to not be so generic.)
-type ConcatIterator struct {
- idx int // Which iterator is active now.
- cur *Iterator
- iters []*Iterator // Corresponds to tables.
- tables []*Table // Disregarding reversed, this is in ascending order.
- reversed bool
-}
-
-// NewConcatIterator creates a new concatenated iterator
-func NewConcatIterator(tbls []*Table, reversed bool) *ConcatIterator {
- iters := make([]*Iterator, len(tbls))
- for i := 0; i < len(tbls); i++ {
-		// Increment the reference count since we're not creating the iterators right now.
-		// We hold a reference to each table for the lifetime of the ConcatIterator.
- tbls[i].IncrRef()
-
- // Save cycles by not initializing the iterators until needed.
- // iters[i] = tbls[i].NewIterator(reversed)
- }
- return &ConcatIterator{
- reversed: reversed,
- iters: iters,
- tables: tbls,
- idx: -1, // Not really necessary because s.it.Valid()=false, but good to have.
- }
-}
-
-func (s *ConcatIterator) setIdx(idx int) {
- s.idx = idx
- if idx < 0 || idx >= len(s.iters) {
- s.cur = nil
- return
- }
- if s.iters[idx] == nil {
- s.iters[idx] = s.tables[idx].NewIterator(s.reversed)
- }
- s.cur = s.iters[s.idx]
-}
-
-// Rewind implements y.Iterator.
-func (s *ConcatIterator) Rewind() {
- if len(s.iters) == 0 {
- return
- }
- if !s.reversed {
- s.setIdx(0)
- } else {
- s.setIdx(len(s.iters) - 1)
- }
- s.cur.Rewind()
-}
-
-// Valid implements y.Iterator.
-func (s *ConcatIterator) Valid() bool {
- return s.cur != nil && s.cur.Valid()
-}
-
-// Key implements y.Iterator.
-func (s *ConcatIterator) Key() []byte {
- return s.cur.Key()
-}
-
-// Value implements y.Iterator.
-func (s *ConcatIterator) Value() y.ValueStruct {
- return s.cur.Value()
-}
-
-// Seek brings us to element >= key if reversed is false. Otherwise, <= key.
-func (s *ConcatIterator) Seek(key []byte) {
- var idx int
- if !s.reversed {
- idx = sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(s.tables[i].Biggest(), key) >= 0
- })
- } else {
- n := len(s.tables)
- idx = n - 1 - sort.Search(n, func(i int) bool {
- return y.CompareKeys(s.tables[n-1-i].Smallest(), key) <= 0
- })
- }
- if idx >= len(s.tables) || idx < 0 {
- s.setIdx(-1)
- return
- }
- // For reversed=false, we know s.tables[i-1].Biggest() < key. Thus, the
- // previous table cannot possibly contain key.
- s.setIdx(idx)
- s.cur.Seek(key)
-}
-
-// Next advances our concat iterator.
-func (s *ConcatIterator) Next() {
- s.cur.Next()
- if s.cur.Valid() {
- // Nothing to do. Just stay with the current table.
- return
- }
- for { // In case there are empty tables.
- if !s.reversed {
- s.setIdx(s.idx + 1)
- } else {
- s.setIdx(s.idx - 1)
- }
- if s.cur == nil {
- // End of list. Valid will become false.
- return
- }
- s.cur.Rewind()
- if s.cur.Valid() {
- break
- }
- }
-}
-
-// Close implements y.Iterator.
-func (s *ConcatIterator) Close() error {
- for _, t := range s.tables {
-		// Dereference the tables while closing the iterator.
- if err := t.DecrRef(); err != nil {
- return err
- }
- }
- for _, it := range s.iters {
- if it == nil {
- continue
- }
- if err := it.Close(); err != nil {
- return errors.Wrap(err, "ConcatIterator")
- }
- }
- return nil
-}
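-
-// Editor's illustration (not part of the original source): scanning a level's
-// tables in order, assuming tbls is sorted ascending as the struct comment requires:
-//
-//	ci := NewConcatIterator(tbls, false)
-//	defer ci.Close()
-//	for ci.Rewind(); ci.Valid(); ci.Next() {
-//		_ = ci.Key()
-//	}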
diff --git a/vendor/github.com/dgraph-io/badger/v2/table/merge_iterator.go b/vendor/github.com/dgraph-io/badger/v2/table/merge_iterator.go
deleted file mode 100644
index e1809e02..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/table/merge_iterator.go
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "bytes"
-
- "github.com/dgraph-io/badger/v2/y"
- "github.com/pkg/errors"
-)
-
-// MergeIterator merges multiple iterators.
-// NOTE: MergeIterator owns the array of iterators and is responsible for closing them.
-type MergeIterator struct {
- left node
- right node
- small *node
-
- curKey []byte
- reverse bool
-}
-
-type node struct {
- valid bool
- key []byte
- iter y.Iterator
-
- // The two iterators are type asserted from `y.Iterator`, used to inline more function calls.
- // Calling functions on concrete types is much faster (about 25-30%) than calling the
- // interface's function.
- merge *MergeIterator
- concat *ConcatIterator
-}
-
-func (n *node) setIterator(iter y.Iterator) {
- n.iter = iter
- // It's okay if the type assertion below fails and n.merge/n.concat are set to nil.
- // We handle the nil values of merge and concat in all the methods.
- n.merge, _ = iter.(*MergeIterator)
- n.concat, _ = iter.(*ConcatIterator)
-}
-
-func (n *node) setKey() {
- switch {
- case n.merge != nil:
- n.valid = n.merge.small.valid
- if n.valid {
- n.key = n.merge.small.key
- }
- case n.concat != nil:
- n.valid = n.concat.Valid()
- if n.valid {
- n.key = n.concat.Key()
- }
- default:
- n.valid = n.iter.Valid()
- if n.valid {
- n.key = n.iter.Key()
- }
- }
-}
-
-func (n *node) next() {
- switch {
- case n.merge != nil:
- n.merge.Next()
- case n.concat != nil:
- n.concat.Next()
- default:
- n.iter.Next()
- }
- n.setKey()
-}
-
-func (n *node) rewind() {
- n.iter.Rewind()
- n.setKey()
-}
-
-func (n *node) seek(key []byte) {
- n.iter.Seek(key)
- n.setKey()
-}
-
-func (mi *MergeIterator) fix() {
- if !mi.bigger().valid {
- return
- }
- if !mi.small.valid {
- mi.swapSmall()
- return
- }
- cmp := y.CompareKeys(mi.small.key, mi.bigger().key)
- switch {
- case cmp == 0: // Both the keys are equal.
- // In case of same keys, move the right iterator ahead.
- mi.right.next()
- if &mi.right == mi.small {
- mi.swapSmall()
- }
- return
- case cmp < 0: // Small is less than bigger().
- if mi.reverse {
- mi.swapSmall()
- } else {
-			// Nothing to do; small already points to the smaller key.
- }
- return
- default: // bigger() is less than small.
- if mi.reverse {
- // Do nothing since we're iterating in reverse. Small currently points to
- // the bigger key and that's okay in reverse iteration.
- } else {
- mi.swapSmall()
- }
- return
- }
-}
-
-func (mi *MergeIterator) bigger() *node {
- if mi.small == &mi.left {
- return &mi.right
- }
- return &mi.left
-}
-
-func (mi *MergeIterator) swapSmall() {
- if mi.small == &mi.left {
- mi.small = &mi.right
- return
- }
- if mi.small == &mi.right {
- mi.small = &mi.left
- return
- }
-}
-
-// Next advances the iterator, skipping entries whose key equals the current key.
-func (mi *MergeIterator) Next() {
- for mi.Valid() {
- if !bytes.Equal(mi.small.key, mi.curKey) {
- break
- }
- mi.small.next()
- mi.fix()
- }
- mi.setCurrent()
-}
-
-func (mi *MergeIterator) setCurrent() {
- mi.curKey = append(mi.curKey[:0], mi.small.key...)
-}
-
-// Rewind seeks to first element (or last element for reverse iterator).
-func (mi *MergeIterator) Rewind() {
- mi.left.rewind()
- mi.right.rewind()
- mi.fix()
- mi.setCurrent()
-}
-
-// Seek brings us to element with key >= given key.
-func (mi *MergeIterator) Seek(key []byte) {
- mi.left.seek(key)
- mi.right.seek(key)
- mi.fix()
- mi.setCurrent()
-}
-
-// Valid returns whether the MergeIterator is at a valid element.
-func (mi *MergeIterator) Valid() bool {
- return mi.small.valid
-}
-
-// Key returns the key associated with the current iterator.
-func (mi *MergeIterator) Key() []byte {
- return mi.small.key
-}
-
-// Value returns the value associated with the iterator.
-func (mi *MergeIterator) Value() y.ValueStruct {
- return mi.small.iter.Value()
-}
-
-// Close implements y.Iterator.
-func (mi *MergeIterator) Close() error {
- err1 := mi.left.iter.Close()
- err2 := mi.right.iter.Close()
- if err1 != nil {
- return errors.Wrap(err1, "MergeIterator")
- }
- return errors.Wrap(err2, "MergeIterator")
-}
-
-// NewMergeIterator creates a merge iterator.
-func NewMergeIterator(iters []y.Iterator, reverse bool) y.Iterator {
- switch len(iters) {
- case 0:
- return nil
- case 1:
- return iters[0]
- case 2:
- mi := &MergeIterator{
- reverse: reverse,
- }
- mi.left.setIterator(iters[0])
- mi.right.setIterator(iters[1])
-		// Arbitrarily point small at the left iterator; this is fixed when the user calls Rewind/Seek.
- mi.small = &mi.left
- return mi
- }
- mid := len(iters) / 2
- return NewMergeIterator(
- []y.Iterator{
- NewMergeIterator(iters[:mid], reverse),
- NewMergeIterator(iters[mid:], reverse),
- }, reverse)
-}
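-
-// Editor's illustration (not part of the original source): merging four
-// iterators builds a balanced binary tree of two-way MergeIterators, so each
-// Next costs O(log n) comparisons:
-//
-//	         merge
-//	        /     \
-//	   merge       merge
-//	   /   \       /   \
-//	 it0   it1   it2   it3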
diff --git a/vendor/github.com/dgraph-io/badger/v2/table/table.go b/vendor/github.com/dgraph-io/badger/v2/table/table.go
deleted file mode 100644
index f8841b63..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/table/table.go
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "crypto/aes"
- "encoding/binary"
- "fmt"
- "io"
- "math"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unsafe"
-
- "github.com/golang/protobuf/proto"
- "github.com/golang/snappy"
- "github.com/pkg/errors"
-
- "github.com/dgraph-io/badger/v2/options"
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/y"
- "github.com/dgraph-io/ristretto"
- "github.com/dgraph-io/ristretto/z"
-)
-
-const fileSuffix = ".sst"
-const intSize = int(unsafe.Sizeof(int(0)))
-
-// 1 word = 8 bytes.
-// sizeOfOffsetStruct is the in-memory size of a pb.BlockOffset.
-const sizeOfOffsetStruct int64 = 3*8 + // key slice header takes 3 words
-	1*8 + // offset and len together take 1 word
-	3*8 + // XXX_unrecognized slice header takes 3 words
-	1*8 // 7 words so far; add one more to round to a full slab
-
-// Options contains configurable options for Table/Builder.
-type Options struct {
- // Options for Opening/Building Table.
-
- // ChkMode is the checksum verification mode for Table.
- ChkMode options.ChecksumVerificationMode
-
- // LoadingMode is the mode to be used for loading Table.
- LoadingMode options.FileLoadingMode
-
- // Options for Table builder.
-
-	// BloomFalsePositive is the false-positive probability of the bloom filter.
- BloomFalsePositive float64
-
- // BlockSize is the size of each block inside SSTable in bytes.
- BlockSize int
-
- // DataKey is the key used to decrypt the encrypted text.
- DataKey *pb.DataKey
-
- // Compression indicates the compression algorithm used for block compression.
- Compression options.CompressionType
-
- BlockCache *ristretto.Cache
- IndexCache *ristretto.Cache
-
- // ZSTDCompressionLevel is the ZSTD compression level used for compressing blocks.
- ZSTDCompressionLevel int
-
- // When LoadBloomsOnOpen is set, bloom filters will be loaded while opening
- // the table. Otherwise, they will be loaded lazily when they're accessed.
- LoadBloomsOnOpen bool
-}
-
-// TableInterface is useful for testing.
-type TableInterface interface {
- Smallest() []byte
- Biggest() []byte
- DoesNotHave(hash uint64) bool
-}
-
-// Table represents a loaded table file with the info we have about it.
-type Table struct {
- sync.Mutex
-
- fd *os.File // Own fd.
- tableSize int // Initialized in OpenTable, using fd.Stat().
- bfLock sync.Mutex
-
- blockOffset []*pb.BlockOffset
- ref int32 // For file garbage collection. Atomic.
-	bf *z.Bloom // Nil if the index cache is enabled.
-
- mmap []byte // Memory mapped.
-
- // The following are initialized once and const.
- smallest, biggest []byte // Smallest and largest keys (with timestamps).
- id uint64 // file id, part of filename
-
- Checksum []byte
- // Stores the total size of key-values stored in this table (including the size on vlog).
- estimatedSize uint64
- indexStart int
- indexLen int
-
- IsInmemory bool // Set to true if the table is on level 0 and opened in memory.
- opt *Options
-
- noOfBlocks int // Total number of blocks.
-}
-
-// CompressionType returns the compression algorithm used for block compression.
-func (t *Table) CompressionType() options.CompressionType {
- return t.opt.Compression
-}
-
-// IncrRef increments the refcount (having to do with whether the file should be deleted)
-func (t *Table) IncrRef() {
- atomic.AddInt32(&t.ref, 1)
-}
-
-// DecrRef decrements the refcount and possibly deletes the table
-func (t *Table) DecrRef() error {
- newRef := atomic.AddInt32(&t.ref, -1)
- if newRef == 0 {
- // We can safely delete this file, because for all the current files, we always have
- // at least one reference pointing to them.
-
-		// Unmapping is necessary on Windows so that the file can be deleted.
- if t.opt.LoadingMode == options.MemoryMap {
- if err := y.Munmap(t.mmap); err != nil {
- return err
- }
- t.mmap = nil
- }
-		// fd can be nil if the table belongs to L0 and it is opened in memory. See
-		// the OpenInMemoryTable function.
- if t.fd == nil {
- return nil
- }
- if err := t.fd.Truncate(0); err != nil {
- // This is very important to let the FS know that the file is deleted.
- return err
- }
- filename := t.fd.Name()
- if err := t.fd.Close(); err != nil {
- return err
- }
- if err := os.Remove(filename); err != nil {
- return err
- }
- // Delete all blocks from the cache.
- for i := 0; i < t.noOfBlocks; i++ {
- t.opt.BlockCache.Del(t.blockCacheKey(i))
- }
- // Delete bloom filter and indices from the cache.
- t.opt.IndexCache.Del(t.blockOffsetsCacheKey())
- t.opt.IndexCache.Del(t.bfCacheKey())
- }
- return nil
-}
-
-type block struct {
- offset int
- data []byte
- checksum []byte
- entriesIndexStart int // start index of entryOffsets list
- entryOffsets []uint32
- chkLen int // checksum length
-}
-
-func (b *block) size() int64 {
- return int64(3*intSize /* Size of the offset, entriesIndexStart and chkLen */ +
- cap(b.data) + cap(b.checksum) + cap(b.entryOffsets)*4)
-}
-
-func (b block) verifyCheckSum() error {
- cs := &pb.Checksum{}
- if err := proto.Unmarshal(b.checksum, cs); err != nil {
- return y.Wrapf(err, "unable to unmarshal checksum for block")
- }
- return y.VerifyChecksum(b.data, cs)
-}
-
-// OpenTable assumes the file contains only one table and opens it. It takes ownership of fd upon
-// entry and returns a table with a reference count of one (decrementing it may delete the file;
-// consider t.Close() instead). The fd has to be writable because we call Truncate on it before
-// deleting. Checksums for the table's blocks are verified depending on the value of opts.ChkMode.
-func OpenTable(fd *os.File, opts Options) (*Table, error) {
- fileInfo, err := fd.Stat()
- if err != nil {
- // It's OK to ignore fd.Close() errs in this function because we have only read
- // from the file.
- _ = fd.Close()
- return nil, y.Wrap(err)
- }
-
- filename := fileInfo.Name()
- id, ok := ParseFileID(filename)
- if !ok {
- _ = fd.Close()
- return nil, errors.Errorf("Invalid filename: %s", filename)
- }
- t := &Table{
- fd: fd,
- ref: 1, // Caller is given one reference.
- id: id,
- opt: &opts,
- IsInmemory: false,
- }
-
- t.tableSize = int(fileInfo.Size())
-
- switch opts.LoadingMode {
- case options.LoadToRAM:
- if _, err := t.fd.Seek(0, io.SeekStart); err != nil {
- return nil, err
- }
- t.mmap = make([]byte, t.tableSize)
- n, err := t.fd.Read(t.mmap)
- if err != nil {
- // It's OK to ignore fd.Close() error because we have only read from the file.
- _ = t.fd.Close()
- return nil, y.Wrapf(err, "Failed to load file into RAM")
- }
- if n != t.tableSize {
- return nil, errors.Errorf("Failed to read all bytes from the file."+
- "Bytes in file: %d Bytes actually Read: %d", t.tableSize, n)
- }
- case options.MemoryMap:
- t.mmap, err = y.Mmap(fd, false, fileInfo.Size())
- if err != nil {
- _ = fd.Close()
- return nil, y.Wrapf(err, "Unable to map file: %q", fileInfo.Name())
- }
- case options.FileIO:
- t.mmap = nil
- default:
- panic(fmt.Sprintf("Invalid loading mode: %v", opts.LoadingMode))
- }
-
- if err := t.initBiggestAndSmallest(); err != nil {
- return nil, errors.Wrapf(err, "failed to initialize table")
- }
-
- if opts.ChkMode == options.OnTableRead || opts.ChkMode == options.OnTableAndBlockRead {
- if err := t.VerifyChecksum(); err != nil {
- _ = fd.Close()
- return nil, errors.Wrapf(err, "failed to verify checksum")
- }
- }
-
- return t, nil
-}
-
-// OpenInMemoryTable is similar to OpenTable but it opens a new table from the provided data.
-// OpenInMemoryTable is used for L0 tables.
-func OpenInMemoryTable(data []byte, id uint64, opt *Options) (*Table, error) {
- opt.LoadingMode = options.LoadToRAM
- t := &Table{
- ref: 1, // Caller is given one reference.
- opt: opt,
- mmap: data,
- tableSize: len(data),
- IsInmemory: true,
- id: id, // It is important that each table gets a unique ID.
- }
-
- if err := t.initBiggestAndSmallest(); err != nil {
- return nil, err
- }
- return t, nil
-}
-
-func (t *Table) initBiggestAndSmallest() error {
- var err error
- var ko *pb.BlockOffset
- if ko, err = t.initIndex(); err != nil {
- return errors.Wrapf(err, "failed to read index.")
- }
-
- t.smallest = ko.Key
-
- it2 := t.NewIterator(true)
- defer it2.Close()
- it2.Rewind()
- if !it2.Valid() {
- return errors.Wrapf(it2.err, "failed to initialize biggest for table %s", t.Filename())
- }
- t.biggest = it2.Key()
- return nil
-}
-
-// Close closes the open table. (Releases resources back to the OS.)
-func (t *Table) Close() error {
- if t.opt.LoadingMode == options.MemoryMap {
- if err := y.Munmap(t.mmap); err != nil {
- return err
- }
- t.mmap = nil
- }
- if t.fd == nil {
- return nil
- }
- return t.fd.Close()
-}
-
-func (t *Table) read(off, sz int) ([]byte, error) {
- if len(t.mmap) > 0 {
- if len(t.mmap[off:]) < sz {
- return nil, y.ErrEOF
- }
- return t.mmap[off : off+sz], nil
- }
-
- res := make([]byte, sz)
- nbr, err := t.fd.ReadAt(res, int64(off))
- y.NumReads.Add(1)
- y.NumBytesRead.Add(int64(nbr))
- return res, err
-}
-
-func (t *Table) readNoFail(off, sz int) []byte {
- res, err := t.read(off, sz)
- y.Check(err)
- return res
-}
-
-// initIndex reads the index, populates the necessary table fields, and returns the
-// first block offset.
-func (t *Table) initIndex() (*pb.BlockOffset, error) {
- readPos := t.tableSize
-
- // Read checksum len from the last 4 bytes.
- readPos -= 4
- buf := t.readNoFail(readPos, 4)
- checksumLen := int(y.BytesToU32(buf))
- if checksumLen < 0 {
- return nil, errors.New("checksum length less than zero. Data corrupted")
- }
-
- // Read checksum.
- expectedChk := &pb.Checksum{}
- readPos -= checksumLen
- buf = t.readNoFail(readPos, checksumLen)
- if err := proto.Unmarshal(buf, expectedChk); err != nil {
- return nil, err
- }
-
- // Read index size from the footer.
- readPos -= 4
- buf = t.readNoFail(readPos, 4)
- t.indexLen = int(y.BytesToU32(buf))
-
- // Read index.
- readPos -= t.indexLen
- t.indexStart = readPos
- data := t.readNoFail(readPos, t.indexLen)
-
- if err := y.VerifyChecksum(data, expectedChk); err != nil {
- return nil, y.Wrapf(err, "failed to verify checksum for table: %s", t.Filename())
- }
-
- index, err := t.readTableIndex()
- if err != nil {
- return nil, err
- }
-
- if t.opt.Compression == options.None {
- t.estimatedSize = index.EstimatedSize
- } else {
- // Due to compression the real size on disk is much
- // smaller than what we estimate from index.EstimatedSize.
- t.estimatedSize = uint64(t.tableSize)
- }
- t.noOfBlocks = len(index.Offsets)
-
- // No cache
- if t.opt.IndexCache == nil {
- if t.opt.LoadBloomsOnOpen {
- bf, err := z.JSONUnmarshal(index.BloomFilter)
- if err != nil {
- return nil,
- errors.Wrapf(err, "failed to unmarshal bloomfilter for table:%d", t.id)
- }
- // Keep blooms in memory.
- t.bfLock.Lock()
- t.bf = bf
- t.bfLock.Unlock()
- }
- // Keep block offsets in memory since there is no cache.
- t.blockOffset = index.Offsets
- }
-
-	// We don't need to put anything in the indexCache here. OpenTable creates an
-	// iterator, and that iterator pushes the indices into the cache.
- return index.Offsets[0], nil
-}
-
-// blockOffsets returns block offsets of this table.
-func (t *Table) blockOffsets() []*pb.BlockOffset {
- if t.opt.IndexCache == nil {
- return t.blockOffset
- }
-
- if val, ok := t.opt.IndexCache.Get(t.blockOffsetsCacheKey()); ok && val != nil {
- return val.([]*pb.BlockOffset)
- }
-
- index, err := t.readTableIndex()
- y.Check(err)
- t.opt.IndexCache.Set(
- t.blockOffsetsCacheKey(),
- index.Offsets,
- calculateOffsetsSize(index.Offsets))
-
- return index.Offsets
-}
-
-// calculateOffsetsSize returns the size of *pb.BlockOffset array
-func calculateOffsetsSize(offsets []*pb.BlockOffset) int64 {
- totalSize := sizeOfOffsetStruct * int64(len(offsets))
-
- for _, ko := range offsets {
- // add key size.
- totalSize += int64(cap(ko.Key))
- // add XXX_unrecognized size.
- totalSize += int64(cap(ko.XXX_unrecognized))
- }
- // Add three words for array size.
- return totalSize + 3*8
-}
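-
-// Editor's worked example (not part of the original source): for 10 offsets
-// whose keys have 16-byte capacity and no XXX_unrecognized bytes, the result is
-// 10*64 (structs) + 10*16 (keys) + 3*8 (slice header) = 824 bytes.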
-
-func (t *Table) block(idx int) (*block, error) {
- y.AssertTruef(idx >= 0, "idx=%d", idx)
- if idx >= t.noOfBlocks {
- return nil, errors.New("block out of index")
- }
- if t.opt.BlockCache != nil {
- key := t.blockCacheKey(idx)
- blk, ok := t.opt.BlockCache.Get(key)
- if ok && blk != nil {
- return blk.(*block), nil
- }
- }
-
- // Read the block index if it's nil
- ko := t.blockOffsets()[idx]
- blk := &block{
- offset: int(ko.Offset),
- }
- var err error
- if blk.data, err = t.read(blk.offset, int(ko.Len)); err != nil {
- return nil, errors.Wrapf(err,
- "failed to read from file: %s at offset: %d, len: %d", t.fd.Name(), blk.offset, ko.Len)
- }
-
- if t.shouldDecrypt() {
- // Decrypt the block if it is encrypted.
- if blk.data, err = t.decrypt(blk.data); err != nil {
- return nil, err
- }
- }
-
- blk.data, err = t.decompressData(blk.data)
- if err != nil {
- return nil, errors.Wrapf(err,
- "failed to decode compressed data in file: %s at offset: %d, len: %d",
- t.fd.Name(), blk.offset, ko.Len)
- }
-
- // Read meta data related to block.
- readPos := len(blk.data) - 4 // First read checksum length.
- blk.chkLen = int(y.BytesToU32(blk.data[readPos : readPos+4]))
-
- // Checksum length greater than block size could happen if the table was compressed and
- // it was opened with an incorrect compression algorithm (or the data was corrupted).
- if blk.chkLen > len(blk.data) {
- return nil, errors.New("invalid checksum length. Either the data is" +
- "corrupted or the table options are incorrectly set")
- }
-
- // Read checksum and store it
- readPos -= blk.chkLen
- blk.checksum = blk.data[readPos : readPos+blk.chkLen]
- // Move back and read numEntries in the block.
- readPos -= 4
- numEntries := int(y.BytesToU32(blk.data[readPos : readPos+4]))
- entriesIndexStart := readPos - (numEntries * 4)
- entriesIndexEnd := entriesIndexStart + numEntries*4
-
- blk.entryOffsets = y.BytesToU32Slice(blk.data[entriesIndexStart:entriesIndexEnd])
-
- blk.entriesIndexStart = entriesIndexStart
-
- // Drop checksum and checksum length.
- // The checksum is calculated for actual data + entry index + index length
- blk.data = blk.data[:readPos+4]
-
-	// Verify the checksum if the verification mode is OnBlockRead or OnTableAndBlockRead.
- if t.opt.ChkMode == options.OnBlockRead || t.opt.ChkMode == options.OnTableAndBlockRead {
- if err = blk.verifyCheckSum(); err != nil {
- return nil, err
- }
- }
- if t.opt.BlockCache != nil {
- key := t.blockCacheKey(idx)
- t.opt.BlockCache.Set(key, blk, blk.size())
- }
- return blk, nil
-}
-
-// bfCacheKey returns the cache key for bloom filter. Bloom filters are stored in index cache.
-func (t *Table) bfCacheKey() []byte {
- y.AssertTrue(t.id < math.MaxUint32)
- buf := make([]byte, 6)
- // Without the "bf" prefix, we will have conflict with the blockCacheKey.
- buf[0] = 'b'
- buf[1] = 'f'
-
- binary.BigEndian.PutUint32(buf[2:], uint32(t.id))
- return buf
-}
-
-// blockCacheKey is used to store blocks in the block cache.
-func (t *Table) blockCacheKey(idx int) []byte {
- y.AssertTrue(t.id < math.MaxUint32)
- y.AssertTrue(uint32(idx) < math.MaxUint32)
-
- buf := make([]byte, 8)
- // Assume t.ID does not overflow uint32.
- binary.BigEndian.PutUint32(buf[:4], uint32(t.ID()))
- binary.BigEndian.PutUint32(buf[4:], uint32(idx))
- return buf
-}
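-
-// Editor's note (not part of the original source): for table id 7 and block
-// index 2 the cache key is the 8 bytes [0 0 0 7 0 0 0 2], i.e. big-endian
-// uint32(id) followed by big-endian uint32(idx).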
-
-// blockOffsetsCacheKey returns the cache key for block offsets. blockOffsets
-// are stored in the index cache.
-func (t *Table) blockOffsetsCacheKey() uint64 {
- return t.id
-}
-
-// EstimatedSize returns the total size of key-values stored in this table (including the
-// disk space occupied on the value log).
-func (t *Table) EstimatedSize() uint64 { return t.estimatedSize }
-
-// Size is its file size in bytes
-func (t *Table) Size() int64 { return int64(t.tableSize) }
-
-// Smallest is its smallest key, or nil if there are none
-func (t *Table) Smallest() []byte { return t.smallest }
-
-// Biggest is its biggest key, or nil if there are none
-func (t *Table) Biggest() []byte { return t.biggest }
-
-// Filename is NOT the file name. Just kidding, it is.
-func (t *Table) Filename() string { return t.fd.Name() }
-
-// ID is the table's ID number (used to make the file name).
-func (t *Table) ID() uint64 { return t.id }
-
-// DoesNotHave returns true if (but not "only if") the table does not have the key hash.
-// It does a bloom filter lookup.
-func (t *Table) DoesNotHave(hash uint64) bool {
- // Return fast if the cache is absent.
- if t.opt.IndexCache == nil {
- t.bfLock.Lock()
- if t.bf == nil {
- y.AssertTrue(!t.opt.LoadBloomsOnOpen)
- // Load bloomfilter into memory since the cache is absent.
- t.bf, _ = t.readBloomFilter()
- }
- t.bfLock.Unlock()
- return !t.bf.Has(hash)
- }
-
- // Check if the bloom filter exists in the cache.
- if bf, ok := t.opt.IndexCache.Get(t.bfCacheKey()); bf != nil && ok {
- return !bf.(*z.Bloom).Has(hash)
- }
-
- bf, sz := t.readBloomFilter()
- t.opt.IndexCache.Set(t.bfCacheKey(), bf, int64(sz))
- return !bf.Has(hash)
-}
-
-// readBloomFilter reads the bloom filter from the SST and returns its length
-// along with the bloom filter.
-func (t *Table) readBloomFilter() (*z.Bloom, int) {
- // Read bloom filter from the SST.
- index, err := t.readTableIndex()
- y.Check(err)
-
- bf, err := z.JSONUnmarshal(index.BloomFilter)
- y.Check(err)
- return bf, len(index.BloomFilter)
-}
-
-// readTableIndex reads table index from the sst and returns its pb format.
-func (t *Table) readTableIndex() (*pb.TableIndex, error) {
- data := t.readNoFail(t.indexStart, t.indexLen)
- index := pb.TableIndex{}
- var err error
- // Decrypt the table index if it is encrypted.
- if t.shouldDecrypt() {
- if data, err = t.decrypt(data); err != nil {
- return nil, y.Wrapf(err,
- "Error while decrypting table index for the table %d in readTableIndex", t.id)
- }
- }
- y.Check(proto.Unmarshal(data, &index))
- return &index, nil
-}
-
-// VerifyChecksum verifies checksum for all blocks of table. This function is called by
-// OpenTable() function. This function is also called inside levelsController.VerifyChecksum().
-func (t *Table) VerifyChecksum() error {
- for i, os := range t.blockOffsets() {
- b, err := t.block(i)
- if err != nil {
- return y.Wrapf(err, "checksum validation failed for table: %s, block: %d, offset:%d",
- t.Filename(), i, os.Offset)
- }
-
-		// With OnBlockRead or OnTableAndBlockRead, we don't need to verify the block
-		// checksum here; verification is done while reading the block itself.
- if !(t.opt.ChkMode == options.OnBlockRead || t.opt.ChkMode == options.OnTableAndBlockRead) {
- if err = b.verifyCheckSum(); err != nil {
- return y.Wrapf(err,
- "checksum validation failed for table: %s, block: %d, offset:%d",
- t.Filename(), i, os.Offset)
- }
- }
- }
-
- return nil
-}
-
-// shouldDecrypt reports whether data should be decrypted. We decrypt only if a
-// data key exists for the table.
-func (t *Table) shouldDecrypt() bool {
- return t.opt.DataKey != nil
-}
-
-// KeyID returns data key id.
-func (t *Table) KeyID() uint64 {
- if t.opt.DataKey != nil {
- return t.opt.DataKey.KeyId
- }
- // By default it's 0, if it is plain text.
- return 0
-}
-
-// decrypt decrypts the given data. It should be called only after checking shouldDecrypt.
-func (t *Table) decrypt(data []byte) ([]byte, error) {
- // Last BlockSize bytes of the data is the IV.
- iv := data[len(data)-aes.BlockSize:]
- // Rest all bytes are data.
- data = data[:len(data)-aes.BlockSize]
- return y.XORBlock(data, t.opt.DataKey.Data, iv)
-}
-
-// ParseFileID reads the file id out of a filename.
-func ParseFileID(name string) (uint64, bool) {
- name = path.Base(name)
- if !strings.HasSuffix(name, fileSuffix) {
- return 0, false
- }
- name = strings.TrimSuffix(name, fileSuffix)
- id, err := strconv.Atoi(name)
- if err != nil {
- return 0, false
- }
- y.AssertTrue(id >= 0)
- return uint64(id), true
-}
-
-// IDToFilename does the inverse of ParseFileID
-func IDToFilename(id uint64) string {
- return fmt.Sprintf("%06d", id) + fileSuffix
-}
-
-// NewFilename should be named TableFilepath -- it combines the dir with the ID to make a table
-// filepath.
-func NewFilename(id uint64, dir string) string {
- return filepath.Join(dir, IDToFilename(id))
-}
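-
-// Editor's illustration (not part of the original source): the helpers above
-// round-trip ids and filenames:
-//
-//	IDToFilename(123)            // "000123.sst"
-//	ParseFileID("000123.sst")    // 123, true
-//	ParseFileID("000123.vlog")   // 0, false (wrong suffix)
-//	NewFilename(123, "/data")    // "/data/000123.sst"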
-
-// decompressData decompresses the given data.
-func (t *Table) decompressData(data []byte) ([]byte, error) {
- switch t.opt.Compression {
- case options.None:
- return data, nil
- case options.Snappy:
- return snappy.Decode(nil, data)
- case options.ZSTD:
- return y.ZSTDDecompress(nil, data)
- }
- return nil, errors.New("Unsupported compression type")
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/test.sh b/vendor/github.com/dgraph-io/badger/v2/test.sh
deleted file mode 100644
index 06c9396c..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/test.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-set -e
-
-go version
-
-packages=$(go list ./... | grep github.com/dgraph-io/badger/v2/)
-
-if [[ ! -z "$TEAMCITY_VERSION" ]]; then
- export GOFLAGS="-json"
-fi
-
-# Ensure that we can compile the binary.
-pushd badger
-go build -v .
-popd
-
-# Run the memory intensive tests first.
-go test -v -run='TestBigKeyValuePairs$' --manual=true
-go test -v -run='TestPushValueLogLimit' --manual=true
-
-# Run the special Truncate test.
-rm -rf p
-go test -v -run='TestTruncateVlogNoClose$' --manual=true
-truncate --size=4096 p/000000.vlog
-go test -v -run='TestTruncateVlogNoClose2$' --manual=true
-go test -v -run='TestTruncateVlogNoClose3$' --manual=true
-rm -rf p
-
-# Then the normal tests.
-echo
-echo "==> Starting test for table, skl and y package"
-go test -v -race github.com/dgraph-io/badger/v2/skl
-# Run tests for all packages except the top-level package. The top-level package supports the
-# `vlog_mmap` flag, which the rest of the packages don't.
-go test -v -race $packages
-
-echo
-echo "==> Starting tests with value log mmapped..."
-# Run top level package tests with mmap flag.
-go test -timeout=25m -v -race github.com/dgraph-io/badger/v2 --vlog_mmap=true
-
-echo
-echo "==> Starting tests with value log not mmapped..."
-go test -timeout=25m -v -race github.com/dgraph-io/badger/v2 --vlog_mmap=false
-
diff --git a/vendor/github.com/dgraph-io/badger/v2/trie/trie.go b/vendor/github.com/dgraph-io/badger/v2/trie/trie.go
deleted file mode 100644
index 98e4a9dc..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/trie/trie.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package trie
-
-type node struct {
- children map[byte]*node
- ids []uint64
-}
-
-func newNode() *node {
- return &node{
- children: make(map[byte]*node),
- ids: []uint64{},
- }
-}
-
-// Trie datastructure.
-type Trie struct {
- root *node
-}
-
-// NewTrie returns Trie.
-func NewTrie() *Trie {
- return &Trie{
- root: newNode(),
- }
-}
-
-// Add adds the id in the trie for the given prefix path.
-func (t *Trie) Add(prefix []byte, id uint64) {
- node := t.root
- for _, val := range prefix {
- child, ok := node.children[val]
- if !ok {
- child = newNode()
- node.children[val] = child
- }
- node = child
- }
- // We only need to add the id to the last node of the given prefix.
- node.ids = append(node.ids, id)
-}
-
-// Get returns prefix matched ids for the given key.
-func (t *Trie) Get(key []byte) map[uint64]struct{} {
- out := make(map[uint64]struct{})
- node := t.root
- // If root has ids that means we have subscribers for "nil/[]byte{}"
- // prefix. Add them to the list.
- if len(node.ids) > 0 {
- for _, i := range node.ids {
- out[i] = struct{}{}
- }
- }
- for _, val := range key {
- child, ok := node.children[val]
- if !ok {
- break
- }
-		// We need the ids of all the nodes along the matching key path.
- for _, id := range child.ids {
- out[id] = struct{}{}
- }
- node = child
- }
- return out
-}
-
-// Delete removes the id if it exists at the given index path.
-func (t *Trie) Delete(index []byte, id uint64) {
- node := t.root
- for _, val := range index {
- child, ok := node.children[val]
- if !ok {
- return
- }
- node = child
- }
-	// We're only removing the id, not the now-dangling path.
- out := node.ids[:0]
- for _, val := range node.ids {
- if val != id {
- out = append(out, val)
- }
- }
- for i := len(out); i < len(node.ids); i++ {
- node.ids[i] = 0 // garbage collecting
- }
- node.ids = out
-}
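-
-// Editor's illustration (not part of the original source): subscribing two ids
-// and matching them by key prefix:
-//
-//	tr := NewTrie()
-//	tr.Add([]byte("k"), 1)
-//	tr.Add([]byte("ka"), 2)
-//	ids := tr.Get([]byte("kangaroo")) // {1, 2}: both prefixes match
-//	tr.Delete([]byte("ka"), 2)        // only id 1 matches afterwards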
diff --git a/vendor/github.com/dgraph-io/badger/v2/txn.go b/vendor/github.com/dgraph-io/badger/v2/txn.go
deleted file mode 100644
index 8fc5381e..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/txn.go
+++ /dev/null
@@ -1,823 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "context"
- "encoding/hex"
- "math"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
-
- "github.com/dgraph-io/badger/v2/y"
- "github.com/dgraph-io/ristretto/z"
- "github.com/pkg/errors"
-)
-
-type oracle struct {
- isManaged bool // Does not change value, so no locking required.
- detectConflicts bool // Determines if the txns should be checked for conflicts.
-
- sync.Mutex // For nextTxnTs and commits.
- // writeChLock lock is for ensuring that transactions go to the write
- // channel in the same order as their commit timestamps.
- writeChLock sync.Mutex
- nextTxnTs uint64
-
- // Used to block NewTransaction, so all previous commits are visible to a new read.
- txnMark *y.WaterMark
-
- // Either of these is used to determine which versions can be permanently
- // discarded during compaction.
- discardTs uint64 // Used by ManagedDB.
- readMark *y.WaterMark // Used by DB.
-
- // committedTxns contains all committed writes (contains fingerprints
- // of keys written and their latest commit counter).
- committedTxns []committedTxn
- lastCleanupTs uint64
-
- // closer is used to stop watermarks.
- closer *y.Closer
-}
-
-type committedTxn struct {
- ts uint64
-	// conflictKeys keeps track of the keys written at timestamp ts.
- conflictKeys map[uint64]struct{}
-}
-
-func newOracle(opt Options) *oracle {
- orc := &oracle{
- isManaged: opt.managedTxns,
- detectConflicts: opt.DetectConflicts,
- // We're not initializing nextTxnTs and readOnlyTs. It would be done after replay in Open.
- //
- // WaterMarks must be 64-bit aligned for atomic package, hence we must use pointers here.
- // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- readMark: &y.WaterMark{Name: "badger.PendingReads"},
- txnMark: &y.WaterMark{Name: "badger.TxnTimestamp"},
- closer: y.NewCloser(2),
- }
- orc.readMark.Init(orc.closer)
- orc.txnMark.Init(orc.closer)
- return orc
-}
-
-func (o *oracle) Stop() {
- o.closer.SignalAndWait()
-}
-
-func (o *oracle) readTs() uint64 {
- if o.isManaged {
- panic("ReadTs should not be retrieved for managed DB")
- }
-
- var readTs uint64
- o.Lock()
- readTs = o.nextTxnTs - 1
- o.readMark.Begin(readTs)
- o.Unlock()
-
- // Wait for all txns which have no conflicts, have been assigned a commit
- // timestamp and are going through the write to value log and LSM tree
- // process. Not waiting here could mean that some txns which have been
- // committed would not be read.
- y.Check(o.txnMark.WaitForMark(context.Background(), readTs))
- return readTs
-}
-
-func (o *oracle) nextTs() uint64 {
- o.Lock()
- defer o.Unlock()
- return o.nextTxnTs
-}
-
-func (o *oracle) incrementNextTs() {
- o.Lock()
- defer o.Unlock()
- o.nextTxnTs++
-}
-
-// Any deleted or invalid versions at or below ts would be discarded during
-// compaction to reclaim disk space in LSM tree and thence value log.
-func (o *oracle) setDiscardTs(ts uint64) {
- o.Lock()
- defer o.Unlock()
- o.discardTs = ts
- o.cleanupCommittedTransactions()
-}
-
-func (o *oracle) discardAtOrBelow() uint64 {
- if o.isManaged {
- o.Lock()
- defer o.Unlock()
- return o.discardTs
- }
- return o.readMark.DoneUntil()
-}
-
-// hasConflict must be called while having a lock.
-func (o *oracle) hasConflict(txn *Txn) bool {
- if len(txn.reads) == 0 {
- return false
- }
- for _, committedTxn := range o.committedTxns {
- // If the committedTxn.ts is less than txn.readTs that implies that the
- // committedTxn finished before the current transaction started.
- // We don't need to check for conflict in that case.
- // This change assumes linearizability. Lack of linearizability could
- // cause the read ts of a new txn to be lower than the commit ts of
- // a txn before it (@mrjn).
- if committedTxn.ts <= txn.readTs {
- continue
- }
-
- for _, ro := range txn.reads {
- if _, has := committedTxn.conflictKeys[ro]; has {
- return true
- }
- }
- }
-
- return false
-}
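-
-// Editor's note (not part of the original source), an illustrative scenario:
-// txn A starts with readTs=5 and reads key k; txn B writes k and commits at
-// ts=6. When A commits, hasConflict sees committedTxn{ts: 6} > A.readTs and
-// finds k's fingerprint in its conflictKeys, so A's commit is rejected.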
-
-func (o *oracle) newCommitTs(txn *Txn) uint64 {
- o.Lock()
- defer o.Unlock()
-
- if o.hasConflict(txn) {
- return 0
- }
-
- var ts uint64
- if !o.isManaged {
- o.doneRead(txn)
- o.cleanupCommittedTransactions()
-
- // This is the general case, when user doesn't specify the read and commit ts.
- ts = o.nextTxnTs
- o.nextTxnTs++
- o.txnMark.Begin(ts)
-
- } else {
- // If commitTs is set, use it instead.
- ts = txn.commitTs
- }
-
- y.AssertTrue(ts >= o.lastCleanupTs)
-
- if o.detectConflicts {
- // We should ensure that txns are not added to o.committedTxns slice when
- // conflict detection is disabled otherwise this slice would keep growing.
- o.committedTxns = append(o.committedTxns, committedTxn{
- ts: ts,
- conflictKeys: txn.conflictKeys,
- })
- }
-
- return ts
-}
-
-func (o *oracle) doneRead(txn *Txn) {
- if !txn.doneRead {
- txn.doneRead = true
- o.readMark.Done(txn.readTs)
- }
-}
-
-func (o *oracle) cleanupCommittedTransactions() { // Must be called under o.Lock
- if !o.detectConflicts {
- // When detectConflicts is set to false, we do not store any
- // committedTxns and so there's nothing to clean up.
- return
- }
- // Same logic as discardAtOrBelow but unlocked
- var maxReadTs uint64
- if o.isManaged {
- maxReadTs = o.discardTs
- } else {
- maxReadTs = o.readMark.DoneUntil()
- }
-
- y.AssertTrue(maxReadTs >= o.lastCleanupTs)
-
-	// Do not run cleanup if maxReadTs (the read timestamp of the oldest
-	// transaction still in flight) has not increased.
- if maxReadTs == o.lastCleanupTs {
- return
- }
- o.lastCleanupTs = maxReadTs
-
- tmp := o.committedTxns[:0]
- for _, txn := range o.committedTxns {
- if txn.ts <= maxReadTs {
- continue
- }
- tmp = append(tmp, txn)
- }
- o.committedTxns = tmp
-}
-
-func (o *oracle) doneCommit(cts uint64) {
- if o.isManaged {
- // No need to update anything.
- return
- }
- o.txnMark.Done(cts)
-}
-
-// Txn represents a Badger transaction.
-type Txn struct {
- readTs uint64
- commitTs uint64
-
- update bool // update is used to conditionally keep track of reads.
- reads []uint64 // contains fingerprints of keys read.
- // contains fingerprints of keys written. This is used for conflict detection.
- conflictKeys map[uint64]struct{}
- readsLock sync.Mutex // guards the reads slice. See addReadKey.
-
- pendingWrites map[string]*Entry // cache stores any writes done by txn.
- duplicateWrites []*Entry // Used in managed mode to store duplicate entries.
-
- db *DB
- discarded bool
- doneRead bool
-
- size int64
- count int64
- numIterators int32
-}
-
-type pendingWritesIterator struct {
- entries []*Entry
- nextIdx int
- readTs uint64
- reversed bool
-}
-
-func (pi *pendingWritesIterator) Next() {
- pi.nextIdx++
-}
-
-func (pi *pendingWritesIterator) Rewind() {
- pi.nextIdx = 0
-}
-
-func (pi *pendingWritesIterator) Seek(key []byte) {
- key = y.ParseKey(key)
- pi.nextIdx = sort.Search(len(pi.entries), func(idx int) bool {
- cmp := bytes.Compare(pi.entries[idx].Key, key)
- if !pi.reversed {
- return cmp >= 0
- }
- return cmp <= 0
- })
-}
-
-func (pi *pendingWritesIterator) Key() []byte {
- y.AssertTrue(pi.Valid())
- entry := pi.entries[pi.nextIdx]
- return y.KeyWithTs(entry.Key, pi.readTs)
-}
-
-func (pi *pendingWritesIterator) Value() y.ValueStruct {
- y.AssertTrue(pi.Valid())
- entry := pi.entries[pi.nextIdx]
- return y.ValueStruct{
- Value: entry.Value,
- Meta: entry.meta,
- UserMeta: entry.UserMeta,
- ExpiresAt: entry.ExpiresAt,
- Version: pi.readTs,
- }
-}
-
-func (pi *pendingWritesIterator) Valid() bool {
- return pi.nextIdx < len(pi.entries)
-}
-
-func (pi *pendingWritesIterator) Close() error {
- return nil
-}
-
-func (txn *Txn) newPendingWritesIterator(reversed bool) *pendingWritesIterator {
- if !txn.update || len(txn.pendingWrites) == 0 {
- return nil
- }
- entries := make([]*Entry, 0, len(txn.pendingWrites))
- for _, e := range txn.pendingWrites {
- entries = append(entries, e)
- }
- // Number of pending writes per transaction shouldn't be too big in general.
- sort.Slice(entries, func(i, j int) bool {
- cmp := bytes.Compare(entries[i].Key, entries[j].Key)
- if !reversed {
- return cmp < 0
- }
- return cmp > 0
- })
- return &pendingWritesIterator{
- readTs: txn.readTs,
- entries: entries,
- reversed: reversed,
- }
-}
-
-func (txn *Txn) checkSize(e *Entry) error {
- count := txn.count + 1
- // Extra bytes for the version in key.
- size := txn.size + int64(e.estimateSize(txn.db.opt.ValueThreshold)) + 10
- if count >= txn.db.opt.maxBatchCount || size >= txn.db.opt.maxBatchSize {
- return ErrTxnTooBig
- }
- txn.count, txn.size = count, size
- return nil
-}
-
-func exceedsSize(prefix string, max int64, key []byte) error {
-	n := len(key)
-	if n > 1<<10 { // dump at most 1KB; slicing past len(key) would panic for short values
-		n = 1 << 10
-	}
-	return errors.Errorf("%s with size %d exceeded %d limit. %s:\n%s",
-		prefix, len(key), max, prefix, hex.Dump(key[:n]))
-}
-
-func (txn *Txn) modify(e *Entry) error {
- const maxKeySize = 65000
-
- switch {
- case !txn.update:
- return ErrReadOnlyTxn
- case txn.discarded:
- return ErrDiscardedTxn
- case len(e.Key) == 0:
- return ErrEmptyKey
- case bytes.HasPrefix(e.Key, badgerPrefix):
- return ErrInvalidKey
- case len(e.Key) > maxKeySize:
-		// Key length can't be more than uint16, as determined by table::header.
-		// To keep things safe and allow for the badger move prefix and a
-		// timestamp suffix, we cap it at 65000 instead of 65536.
- return exceedsSize("Key", maxKeySize, e.Key)
- case int64(len(e.Value)) > txn.db.opt.ValueLogFileSize:
- return exceedsSize("Value", txn.db.opt.ValueLogFileSize, e.Value)
- case txn.db.opt.InMemory && len(e.Value) > txn.db.opt.ValueThreshold:
- return exceedsSize("Value", int64(txn.db.opt.ValueThreshold), e.Value)
- }
-
- if err := txn.checkSize(e); err != nil {
- return err
- }
-
- // The txn.conflictKeys is used for conflict detection. If conflict detection
- // is disabled, we don't need to store key hashes in this map.
- if txn.db.opt.DetectConflicts {
- fp := z.MemHash(e.Key) // Avoid dealing with byte arrays.
- txn.conflictKeys[fp] = struct{}{}
- }
-	// If a duplicate entry was inserted in managed mode, move it to the duplicateWrites slice.
-	// Add the entry to duplicateWrites only if the two entries have different versions. For
-	// the same version, we will overwrite the existing entry.
- if oldEntry, ok := txn.pendingWrites[string(e.Key)]; ok && oldEntry.version != e.version {
- txn.duplicateWrites = append(txn.duplicateWrites, oldEntry)
- }
- txn.pendingWrites[string(e.Key)] = e
- return nil
-}
-
-// Set adds a key-value pair to the database.
-// It will return ErrReadOnlyTxn if the update flag was set to false when creating the transaction.
-//
-// The current transaction keeps a reference to the key and val byte slice
-// arguments. Users must not modify key and val until the end of the transaction.
-func (txn *Txn) Set(key, val []byte) error {
- return txn.SetEntry(NewEntry(key, val))
-}
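// Editor's note (not part of the diff): a minimal usage sketch of the
// ownership rule documented above. The txn keeps a reference to key and val
// until the transaction ends, so a caller that reuses its buffer must hand
// the txn a copy. setCopied and its parameters are hypothetical; it assumes
// a *badger.DB opened elsewhere with badger.Open.
package main

import badger "github.com/dgraph-io/badger/v2"

func setCopied(db *badger.DB, key, scratch []byte) error {
	return db.Update(func(txn *badger.Txn) error {
		val := append([]byte(nil), scratch...) // copy; scratch may be mutated after Set returns
		return txn.Set(key, val)
	})
}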
-
-// SetEntry takes an Entry struct and adds the key-value pair in the struct,
-// along with other metadata to the database.
-//
-// The current transaction keeps a reference to the entry passed in argument.
-// Users must not modify the entry until the end of the transaction.
-func (txn *Txn) SetEntry(e *Entry) error {
- return txn.modify(e)
-}
-
-// Delete deletes a key.
-//
-// This is done by adding a delete marker for the key at commit timestamp. Any
-// reads happening before this timestamp would be unaffected. Any reads after
-// this commit would see the deletion.
-//
-// The current transaction keeps a reference to the key byte slice argument.
-// Users must not modify the key until the end of the transaction.
-func (txn *Txn) Delete(key []byte) error {
- e := &Entry{
- Key: key,
- meta: bitDelete,
- }
- return txn.modify(e)
-}
-
-// Get looks for key and returns corresponding Item.
-// If key is not found, ErrKeyNotFound is returned.
-func (txn *Txn) Get(key []byte) (item *Item, rerr error) {
- if len(key) == 0 {
- return nil, ErrEmptyKey
- } else if txn.discarded {
- return nil, ErrDiscardedTxn
- }
-
- item = new(Item)
- if txn.update {
- if e, has := txn.pendingWrites[string(key)]; has && bytes.Equal(key, e.Key) {
- if isDeletedOrExpired(e.meta, e.ExpiresAt) {
- return nil, ErrKeyNotFound
- }
- // Fulfill from cache.
- item.meta = e.meta
- item.val = e.Value
- item.userMeta = e.UserMeta
- item.key = key
- item.status = prefetched
- item.version = txn.readTs
- item.expiresAt = e.ExpiresAt
- // We probably don't need to set db on item here.
- return item, nil
- }
-		// Only track reads if this is an update txn. No need to track a read
-		// if the txn serviced it internally.
- txn.addReadKey(key)
- }
-
- seek := y.KeyWithTs(key, txn.readTs)
- vs, err := txn.db.get(seek)
- if err != nil {
- return nil, errors.Wrapf(err, "DB::Get key: %q", key)
- }
- if vs.Value == nil && vs.Meta == 0 {
- return nil, ErrKeyNotFound
- }
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
- return nil, ErrKeyNotFound
- }
-
- item.key = key
- item.version = vs.Version
- item.meta = vs.Meta
- item.userMeta = vs.UserMeta
- item.db = txn.db
- item.vptr = y.SafeCopy(item.vptr, vs.Value)
- item.txn = txn
- item.expiresAt = vs.ExpiresAt
- return item, nil
-}
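// Editor's note (not part of the diff): a sketch of the read-your-own-writes
// behavior above — inside an update txn, Get is served from pendingWrites
// first, so uncommitted writes (and deletes) made in the same txn are visible
// to it. readOwnWrite is hypothetical; it assumes an open *badger.DB.
package main

import badger "github.com/dgraph-io/badger/v2"

func readOwnWrite(db *badger.DB) ([]byte, error) {
	var out []byte
	err := db.Update(func(txn *badger.Txn) error {
		if err := txn.Set([]byte("k"), []byte("v")); err != nil {
			return err
		}
		item, err := txn.Get([]byte("k")) // fulfilled from the txn's write cache
		if err != nil {
			return err
		}
		out, err = item.ValueCopy(nil)
		return err
	})
	return out, err
}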
-
-func (txn *Txn) addReadKey(key []byte) {
- if txn.update {
- fp := z.MemHash(key)
-
- // Because of the possibility of multiple iterators it is now possible
- // for multiple threads within a read-write transaction to read keys at
- // the same time. The reads slice is not currently thread-safe and
- // needs to be locked whenever we mark a key as read.
- txn.readsLock.Lock()
- txn.reads = append(txn.reads, fp)
- txn.readsLock.Unlock()
- }
-}
-
-// Discard discards a created transaction. This method is very important and must be called. The
-// Commit method calls this internally; calling it multiple times doesn't cause any issues. So,
-// this can safely be called via a defer right when the transaction is created.
-//
-// NOTE: If any operations are run on a discarded transaction, ErrDiscardedTxn is returned.
-func (txn *Txn) Discard() {
- if txn.discarded { // Avoid a re-run.
- return
- }
- if atomic.LoadInt32(&txn.numIterators) > 0 {
- panic("Unclosed iterator at time of Txn.Discard.")
- }
- txn.discarded = true
- if !txn.db.orc.isManaged {
- txn.db.orc.doneRead(txn)
- }
-}
-
-func (txn *Txn) commitAndSend() (func() error, error) {
- orc := txn.db.orc
- // Ensure that the order in which we get the commit timestamp is the same as
- // the order in which we push these updates to the write channel. So, we
- // acquire a writeChLock before getting a commit timestamp, and only release
- // it after pushing the entries to it.
- orc.writeChLock.Lock()
- defer orc.writeChLock.Unlock()
-
- commitTs := orc.newCommitTs(txn)
- // The commitTs can be zero if the transaction is running in managed mode.
- // Individual entries might have their own timestamps.
- if commitTs == 0 && !txn.db.opt.managedTxns {
- return nil, ErrConflict
- }
-
- keepTogether := true
- setVersion := func(e *Entry) {
- if e.version == 0 {
- e.version = commitTs
- } else {
- keepTogether = false
- }
- }
- for _, e := range txn.pendingWrites {
- setVersion(e)
- }
- // The duplicateWrites slice will be non-empty only if there are duplicate
- // entries with different versions.
- for _, e := range txn.duplicateWrites {
- setVersion(e)
- }
-
- entries := make([]*Entry, 0, len(txn.pendingWrites)+len(txn.duplicateWrites)+1)
-
- processEntry := func(e *Entry) {
- // Suffix the keys with commit ts, so the key versions are sorted in
- // descending order of commit timestamp.
- e.Key = y.KeyWithTs(e.Key, e.version)
- // Add bitTxn only if these entries are part of a transaction. We
- // support SetEntryAt(..) in managed mode which means a single
- // transaction can have entries with different timestamps. If entries
- // in a single transaction have different timestamps, we don't add the
- // transaction markers.
- if keepTogether {
- e.meta |= bitTxn
- }
- entries = append(entries, e)
- }
-
-	// The following debug information is what led to determining the cause of
-	// the bank txn violation bug, and it took a whole bunch of effort to narrow
-	// it down to here. So, keep this around for at least a couple of months.
- // var b strings.Builder
- // fmt.Fprintf(&b, "Read: %d. Commit: %d. reads: %v. writes: %v. Keys: ",
- // txn.readTs, commitTs, txn.reads, txn.conflictKeys)
- for _, e := range txn.pendingWrites {
- processEntry(e)
- }
- for _, e := range txn.duplicateWrites {
- processEntry(e)
- }
-
- if keepTogether {
- // CommitTs should not be zero if we're inserting transaction markers.
- y.AssertTrue(commitTs != 0)
- e := &Entry{
- Key: y.KeyWithTs(txnKey, commitTs),
- Value: []byte(strconv.FormatUint(commitTs, 10)),
- meta: bitFinTxn,
- }
- entries = append(entries, e)
- }
-
- req, err := txn.db.sendToWriteCh(entries)
- if err != nil {
- orc.doneCommit(commitTs)
- return nil, err
- }
- ret := func() error {
- err := req.Wait()
- // Wait before marking commitTs as done.
- // We can't defer doneCommit above, because it is being called from a
- // callback here.
- orc.doneCommit(commitTs)
- return err
- }
- return ret, nil
-}
-
-func (txn *Txn) commitPrecheck() error {
- if txn.discarded {
- return errors.New("Trying to commit a discarded txn")
- }
- keepTogether := true
- for _, e := range txn.pendingWrites {
- if e.version != 0 {
- keepTogether = false
- }
- }
-
-	// If keepTogether is true, it implies that transaction markers will be added.
-	// In that case, commitTs should never be zero. That can happen if someone
-	// uses txn.Commit instead of txn.CommitAt in managed mode. (In normal mode,
-	// keepTogether is always true and a commit ts is always assigned.)
-	if keepTogether && txn.db.opt.managedTxns && txn.commitTs == 0 {
-		return errors.New("CommitTs cannot be zero. Please use CommitAt instead")
-	}
- }
- return nil
-}
-
-// Commit commits the transaction, following these steps:
-//
-// 1. If there are no writes, return immediately.
-//
-// 2. Check if read rows were updated since txn started. If so, return ErrConflict.
-//
-// 3. If no conflict, generate a commit timestamp and update written rows' commit ts.
-//
-// 4. Batch up all writes, write them to value log and LSM tree.
-//
-// 5. If callback is provided, Badger will return immediately after checking
-// for conflicts. Writes to the database will happen in the background. If
-// there is a conflict, an error will be returned and the callback will not
-// run. If there are no conflicts, the callback will be called in the
-// background upon successful completion of writes or any error during write.
-//
-// If error is nil, the transaction is successfully committed. In case of a non-nil error, the LSM
-// tree won't be updated, so there's no need for any rollback.
-func (txn *Txn) Commit() error {
-	// txn.conflictKeys can be nil if conflict detection is turned off, so we
-	// check txn.pendingWrites instead.
- if len(txn.pendingWrites) == 0 {
- return nil // Nothing to do.
- }
- // Precheck before discarding txn.
- if err := txn.commitPrecheck(); err != nil {
- return err
- }
- defer txn.Discard()
-
- txnCb, err := txn.commitAndSend()
- if err != nil {
- return err
- }
- // If batchSet failed, LSM would not have been updated. So, no need to rollback anything.
-
- // TODO: What if some of the txns successfully make it to value log, but others fail.
- // Nothing gets updated to LSM, until a restart happens.
- return txnCb()
-}
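// Editor's note (not part of the diff): since a failed Commit leaves the LSM
// tree untouched (no rollback needed), retrying the whole closure on
// ErrConflict is safe. updateWithRetry is a hypothetical helper, not part of
// Badger's API.
package main

import badger "github.com/dgraph-io/badger/v2"

func updateWithRetry(db *badger.DB, fn func(txn *badger.Txn) error) error {
	for {
		err := db.Update(fn)
		if err != badger.ErrConflict {
			return err // nil on success, or a non-retryable error
		}
		// Conflict: a concurrent txn committed to a key this txn read.
		// Loop to run fn again against a fresh snapshot.
	}
}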
-
-type txnCb struct {
- commit func() error
- user func(error)
- err error
-}
-
-func runTxnCallback(cb *txnCb) {
- switch {
- case cb == nil:
- panic("txn callback is nil")
- case cb.user == nil:
- panic("Must have caught a nil callback for txn.CommitWith")
- case cb.err != nil:
- cb.user(cb.err)
- case cb.commit != nil:
- err := cb.commit()
- cb.user(err)
- default:
- cb.user(nil)
- }
-}
-
-// CommitWith acts like Commit, but takes a callback, which gets run via a
-// goroutine to avoid blocking this function. The callback is guaranteed to run,
-// so it is safe to increment sync.WaitGroup before calling CommitWith, and
-// decrementing it in the callback; to block until all callbacks are run.
-func (txn *Txn) CommitWith(cb func(error)) {
- if cb == nil {
- panic("Nil callback provided to CommitWith")
- }
-
- if len(txn.pendingWrites) == 0 {
- // Do not run these callbacks from here, because the CommitWith and the
- // callback might be acquiring the same locks. Instead run the callback
- // from another goroutine.
- go runTxnCallback(&txnCb{user: cb, err: nil})
- return
- }
-
- // Precheck before discarding txn.
- if err := txn.commitPrecheck(); err != nil {
- cb(err)
- return
- }
-
- defer txn.Discard()
-
- commitCb, err := txn.commitAndSend()
- if err != nil {
- go runTxnCallback(&txnCb{user: cb, err: err})
- return
- }
-
- go runTxnCallback(&txnCb{user: cb, commit: commitCb})
-}
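// Editor's note (not part of the diff): the sync.WaitGroup pattern the doc
// comment above describes — Add(1) before each CommitWith, Done() inside the
// callback, Wait() to block until every callback has run. commitAll is a
// hypothetical helper; errors are only logged here.
package main

import (
	"log"
	"sync"

	badger "github.com/dgraph-io/badger/v2"
)

func commitAll(txns []*badger.Txn) {
	var wg sync.WaitGroup
	for _, txn := range txns {
		wg.Add(1)
		txn.CommitWith(func(err error) {
			defer wg.Done()
			if err != nil {
				log.Printf("async commit failed: %v", err)
			}
		})
	}
	wg.Wait() // safe: the callback is guaranteed to run
}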
-
-// ReadTs returns the read timestamp of the transaction.
-func (txn *Txn) ReadTs() uint64 {
- return txn.readTs
-}
-
-// NewTransaction creates a new transaction. Badger supports concurrent execution of transactions,
-// providing serializable snapshot isolation, avoiding write skews. Badger achieves this by tracking
-// the keys read and at Commit time, ensuring that these read keys weren't concurrently modified by
-// another transaction.
-//
-// For read-only transactions, set update to false. In this mode, we don't track the rows read for
-// any changes. Thus, any long running iterations done in this mode wouldn't pay this overhead.
-//
-// Running transactions concurrently is OK. However, a transaction itself isn't thread safe, and
-// should only be run serially. It doesn't matter if a transaction is created by one goroutine and
-// passed down to another, as long as the Txn APIs are called serially.
-//
-// When you create a new transaction, it is absolutely essential to call
-// Discard(). This should be done irrespective of what the update param is set
-// to. Commit API internally runs Discard, but running it twice wouldn't cause
-// any issues.
-//
-// txn := db.NewTransaction(false)
-// defer txn.Discard()
-// // Call various APIs.
-func (db *DB) NewTransaction(update bool) *Txn {
- return db.newTransaction(update, false)
-}
-
-func (db *DB) newTransaction(update, isManaged bool) *Txn {
- if db.opt.ReadOnly && update {
- // DB is read-only, force read-only transaction.
- update = false
- }
-
- txn := &Txn{
- update: update,
- db: db,
- count: 1, // One extra entry for BitFin.
- size: int64(len(txnKey) + 10), // Some buffer for the extra entry.
- }
- if update {
- if db.opt.DetectConflicts {
- txn.conflictKeys = make(map[uint64]struct{})
- }
- txn.pendingWrites = make(map[string]*Entry)
- }
- if !isManaged {
- txn.readTs = db.orc.readTs()
- }
- return txn
-}
-
-// View executes a function creating and managing a read-only transaction for the user. Error
-// returned by the function is relayed by the View method.
-// If View is used with managed transactions, it would assume a read timestamp of MaxUint64.
-func (db *DB) View(fn func(txn *Txn) error) error {
- if db.IsClosed() {
- return ErrDBClosed
- }
- var txn *Txn
- if db.opt.managedTxns {
- txn = db.NewTransactionAt(math.MaxUint64, false)
- } else {
- txn = db.NewTransaction(false)
- }
- defer txn.Discard()
-
- return fn(txn)
-}
-
-// Update executes a function, creating and managing a read-write transaction
-// for the user. Error returned by the function is relayed by the Update method.
-// Update cannot be used with managed transactions.
-func (db *DB) Update(fn func(txn *Txn) error) error {
- if db.IsClosed() {
- return ErrDBClosed
- }
- if db.opt.managedTxns {
- panic("Update can only be used with managedDB=false.")
- }
- txn := db.NewTransaction(true)
- defer txn.Discard()
-
- if err := fn(txn); err != nil {
- return err
- }
-
- return txn.Commit()
-}
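// Editor's note (not part of the diff): a round trip through the two wrappers
// above — write under Update (read-write txn, auto-commit), read back under
// View (read-only txn). roundTrip is hypothetical; it assumes a non-managed,
// open *badger.DB.
package main

import badger "github.com/dgraph-io/badger/v2"

func roundTrip(db *badger.DB, key, val []byte) ([]byte, error) {
	if err := db.Update(func(txn *badger.Txn) error {
		return txn.Set(key, val)
	}); err != nil {
		return nil, err
	}
	var got []byte
	err := db.View(func(txn *badger.Txn) error {
		item, err := txn.Get(key)
		if err != nil {
			return err
		}
		got, err = item.ValueCopy(nil)
		return err
	})
	return got, err
}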
diff --git a/vendor/github.com/dgraph-io/badger/v2/util.go b/vendor/github.com/dgraph-io/badger/v2/util.go
deleted file mode 100644
index ccf7939f..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/util.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "encoding/hex"
- "io/ioutil"
- "math/rand"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/v2/table"
- "github.com/dgraph-io/badger/v2/y"
- "github.com/pkg/errors"
-)
-
-func (s *levelsController) validate() error {
- for _, l := range s.levels {
- if err := l.validate(); err != nil {
- return errors.Wrap(err, "Levels Controller")
- }
- }
- return nil
-}
-
-// Check does some sanity check on one level of data or in-memory index.
-func (s *levelHandler) validate() error {
- if s.level == 0 {
- return nil
- }
-
- s.RLock()
- defer s.RUnlock()
- numTables := len(s.tables)
- for j := 1; j < numTables; j++ {
- if j >= len(s.tables) {
- return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables)
- }
-
- if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 {
- return errors.Errorf(
- "Inter: Biggest(j-1) \n%s\n vs Smallest(j): \n%s\n: level=%d j=%d numTables=%d",
- hex.Dump(s.tables[j-1].Biggest()), hex.Dump(s.tables[j].Smallest()),
- s.level, j, numTables)
- }
-
- if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 {
- return errors.Errorf(
- "Intra: \n%s\n vs \n%s\n: level=%d j=%d numTables=%d",
- hex.Dump(s.tables[j].Smallest()), hex.Dump(s.tables[j].Biggest()), s.level, j, numTables)
- }
- }
- return nil
-}
-
-// func (s *KV) debugPrintMore() { s.lc.debugPrintMore() }
-
-// // debugPrintMore shows key ranges of each level.
-// func (s *levelsController) debugPrintMore() {
-// s.Lock()
-// defer s.Unlock()
-// for i := 0; i < s.kv.opt.MaxLevels; i++ {
-// s.levels[i].debugPrintMore()
-// }
-// }
-
-// func (s *levelHandler) debugPrintMore() {
-// s.RLock()
-// defer s.RUnlock()
-// s.elog.Printf("Level %d:", s.level)
-// for _, t := range s.tables {
-// y.Printf(" [%s, %s]", t.Smallest(), t.Biggest())
-// }
-// y.Printf("\n")
-// }
-
-// reserveFileID reserves a unique file id.
-func (s *levelsController) reserveFileID() uint64 {
- id := atomic.AddUint64(&s.nextFileID, 1)
- return id - 1
-}
-
-func getIDMap(dir string) map[uint64]struct{} {
- fileInfos, err := ioutil.ReadDir(dir)
- y.Check(err)
- idMap := make(map[uint64]struct{})
- for _, info := range fileInfos {
- if info.IsDir() {
- continue
- }
- fileID, ok := table.ParseFileID(info.Name())
- if !ok {
- continue
- }
- idMap[fileID] = struct{}{}
- }
- return idMap
-}
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/value.go b/vendor/github.com/dgraph-io/badger/v2/value.go
deleted file mode 100644
index 08653e1c..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/value.go
+++ /dev/null
@@ -1,2022 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bufio"
- "bytes"
- "crypto/aes"
- cryptorand "crypto/rand"
- "encoding/binary"
- "encoding/json"
- "fmt"
- "hash"
- "hash/crc32"
- "io"
- "io/ioutil"
- "math"
- "math/rand"
- "os"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/v2/options"
- "github.com/dgraph-io/badger/v2/pb"
- "github.com/dgraph-io/badger/v2/y"
- "github.com/pkg/errors"
- "golang.org/x/net/trace"
-)
-
-// maxVlogFileSize is the maximum size of a vlog file that can be created. The vlog
-// offset is a uint32, so we limit it to math.MaxUint32.
-var maxVlogFileSize uint32 = math.MaxUint32
-
-// Entry meta bits. The delete bit helps us distinguish between a key that has
-// never been seen and a key that has been explicitly deleted.
-const (
- bitDelete byte = 1 << 0 // Set if the key has been deleted.
- bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key.
- bitDiscardEarlierVersions byte = 1 << 2 // Set if earlier versions can be discarded.
- // Set if item shouldn't be discarded via compactions (used by merge operator)
- bitMergeEntry byte = 1 << 3
- // The MSB 2 bits are for transactions.
- bitTxn byte = 1 << 6 // Set if the entry is part of a txn.
- bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log.
-
- mi int64 = 1 << 20
-
- // The number of updates after which discard map should be flushed into badger.
- discardStatsFlushThreshold = 100
-
- // size of vlog header.
- // +----------------+------------------+
- // | keyID(8 bytes) | baseIV(12 bytes)|
- // +----------------+------------------+
- vlogHeaderSize = 20
-)
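// Editor's note (not part of the diff): an in-package sketch of how the meta
// bits above are typically tested; describeMeta is invented for illustration
// (it relies on this file's existing "strings" import).
func describeMeta(meta byte) string {
	var parts []string
	if meta&bitDelete > 0 {
		parts = append(parts, "delete marker")
	}
	if meta&bitValuePointer > 0 {
		parts = append(parts, "value stored in vlog")
	}
	if meta&bitDiscardEarlierVersions > 0 {
		parts = append(parts, "discard earlier versions")
	}
	if meta&bitTxn > 0 {
		parts = append(parts, "txn entry")
	}
	if meta&bitFinTxn > 0 {
		parts = append(parts, "txn end marker")
	}
	return strings.Join(parts, ", ")
}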
-
-type logFile struct {
- path string
- // This is a lock on the log file. It guards the fd’s value, the file’s
- // existence and the file’s memory map.
- //
- // Use shared ownership when reading/writing the file or memory map, use
- // exclusive ownership to open/close the descriptor, unmap or remove the file.
- lock sync.RWMutex
- fd *os.File
- fid uint32
- fmap []byte
- size uint32
- loadingMode options.FileLoadingMode
- dataKey *pb.DataKey
- baseIV []byte
- registry *KeyRegistry
-}
-
-// encodeEntry will encode entry to the buf
-// layout of entry
-// +--------+-----+-------+-------+
-// | header | key | value | crc32 |
-// +--------+-----+-------+-------+
-func (lf *logFile) encodeEntry(e *Entry, buf *bytes.Buffer, offset uint32) (int, error) {
- h := header{
- klen: uint32(len(e.Key)),
- vlen: uint32(len(e.Value)),
- expiresAt: e.ExpiresAt,
- meta: e.meta,
- userMeta: e.UserMeta,
- }
-
- // encode header.
- var headerEnc [maxHeaderSize]byte
- sz := h.Encode(headerEnc[:])
- y.Check2(buf.Write(headerEnc[:sz]))
- // write hash.
- hash := crc32.New(y.CastagnoliCrcTable)
- y.Check2(hash.Write(headerEnc[:sz]))
- // we'll encrypt only key and value.
- if lf.encryptionEnabled() {
-		// TODO: no need to allocate the bytes. We can compute the encrypted buf one
-		// by one since we're using CTR mode of AES encryption; ordering won't change.
-		// Needs some refactoring in XORBlock so that it works like a stream cipher.
- eBuf := make([]byte, 0, len(e.Key)+len(e.Value))
- eBuf = append(eBuf, e.Key...)
- eBuf = append(eBuf, e.Value...)
- var err error
- eBuf, err = y.XORBlock(eBuf, lf.dataKey.Data, lf.generateIV(offset))
- if err != nil {
- return 0, y.Wrapf(err, "Error while encoding entry for vlog.")
- }
- // write encrypted buf.
- y.Check2(buf.Write(eBuf))
- // write the hash.
- y.Check2(hash.Write(eBuf))
- } else {
- // Encryption is disabled so writing directly to the buffer.
- // write key.
- y.Check2(buf.Write(e.Key))
- // write key hash.
- y.Check2(hash.Write(e.Key))
- // write value.
- y.Check2(buf.Write(e.Value))
- // write value hash.
- y.Check2(hash.Write(e.Value))
- }
- // write crc32 hash.
- var crcBuf [crc32.Size]byte
- binary.BigEndian.PutUint32(crcBuf[:], hash.Sum32())
- y.Check2(buf.Write(crcBuf[:]))
- // return encoded length.
- return len(headerEnc[:sz]) + len(e.Key) + len(e.Value) + len(crcBuf), nil
-}
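// Editor's note (not part of the diff): the inverse of the layout above, as
// an in-package sketch — the record's trailing 4 bytes are a big-endian
// Castagnoli CRC over everything before them (header plus key/value, whether
// or not the kv portion is encrypted). checksumOK is hypothetical.
func checksumOK(rec []byte) bool {
	if len(rec) < crc32.Size {
		return false
	}
	body, tail := rec[:len(rec)-crc32.Size], rec[len(rec)-crc32.Size:]
	return binary.BigEndian.Uint32(tail) == crc32.Checksum(body, y.CastagnoliCrcTable)
}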
-
-func (lf *logFile) decodeEntry(buf []byte, offset uint32) (*Entry, error) {
- var h header
- hlen := h.Decode(buf)
- kv := buf[hlen:]
- if lf.encryptionEnabled() {
- var err error
- // No need to worry about mmap. because, XORBlock allocates a byte array to do the
- // xor. So, the given slice is not being mutated.
- if kv, err = lf.decryptKV(kv, offset); err != nil {
- return nil, err
- }
- }
- e := &Entry{
- meta: h.meta,
- UserMeta: h.userMeta,
- ExpiresAt: h.expiresAt,
- offset: offset,
- Key: kv[:h.klen],
- Value: kv[h.klen : h.klen+h.vlen],
- }
- return e, nil
-}
-
-func (lf *logFile) decryptKV(buf []byte, offset uint32) ([]byte, error) {
- return y.XORBlock(buf, lf.dataKey.Data, lf.generateIV(offset))
-}
-
-// keyID returns the datakey's ID.
-func (lf *logFile) keyID() uint64 {
- if lf.dataKey == nil {
-		// If there is no datakey, we return 0, which means no encryption.
- return 0
- }
- return lf.dataKey.KeyId
-}
-
-func (lf *logFile) mmap(size int64) (err error) {
- if lf.loadingMode != options.MemoryMap {
- // Nothing to do
- return nil
- }
- lf.fmap, err = y.Mmap(lf.fd, false, size)
- if err == nil {
- err = y.Madvise(lf.fmap, false) // Disable readahead
- }
- return err
-}
-
-func (lf *logFile) encryptionEnabled() bool {
- return lf.dataKey != nil
-}
-
-func (lf *logFile) munmap() (err error) {
- if lf.loadingMode != options.MemoryMap || len(lf.fmap) == 0 {
- // Nothing to do
- return nil
- }
-
- if err := y.Munmap(lf.fmap); err != nil {
- return errors.Wrapf(err, "Unable to munmap value log: %q", lf.path)
- }
-	// This is important. We should set the map to nil because the munmap
-	// system call doesn't change the length or capacity of the fmap slice.
- lf.fmap = nil
- return nil
-}
-
-// Acquire lock on mmap/file if you are calling this
-func (lf *logFile) read(p valuePointer, s *y.Slice) (buf []byte, err error) {
- var nbr int64
- offset := p.Offset
- if lf.loadingMode == options.FileIO {
- buf = s.Resize(int(p.Len))
- var n int
- n, err = lf.fd.ReadAt(buf, int64(offset))
- nbr = int64(n)
- } else {
- // Do not convert size to uint32, because the lf.fmap can be of size
- // 4GB, which overflows the uint32 during conversion to make the size 0,
- // causing the read to fail with ErrEOF. See issue #585.
- size := int64(len(lf.fmap))
- valsz := p.Len
- lfsz := atomic.LoadUint32(&lf.size)
- if int64(offset) >= size || int64(offset+valsz) > size ||
- // Ensure that the read is within the file's actual size. It might be possible that
- // the offset+valsz length is beyond the file's actual size. This could happen when
- // dropAll and iterations are running simultaneously.
- int64(offset+valsz) > int64(lfsz) {
- err = y.ErrEOF
- } else {
- buf = lf.fmap[offset : offset+valsz]
- nbr = int64(valsz)
- }
- }
- y.NumReads.Add(1)
- y.NumBytesRead.Add(nbr)
- return buf, err
-}
-
-// generateIV generates an IV by appending the given offset to the base IV.
-func (lf *logFile) generateIV(offset uint32) []byte {
-	iv := make([]byte, aes.BlockSize)
-	// baseIV is 12 bytes.
-	y.AssertTrue(12 == copy(iv[:12], lf.baseIV))
-	// The remaining 4 bytes are obtained from the offset.
-	binary.BigEndian.PutUint32(iv[12:], offset)
- return iv
-}
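// Editor's note (not part of the diff): a sketch of what y.XORBlock is
// assumed to do with the IV built above — AES in CTR mode, XORing the
// keystream over the payload. In CTR mode encryption and decryption are the
// same operation, which is why decryptKV can reuse the same IV scheme.
// xorWithCTR is hypothetical and would need a "crypto/cipher" import.
func xorWithCTR(payload, key, iv []byte) ([]byte, error) {
	block, err := aes.NewCipher(key) // key length selects AES-128/192/256
	if err != nil {
		return nil, err
	}
	out := make([]byte, len(payload))
	cipher.NewCTR(block, iv).XORKeyStream(out, payload) // iv must be aes.BlockSize bytes
	return out, nil
}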
-
-func (lf *logFile) doneWriting(offset uint32) error {
- // Sync before acquiring lock. (We call this from write() and thus know we have shared access
- // to the fd.)
- if err := lf.fd.Sync(); err != nil {
- return errors.Wrapf(err, "Unable to sync value log: %q", lf.path)
- }
-
-	// Previously, we acquired a lock on lf.lock here because we invalidated the file
-	// descriptor by reopening it as read-only. Now we don't invalidate the fd; we unmap it,
-	// truncate it, and remap it. That creates a window where the mmap is no longer valid
-	// while someone might be reading it, which would segfault. Therefore, we still need a lock.
- lf.lock.Lock()
- defer lf.lock.Unlock()
-
- // Unmap file before we truncate it. Windows cannot truncate a file that is mmapped.
- if err := lf.munmap(); err != nil {
- return errors.Wrapf(err, "failed to munmap vlog file %s", lf.fd.Name())
- }
-
- // TODO: Confirm if we need to run a file sync after truncation.
- // Truncation must run after unmapping, otherwise Windows would crap itself.
- if err := lf.fd.Truncate(int64(offset)); err != nil {
- return errors.Wrapf(err, "Unable to truncate file: %q", lf.path)
- }
-
- // Reinitialize the log file. This will mmap the entire file.
- if err := lf.init(); err != nil {
- return errors.Wrapf(err, "failed to initialize file %s", lf.fd.Name())
- }
-
- // Previously we used to close the file after it was written and reopen it in read-only mode.
- // We no longer open files in read-only mode. We keep all vlog files open in read-write mode.
- return nil
-}
-
-// You must hold lf.lock to sync()
-func (lf *logFile) sync() error {
- return lf.fd.Sync()
-}
-
-var errStop = errors.New("Stop iteration")
-var errTruncate = errors.New("Do truncate")
-var errDeleteVlogFile = errors.New("Delete vlog file")
-
-type logEntry func(e Entry, vp valuePointer) error
-
-type safeRead struct {
- k []byte
- v []byte
-
- recordOffset uint32
- lf *logFile
-}
-
-// hashReader implements the io.Reader and io.ByteReader interfaces. It also keeps track of the
-// number of bytes read. The hashReader writes everything it reads from r into h (the hash).
-type hashReader struct {
- r io.Reader
- h hash.Hash32
- bytesRead int // Number of bytes read.
-}
-
-func newHashReader(r io.Reader) *hashReader {
- hash := crc32.New(y.CastagnoliCrcTable)
- return &hashReader{
- r: r,
- h: hash,
- }
-}
-
-// Read reads len(p) bytes from the reader. Returns the number of bytes read, error on failure.
-func (t *hashReader) Read(p []byte) (int, error) {
- n, err := t.r.Read(p)
- if err != nil {
- return n, err
- }
- t.bytesRead += n
- return t.h.Write(p[:n])
-}
-
-// ReadByte reads exactly one byte from the reader. Returns error on failure.
-func (t *hashReader) ReadByte() (byte, error) {
- b := make([]byte, 1)
- _, err := t.Read(b)
- return b[0], err
-}
-
-// Sum32 returns the sum32 of the underlying hash.
-func (t *hashReader) Sum32() uint32 {
- return t.h.Sum32()
-}
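// Editor's aside (not part of the diff): hashReader is close to io.TeeReader
// with a CRC32 hash as the writer, as sketched below — but it also counts
// bytes and implements io.ByteReader, which the varint-based header decoding
// presumably needs, hence the custom type. teeEquivalent is hypothetical.
func teeEquivalent(r io.Reader) (io.Reader, hash.Hash32) {
	h := crc32.New(y.CastagnoliCrcTable)
	return io.TeeReader(r, h), h // every byte read from r is also written to h
}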
-
-// Entry reads an entry from the provided reader. It also validates the checksum for every entry
-// read. Returns error on failure.
-func (r *safeRead) Entry(reader io.Reader) (*Entry, error) {
- tee := newHashReader(reader)
- var h header
- hlen, err := h.DecodeFrom(tee)
- if err != nil {
- return nil, err
- }
-	if h.klen > uint32(1<<16) { // Key length must fit in a uint16.
- return nil, errTruncate
- }
- kl := int(h.klen)
- if cap(r.k) < kl {
- r.k = make([]byte, 2*kl)
- }
- vl := int(h.vlen)
- if cap(r.v) < vl {
- r.v = make([]byte, 2*vl)
- }
-
- e := &Entry{}
- e.offset = r.recordOffset
- e.hlen = hlen
- buf := make([]byte, h.klen+h.vlen)
- if _, err := io.ReadFull(tee, buf[:]); err != nil {
- if err == io.EOF {
- err = errTruncate
- }
- return nil, err
- }
- if r.lf.encryptionEnabled() {
- if buf, err = r.lf.decryptKV(buf[:], r.recordOffset); err != nil {
- return nil, err
- }
- }
- e.Key = buf[:h.klen]
- e.Value = buf[h.klen:]
- var crcBuf [crc32.Size]byte
- if _, err := io.ReadFull(reader, crcBuf[:]); err != nil {
- if err == io.EOF {
- err = errTruncate
- }
- return nil, err
- }
- crc := y.BytesToU32(crcBuf[:])
- if crc != tee.Sum32() {
- return nil, errTruncate
- }
- e.meta = h.meta
- e.UserMeta = h.userMeta
- e.ExpiresAt = h.expiresAt
- return e, nil
-}
-
-// iterate iterates over a log file. It doesn't allocate new memory for every kv pair;
-// therefore, the kv pair is only valid for the duration of the fn call.
-func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
- fi, err := lf.fd.Stat()
- if err != nil {
- return 0, err
- }
- if offset == 0 {
- // If offset is set to zero, let's advance past the encryption key header.
- offset = vlogHeaderSize
- }
- if int64(offset) == fi.Size() {
- // We're at the end of the file already. No need to do anything.
- return offset, nil
- }
- if vlog.opt.ReadOnly {
- // We're not at the end of the file. We'd need to replay the entries, or
- // possibly truncate the file.
- return 0, ErrReplayNeeded
- }
-
- // We're not at the end of the file. Let's Seek to the offset and start reading.
- if _, err := lf.fd.Seek(int64(offset), io.SeekStart); err != nil {
- return 0, errFile(err, lf.path, "Unable to seek")
- }
-
- reader := bufio.NewReader(lf.fd)
- read := &safeRead{
- k: make([]byte, 10),
- v: make([]byte, 10),
- recordOffset: offset,
- lf: lf,
- }
-
- var lastCommit uint64
- var validEndOffset uint32 = offset
-
-loop:
- for {
- e, err := read.Entry(reader)
- switch {
- case err == io.EOF:
- break loop
- case err == io.ErrUnexpectedEOF || err == errTruncate:
- break loop
- case err != nil:
- return 0, err
- case e == nil:
- continue
- }
-
- var vp valuePointer
- vp.Len = uint32(int(e.hlen) + len(e.Key) + len(e.Value) + crc32.Size)
- read.recordOffset += vp.Len
-
- vp.Offset = e.offset
- vp.Fid = lf.fid
-
- switch {
- case e.meta&bitTxn > 0:
- txnTs := y.ParseTs(e.Key)
- if lastCommit == 0 {
- lastCommit = txnTs
- }
- if lastCommit != txnTs {
- break loop
- }
-
- case e.meta&bitFinTxn > 0:
- txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
- if err != nil || lastCommit != txnTs {
- break loop
- }
- // Got the end of txn. Now we can store them.
- lastCommit = 0
- validEndOffset = read.recordOffset
-
- default:
- if lastCommit != 0 {
- // This is most likely an entry which was moved as part of GC.
- // We shouldn't get this entry in the middle of a transaction.
- break loop
- }
- validEndOffset = read.recordOffset
- }
-
- if err := fn(*e, vp); err != nil {
- if err == errStop {
- break
- }
- return 0, errFile(err, lf.path, "Iteration function")
- }
- }
- return validEndOffset, nil
-}
-
-func (vlog *valueLog) rewrite(f *logFile, tr trace.Trace) error {
- vlog.filesLock.RLock()
- maxFid := vlog.maxFid
- vlog.filesLock.RUnlock()
- y.AssertTruef(uint32(f.fid) < maxFid, "fid to move: %d. Current max fid: %d", f.fid, maxFid)
- tr.LazyPrintf("Rewriting fid: %d", f.fid)
-
- wb := make([]*Entry, 0, 1000)
- var size int64
-
- y.AssertTrue(vlog.db != nil)
- var count, moved int
- fe := func(e Entry) error {
- count++
- if count%100000 == 0 {
- tr.LazyPrintf("Processing entry %d", count)
- }
-
- vs, err := vlog.db.get(e.Key)
- if err != nil {
- return err
- }
- if discardEntry(e, vs, vlog.db) {
- return nil
- }
-
- // Value is still present in value log.
- if len(vs.Value) == 0 {
- return errors.Errorf("Empty value: %+v", vs)
- }
- var vp valuePointer
- vp.Decode(vs.Value)
-
- // If the entry found from the LSM Tree points to a newer vlog file, don't do anything.
- if vp.Fid > f.fid {
- return nil
- }
- // If the entry found from the LSM Tree points to an offset greater than the one
- // read from vlog, don't do anything.
- if vp.Offset > e.offset {
- return nil
- }
- // If the entry read from LSM Tree and vlog file point to the same vlog file and offset,
- // insert them back into the DB.
- // NOTE: It might be possible that the entry read from the LSM Tree points to
- // an older vlog file. See the comments in the else part.
- if vp.Fid == f.fid && vp.Offset == e.offset {
- moved++
- // This new entry only contains the key, and a pointer to the value.
- ne := new(Entry)
- ne.meta = 0 // Remove all bits. Different keyspace doesn't need these bits.
- ne.UserMeta = e.UserMeta
- ne.ExpiresAt = e.ExpiresAt
-
- // Create a new key in a separate keyspace, prefixed by moveKey. We are not
- // allowed to rewrite an older version of key in the LSM tree, because then this older
- // version would be at the top of the LSM tree. To work correctly, reads expect the
- // latest versions to be at the top, and the older versions at the bottom.
- if bytes.HasPrefix(e.Key, badgerMove) {
- ne.Key = append([]byte{}, e.Key...)
- } else {
- ne.Key = make([]byte, len(badgerMove)+len(e.Key))
- n := copy(ne.Key, badgerMove)
- copy(ne.Key[n:], e.Key)
- }
-
- ne.Value = append([]byte{}, e.Value...)
- es := int64(ne.estimateSize(vlog.opt.ValueThreshold))
-			// Consider the size of the value as well when computing the total size
-			// of the batch. There have been reports of high memory usage in
-			// rewrite because we didn't consider the value size. See #1292.
- es += int64(len(e.Value))
-
- // Ensure length and size of wb is within transaction limits.
- if int64(len(wb)+1) >= vlog.opt.maxBatchCount ||
- size+es >= vlog.opt.maxBatchSize {
- tr.LazyPrintf("request has %d entries, size %d", len(wb), size)
- if err := vlog.db.batchSet(wb); err != nil {
- return err
- }
- size = 0
- wb = wb[:0]
- }
- wb = append(wb, ne)
- size += es
- } else {
- // It might be possible that the entry read from LSM Tree points to an older vlog file.
- // This can happen in the following situation. Assume DB is opened with
- // numberOfVersionsToKeep=1
- //
- // Now, if we have ONLY one key in the system "FOO" which has been updated 3 times and
- // the same key has been garbage collected 3 times, we'll have 3 versions of the movekey
- // for the same key "FOO".
- // NOTE: moveKeyi is the moveKey with version i
- // Assume we have 3 move keys in L0.
- // - moveKey1 (points to vlog file 10),
- // - moveKey2 (points to vlog file 14) and
- // - moveKey3 (points to vlog file 15).
-
- // Also, assume there is another move key "moveKey1" (points to vlog file 6) (this is
- // also a move Key for key "FOO" ) on upper levels (let's say 3). The move key
- // "moveKey1" on level 0 was inserted because vlog file 6 was GCed.
- //
- // Here's what the arrangement looks like
- // L0 => (moveKey1 => vlog10), (moveKey2 => vlog14), (moveKey3 => vlog15)
- // L1 => ....
- // L2 => ....
- // L3 => (moveKey1 => vlog6)
- //
- // When L0 compaction runs, it keeps only moveKey3 because the number of versions
- // to keep is set to 1. (we've dropped moveKey1's latest version)
- //
- // The new arrangement of keys is
- // L0 => ....
- // L1 => (moveKey3 => vlog15)
- // L2 => ....
- // L3 => (moveKey1 => vlog6)
- //
- // Now if we try to GC vlog file 10, the entry read from vlog file will point to vlog10
- // but the entry read from LSM Tree will point to vlog6. The move key read from LSM tree
- // will point to vlog6 because we've asked for version 1 of the move key.
- //
-			// This might seem like an issue, but it isn't, because the user has set
-			// the number of versions to keep to 1, and the latest version of moveKey
-			// points to the correct vlog file and offset. The stale move key on L3
-			// will eventually be dropped by compaction, because there are newer
-			// versions in the upper levels.
- }
- return nil
- }
-
- _, err := vlog.iterate(f, 0, func(e Entry, vp valuePointer) error {
- return fe(e)
- })
- if err != nil {
- return err
- }
-
- tr.LazyPrintf("request has %d entries, size %d", len(wb), size)
- batchSize := 1024
- var loops int
- for i := 0; i < len(wb); {
- loops++
- if batchSize == 0 {
- vlog.db.opt.Warningf("We shouldn't reach batch size of zero.")
- return ErrNoRewrite
- }
- end := i + batchSize
- if end > len(wb) {
- end = len(wb)
- }
- if err := vlog.db.batchSet(wb[i:end]); err != nil {
- if err == ErrTxnTooBig {
- // Decrease the batch size to half.
- batchSize = batchSize / 2
- tr.LazyPrintf("Dropped batch size to %d", batchSize)
- continue
- }
- return err
- }
- i += batchSize
- }
- tr.LazyPrintf("Processed %d entries in %d loops", len(wb), loops)
- tr.LazyPrintf("Total entries: %d. Moved: %d", count, moved)
- tr.LazyPrintf("Removing fid: %d", f.fid)
- var deleteFileNow bool
- // Entries written to LSM. Remove the older file now.
- {
- vlog.filesLock.Lock()
- // Just a sanity-check.
- if _, ok := vlog.filesMap[f.fid]; !ok {
- vlog.filesLock.Unlock()
- return errors.Errorf("Unable to find fid: %d", f.fid)
- }
- if vlog.iteratorCount() == 0 {
- delete(vlog.filesMap, f.fid)
- deleteFileNow = true
- } else {
- vlog.filesToBeDeleted = append(vlog.filesToBeDeleted, f.fid)
- }
- vlog.filesLock.Unlock()
- }
-
- if deleteFileNow {
- if err := vlog.deleteLogFile(f); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (vlog *valueLog) deleteMoveKeysFor(fid uint32, tr trace.Trace) error {
- db := vlog.db
- var result []*Entry
- var count, pointers uint64
- tr.LazyPrintf("Iterating over move keys to find invalids for fid: %d", fid)
- err := db.View(func(txn *Txn) error {
- opt := DefaultIteratorOptions
- opt.InternalAccess = true
- opt.PrefetchValues = false
- itr := txn.NewIterator(opt)
- defer itr.Close()
-
- for itr.Seek(badgerMove); itr.ValidForPrefix(badgerMove); itr.Next() {
- count++
- item := itr.Item()
- if item.meta&bitValuePointer == 0 {
- continue
- }
- pointers++
- var vp valuePointer
- vp.Decode(item.vptr)
- if vp.Fid == fid {
- e := &Entry{Key: y.KeyWithTs(item.Key(), item.Version()), meta: bitDelete}
- result = append(result, e)
- }
- }
- return nil
- })
- if err != nil {
- tr.LazyPrintf("Got error while iterating move keys: %v", err)
- tr.SetError()
- return err
- }
- tr.LazyPrintf("Num total move keys: %d. Num pointers: %d", count, pointers)
- tr.LazyPrintf("Number of invalid move keys found: %d", len(result))
- batchSize := 10240
- for i := 0; i < len(result); {
- end := i + batchSize
- if end > len(result) {
- end = len(result)
- }
- if err := db.batchSet(result[i:end]); err != nil {
- if err == ErrTxnTooBig {
- batchSize /= 2
- tr.LazyPrintf("Dropped batch size to %d", batchSize)
- continue
- }
- tr.LazyPrintf("Error while doing batchSet: %v", err)
- tr.SetError()
- return err
- }
- i += batchSize
- }
- tr.LazyPrintf("Move keys deletion done.")
- return nil
-}
-
-func (vlog *valueLog) incrIteratorCount() {
- atomic.AddInt32(&vlog.numActiveIterators, 1)
-}
-
-func (vlog *valueLog) iteratorCount() int {
- return int(atomic.LoadInt32(&vlog.numActiveIterators))
-}
-
-func (vlog *valueLog) decrIteratorCount() error {
- num := atomic.AddInt32(&vlog.numActiveIterators, -1)
- if num != 0 {
- return nil
- }
-
- vlog.filesLock.Lock()
- lfs := make([]*logFile, 0, len(vlog.filesToBeDeleted))
- for _, id := range vlog.filesToBeDeleted {
- lfs = append(lfs, vlog.filesMap[id])
- delete(vlog.filesMap, id)
- }
- vlog.filesToBeDeleted = nil
- vlog.filesLock.Unlock()
-
- for _, lf := range lfs {
- if err := vlog.deleteLogFile(lf); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (vlog *valueLog) deleteLogFile(lf *logFile) error {
- if lf == nil {
- return nil
- }
- lf.lock.Lock()
- defer lf.lock.Unlock()
-
- path := vlog.fpath(lf.fid)
- if err := lf.munmap(); err != nil {
- _ = lf.fd.Close()
- return err
- }
- lf.fmap = nil
- if err := lf.fd.Close(); err != nil {
- return err
- }
- return os.Remove(path)
-}
-
-func (vlog *valueLog) dropAll() (int, error) {
- // If db is opened in InMemory mode, we don't need to do anything since there are no vlog files.
- if vlog.db.opt.InMemory {
- return 0, nil
- }
- // We don't want to block dropAll on any pending transactions. So, don't worry about iterator
- // count.
- var count int
- deleteAll := func() error {
- vlog.filesLock.Lock()
- defer vlog.filesLock.Unlock()
- for _, lf := range vlog.filesMap {
- if err := vlog.deleteLogFile(lf); err != nil {
- return err
- }
- count++
- }
- vlog.filesMap = make(map[uint32]*logFile)
- return nil
- }
- if err := deleteAll(); err != nil {
- return count, err
- }
-
- vlog.db.opt.Infof("Value logs deleted. Creating value log file: 0")
- if _, err := vlog.createVlogFile(0); err != nil { // Called while writes are stopped.
- return count, err
- }
- return count, nil
-}
-
-// lfDiscardStats keeps track of the amount of data that could be discarded for
-// a given logfile.
-type lfDiscardStats struct {
- sync.RWMutex
- m map[uint32]int64
- flushChan chan map[uint32]int64
- closer *y.Closer
- updatesSinceFlush int
-}
-
-type valueLog struct {
- dirPath string
-
- // guards our view of which files exist, which to be deleted, how many active iterators
- filesLock sync.RWMutex
- filesMap map[uint32]*logFile
- maxFid uint32
- filesToBeDeleted []uint32
- // A refcount of iterators -- when this hits zero, we can delete the filesToBeDeleted.
- numActiveIterators int32
-
- db *DB
- writableLogOffset uint32 // read by read, written by write. Must access via atomics.
- numEntriesWritten uint32
- opt Options
-
- garbageCh chan struct{}
- lfDiscardStats *lfDiscardStats
-}
-
-func vlogFilePath(dirPath string, fid uint32) string {
- return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
-}
-
-func (vlog *valueLog) fpath(fid uint32) string {
- return vlogFilePath(vlog.dirPath, fid)
-}
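// Editor's note (not part of the diff): the %06d naming scheme above in
// action. demoVlogPath is hypothetical; with a Unix-like os.PathSeparator it
// prints "/data/vlog/000007.vlog".
func demoVlogPath() {
	fmt.Println(vlogFilePath("/data/vlog", 7))
}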
-
-func (vlog *valueLog) populateFilesMap() error {
- vlog.filesMap = make(map[uint32]*logFile)
-
- files, err := ioutil.ReadDir(vlog.dirPath)
- if err != nil {
- return errFile(err, vlog.dirPath, "Unable to open log dir.")
- }
-
- found := make(map[uint64]struct{})
- for _, file := range files {
- if !strings.HasSuffix(file.Name(), ".vlog") {
- continue
- }
- fsz := len(file.Name())
- fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
- if err != nil {
- return errFile(err, file.Name(), "Unable to parse log id.")
- }
- if _, ok := found[fid]; ok {
- return errFile(err, file.Name(), "Duplicate file found. Please delete one.")
- }
- found[fid] = struct{}{}
-
- lf := &logFile{
- fid: uint32(fid),
- path: vlog.fpath(uint32(fid)),
- loadingMode: vlog.opt.ValueLogLoadingMode,
- registry: vlog.db.registry,
- }
- vlog.filesMap[uint32(fid)] = lf
- if vlog.maxFid < uint32(fid) {
- vlog.maxFid = uint32(fid)
- }
- }
- return nil
-}
-
-func (lf *logFile) open(path string, flags uint32) error {
- var err error
- if lf.fd, err = y.OpenExistingFile(path, flags); err != nil {
- return y.Wrapf(err, "Error while opening file in logfile %s", path)
- }
-
- fi, err := lf.fd.Stat()
- if err != nil {
- return errFile(err, lf.path, "Unable to run file.Stat")
- }
- sz := fi.Size()
- y.AssertTruef(
- sz <= math.MaxUint32,
- "file size: %d greater than %d",
- uint32(sz), uint32(math.MaxUint32),
- )
- lf.size = uint32(sz)
- if sz < vlogHeaderSize {
-		// Every vlog file should be at least vlogHeaderSize bytes. If it is smaller,
-		// it must have been corrupted. No need to handle that here: the log replayer
-		// will truncate and bootstrap the logfile.
- return nil
- }
- buf := make([]byte, vlogHeaderSize)
- if _, err = lf.fd.Read(buf); err != nil {
- return y.Wrapf(err, "Error while reading vlog file %d", lf.fid)
- }
- keyID := binary.BigEndian.Uint64(buf[:8])
- var dk *pb.DataKey
- // retrieve datakey.
- if dk, err = lf.registry.dataKey(keyID); err != nil {
- return y.Wrapf(err, "While opening vlog file %d", lf.fid)
- }
- lf.dataKey = dk
- lf.baseIV = buf[8:]
- y.AssertTrue(len(lf.baseIV) == 12)
- return nil
-}
-
-// bootstrap will initialize the log file with key id and baseIV.
-// The figure below shows the layout of the log file.
-// +----------------+------------------+------------------+
-// | keyID(8 bytes) | baseIV(12 bytes)| entry... |
-// +----------------+------------------+------------------+
-func (lf *logFile) bootstrap() error {
- var err error
-	// Delete all the data, because bootstrap is called both while creating the
-	// vlog file and while replaying the log. When replaying, there may be stale
-	// data left over, so we need to truncate everything.
-	if err = lf.fd.Truncate(0); err != nil {
-		return y.Wrapf(err, "Error while bootstrapping.")
- }
-
- if _, err = lf.fd.Seek(0, io.SeekStart); err != nil {
-		return y.Wrapf(err, "Error while SeekStart for the logfile %d in logFile.bootstrap", lf.fid)
- }
- // generate data key for the log file.
- var dk *pb.DataKey
- if dk, err = lf.registry.latestDataKey(); err != nil {
-		return y.Wrapf(err, "Error while retrieving datakey in logFile.bootstrap")
- }
- lf.dataKey = dk
- // We'll always preserve vlogHeaderSize for key id and baseIV.
- buf := make([]byte, vlogHeaderSize)
- // write key id to the buf.
- // key id will be zero if the logfile is in plain text.
- binary.BigEndian.PutUint64(buf[:8], lf.keyID())
- // generate base IV. It'll be used with offset of the vptr to encrypt the entry.
- if _, err := cryptorand.Read(buf[8:]); err != nil {
- return y.Wrapf(err, "Error while creating base IV, while creating logfile")
- }
- // Initialize base IV.
- lf.baseIV = buf[8:]
- y.AssertTrue(len(lf.baseIV) == 12)
- // write the key id and base IV to the file.
- _, err = lf.fd.Write(buf)
- return err
-}
-
-func (vlog *valueLog) createVlogFile(fid uint32) (*logFile, error) {
- path := vlog.fpath(fid)
-
- lf := &logFile{
- fid: fid,
- path: path,
- loadingMode: vlog.opt.ValueLogLoadingMode,
- registry: vlog.db.registry,
- }
-	// writableLogOffset is only written by the write func but read by the Read func.
-	// To avoid a race condition, all reads and updates to this variable must be
-	// done via atomics.
- var err error
- if lf.fd, err = y.CreateSyncedFile(path, vlog.opt.SyncWrites); err != nil {
- return nil, errFile(err, lf.path, "Create value log file")
- }
-
- removeFile := func() {
-		// Remove the file so that we don't get an error when createVlogFile is
-		// called again for the same fid. This could happen if a transient error
-		// prevented us from creating the file and a second attempt succeeds.
- y.Check(os.Remove(lf.fd.Name()))
- }
-
- if err = lf.bootstrap(); err != nil {
- removeFile()
- return nil, err
- }
-
- if err = syncDir(vlog.dirPath); err != nil {
- removeFile()
- return nil, errFile(err, vlog.dirPath, "Sync value log dir")
- }
-
- if err = lf.mmap(2 * vlog.opt.ValueLogFileSize); err != nil {
- removeFile()
- return nil, errFile(err, lf.path, "Mmap value log file")
- }
-
- vlog.filesLock.Lock()
- vlog.filesMap[fid] = lf
- vlog.maxFid = fid
-	// writableLogOffset is only written by the write func but read by the Read func.
-	// To avoid a race condition, all reads and updates to this variable must be
-	// done via atomics.
- atomic.StoreUint32(&vlog.writableLogOffset, vlogHeaderSize)
- vlog.numEntriesWritten = 0
- vlog.filesLock.Unlock()
-
- return lf, nil
-}
-
-func errFile(err error, path string, msg string) error {
- return fmt.Errorf("%s. Path=%s. Error=%v", msg, path, err)
-}
-
-func (vlog *valueLog) replayLog(lf *logFile, offset uint32, replayFn logEntry) error {
- fi, err := lf.fd.Stat()
- if err != nil {
- return errFile(err, lf.path, "Unable to run file.Stat")
- }
-
- // Alright, let's iterate now.
- endOffset, err := vlog.iterate(lf, offset, replayFn)
- if err != nil {
- return errFile(err, lf.path, "Unable to replay logfile")
- }
- if int64(endOffset) == fi.Size() {
- return nil
- }
-
- // End offset is different from file size. So, we should truncate the file
- // to that size.
- if !vlog.opt.Truncate {
- vlog.db.opt.Warningf("Truncate Needed. File %s size: %d Endoffset: %d",
- lf.fd.Name(), fi.Size(), endOffset)
- return ErrTruncateNeeded
- }
-
- // The entire file should be truncated (i.e. it should be deleted).
- // If fid == maxFid then it's okay to truncate the entire file since it will be
- // used for future additions. Also, it's okay if the last file has size zero.
-	// We mmap 2*opt.ValueLogFileSize for the last file. See the vlog.open() function.
- // if endOffset <= vlogHeaderSize && lf.fid != vlog.maxFid {
-
- if endOffset <= vlogHeaderSize {
- if lf.fid != vlog.maxFid {
- return errDeleteVlogFile
- }
- return lf.bootstrap()
- }
-
- vlog.db.opt.Infof("Truncating vlog file %s to offset: %d", lf.fd.Name(), endOffset)
- if err := lf.fd.Truncate(int64(endOffset)); err != nil {
- return errFile(err, lf.path, fmt.Sprintf(
- "Truncation needed at offset %d. Can be done manually as well.", endOffset))
- }
- return nil
-}
-
-// init initializes the value log struct. This initialization needs to happen
-// before compactions start.
-func (vlog *valueLog) init(db *DB) {
- vlog.opt = db.opt
- vlog.db = db
- // We don't need to open any vlog files or collect stats for GC if DB is opened
- // in InMemory mode. InMemory mode doesn't create any files/directories on disk.
- if vlog.opt.InMemory {
- return
- }
- vlog.dirPath = vlog.opt.ValueDir
-
- vlog.garbageCh = make(chan struct{}, 1) // Only allow one GC at a time.
- vlog.lfDiscardStats = &lfDiscardStats{
- m: make(map[uint32]int64),
- closer: y.NewCloser(1),
- flushChan: make(chan map[uint32]int64, 16),
- }
-}
-
-func (vlog *valueLog) open(db *DB, ptr valuePointer, replayFn logEntry) error {
- // We don't need to open any vlog files or collect stats for GC if DB is opened
- // in InMemory mode. InMemory mode doesn't create any files/directories on disk.
- if db.opt.InMemory {
- return nil
- }
-
- go vlog.flushDiscardStats()
- if err := vlog.populateFilesMap(); err != nil {
- return err
- }
- // If no files are found, then create a new file.
- if len(vlog.filesMap) == 0 {
- _, err := vlog.createVlogFile(0)
- return y.Wrapf(err, "Error while creating log file in valueLog.open")
- }
- fids := vlog.sortedFids()
- for _, fid := range fids {
- lf, ok := vlog.filesMap[fid]
- y.AssertTrue(ok)
- var flags uint32
- switch {
-		case vlog.opt.ReadOnly:
-			// If the DB is read-only, we don't need SyncWrites.
-			flags |= y.ReadOnly
-		case vlog.opt.SyncWrites:
-			// Set the sync flag.
- flags |= y.Sync
- }
-
- // We cannot mmap the files upfront here. Windows does not like mmapped files to be
- // truncated. We might need to truncate files during a replay.
- var err error
- if err = lf.open(vlog.fpath(fid), flags); err != nil {
- return errors.Wrapf(err, "Open existing file: %q", lf.path)
- }
-
- // This file is before the value head pointer. So, we don't need to
- // replay it, and can just open it in readonly mode.
- if fid < ptr.Fid {
- // Mmap the file here, we don't need to replay it.
- if err := lf.init(); err != nil {
- return err
- }
- continue
- }
-
- var offset uint32
- if fid == ptr.Fid {
- offset = ptr.Offset + ptr.Len
- }
- vlog.db.opt.Infof("Replaying file id: %d at offset: %d\n", fid, offset)
- now := time.Now()
- // Replay and possible truncation done. Now we can open the file as per
- // user specified options.
- if err := vlog.replayLog(lf, offset, replayFn); err != nil {
- // Log file is corrupted. Delete it.
- if err == errDeleteVlogFile {
- delete(vlog.filesMap, fid)
-			// Close the fd of the file before deleting the file, otherwise Windows complains.
- if err := lf.fd.Close(); err != nil {
- return errors.Wrapf(err, "failed to close vlog file %s", lf.fd.Name())
- }
- path := vlog.fpath(lf.fid)
- if err := os.Remove(path); err != nil {
- return y.Wrapf(err, "failed to delete empty value log file: %q", path)
- }
- continue
- }
- return err
- }
- vlog.db.opt.Infof("Replay took: %s\n", time.Since(now))
-
- if fid < vlog.maxFid {
- // This file has been replayed. It can now be mmapped.
- // For maxFid, the mmap would be done by the specially written code below.
- if err := lf.init(); err != nil {
- return err
- }
- }
- }
- // Seek to the end to start writing.
- last, ok := vlog.filesMap[vlog.maxFid]
- y.AssertTrue(ok)
- // We'll create a new vlog if the last vlog is encrypted and db is opened in
- // plain text mode or vice versa. A single vlog file can't have both
- // encrypted entries and plain text entries.
- if last.encryptionEnabled() != vlog.db.shouldEncrypt() {
- newid := vlog.maxFid + 1
- _, err := vlog.createVlogFile(newid)
- if err != nil {
- return y.Wrapf(err, "Error while creating log file %d in valueLog.open", newid)
- }
- last, ok = vlog.filesMap[newid]
- y.AssertTrue(ok)
- }
- lastOffset, err := last.fd.Seek(0, io.SeekEnd)
- if err != nil {
- return errFile(err, last.path, "file.Seek to end")
- }
- vlog.writableLogOffset = uint32(lastOffset)
-
- // Update the head to point to the updated tail. Otherwise, even after doing a successful
- // replay and closing the DB, the value log head does not get updated, which causes the replay
- // to happen repeatedly.
- vlog.db.vhead = valuePointer{Fid: vlog.maxFid, Offset: uint32(lastOffset)}
-
- // Map the file if needed. When we create a file, it is automatically mapped.
- if err = last.mmap(2 * vlog.opt.ValueLogFileSize); err != nil {
- return errFile(err, last.path, "Map log file")
- }
- if err := vlog.populateDiscardStats(); err != nil {
-		// Print the error and continue. We don't want to prevent value log open
-		// if there's an error with fetching the discard stats.
- db.opt.Errorf("Failed to populate discard stats: %s", err)
- }
- return nil
-}
-
-func (lf *logFile) init() error {
- fstat, err := lf.fd.Stat()
- if err != nil {
- return errors.Wrapf(err, "Unable to check stat for %q", lf.path)
- }
- sz := fstat.Size()
- if sz == 0 {
- // File is empty. We don't need to mmap it. Return.
- return nil
- }
- y.AssertTrue(sz <= math.MaxUint32)
- lf.size = uint32(sz)
- if err = lf.mmap(sz); err != nil {
- _ = lf.fd.Close()
- return errors.Wrapf(err, "Unable to map file: %q", fstat.Name())
- }
- return nil
-}
-
-func (vlog *valueLog) stopFlushDiscardStats() {
- if vlog.lfDiscardStats != nil {
- vlog.lfDiscardStats.closer.Signal()
- }
-}
-
-func (vlog *valueLog) Close() error {
- if vlog == nil || vlog.db == nil || vlog.db.opt.InMemory {
- return nil
- }
- // close flushDiscardStats.
- vlog.lfDiscardStats.closer.SignalAndWait()
-
- vlog.opt.Debugf("Stopping garbage collection of values.")
-
- var err error
- for id, f := range vlog.filesMap {
- f.lock.Lock() // We won’t release the lock.
- if munmapErr := f.munmap(); munmapErr != nil && err == nil {
- err = munmapErr
- }
-
- maxFid := vlog.maxFid
- // TODO(ibrahim) - Do we need the following truncations on non-windows
- // platforms? We expand the file only on windows and the vlog.woffset()
- // should point to end of file on all other platforms.
- if !vlog.opt.ReadOnly && id == maxFid {
- // truncate writable log file to correct offset.
- if truncErr := f.fd.Truncate(
- int64(vlog.woffset())); truncErr != nil && err == nil {
- err = truncErr
- }
- }
-
- if closeErr := f.fd.Close(); closeErr != nil && err == nil {
- err = closeErr
- }
- }
- return err
-}
-
- // sortedFids returns the file ids not pending deletion, sorted. Assumes we have shared access
- // to filesMap.
-func (vlog *valueLog) sortedFids() []uint32 {
- toBeDeleted := make(map[uint32]struct{})
- for _, fid := range vlog.filesToBeDeleted {
- toBeDeleted[fid] = struct{}{}
- }
- ret := make([]uint32, 0, len(vlog.filesMap))
- for fid := range vlog.filesMap {
- if _, ok := toBeDeleted[fid]; !ok {
- ret = append(ret, fid)
- }
- }
- sort.Slice(ret, func(i, j int) bool {
- return ret[i] < ret[j]
- })
- return ret
-}
-
-type request struct {
- // Input values
- Entries []*Entry
- // Output values and wait group stuff below
- Ptrs []valuePointer
- Wg sync.WaitGroup
- Err error
- ref int32
-}
-
-func (req *request) reset() {
- req.Entries = req.Entries[:0]
- req.Ptrs = req.Ptrs[:0]
- req.Wg = sync.WaitGroup{}
- req.Err = nil
- req.ref = 0
-}
-
-func (req *request) IncrRef() {
- atomic.AddInt32(&req.ref, 1)
-}
-
-func (req *request) DecrRef() {
- nRef := atomic.AddInt32(&req.ref, -1)
- if nRef > 0 {
- return
- }
- req.Entries = nil
- requestPool.Put(req)
-}
-
-func (req *request) Wait() error {
- req.Wg.Wait()
- err := req.Err
- req.DecrRef() // DecrRef after writing to DB.
- return err
-}
-
-type requests []*request
-
-func (reqs requests) DecrRef() {
- for _, req := range reqs {
- req.DecrRef()
- }
-}
-
-func (reqs requests) IncrRef() {
- for _, req := range reqs {
- req.IncrRef()
- }
-}
-
- // sync syncs the content of the latest value log file to disk. Syncing of the value log
- // directory is not required here, as it happens every time a value log file rotation happens
- // (see the createVlogFile function). During rotation, the previous value log file also gets
- // synced to disk. It only syncs the file if fid >= vlog.maxFid. In some cases, such as replay
- // (while opening the db), it might be called with fid < vlog.maxFid. To sync irrespective of
- // file id, call it with math.MaxUint32.
-func (vlog *valueLog) sync(fid uint32) error {
- if vlog.opt.SyncWrites || vlog.opt.InMemory {
- return nil
- }
-
- vlog.filesLock.RLock()
- maxFid := vlog.maxFid
- // During replay, it is possible to get a sync call with an fid less than maxFid.
- // Since the older file has already been synced, we can return from here.
- if fid < maxFid || len(vlog.filesMap) == 0 {
- vlog.filesLock.RUnlock()
- return nil
- }
- curlf := vlog.filesMap[maxFid]
- // Sometimes it is possible that vlog.maxFid has been increased but the file creation
- // with the same id is still in progress and this function is called. In those cases,
- // the entry for the file might not be present in vlog.filesMap.
- if curlf == nil {
- vlog.filesLock.RUnlock()
- return nil
- }
- curlf.lock.RLock()
- vlog.filesLock.RUnlock()
-
- err := curlf.sync()
- curlf.lock.RUnlock()
- return err
-}
-
-func (vlog *valueLog) woffset() uint32 {
- return atomic.LoadUint32(&vlog.writableLogOffset)
-}
-
- // validateWrites checks whether the given requests can fit into a 4GB vlog file.
- // NOTE: 4GB is the maximum size we can create for a vlog because the value pointer offset is
- // of type uint32. Anything beyond 4GB would overflow uint32, so the size is capped at 4GB.
-func (vlog *valueLog) validateWrites(reqs []*request) error {
- vlogOffset := uint64(vlog.woffset())
- for _, req := range reqs {
- // calculate size of the request.
- size := estimateRequestSize(req)
- estimatedVlogOffset := vlogOffset + size
- if estimatedVlogOffset > uint64(maxVlogFileSize) {
- return errors.Errorf("Request size offset %d is bigger than maximum offset %d",
- estimatedVlogOffset, maxVlogFileSize)
- }
-
- if estimatedVlogOffset >= uint64(vlog.opt.ValueLogFileSize) {
- // We'll create a new vlog file if the estimated offset is greater than or equal to
- // the max vlog size. So, reset the vlogOffset.
- vlogOffset = 0
- continue
- }
- // Estimated vlog offset will become current vlog offset if the vlog is not rotated.
- vlogOffset = estimatedVlogOffset
- }
- return nil
-}
-
- // estimateRequestSize returns the size that needs to be written for the given request.
-func estimateRequestSize(req *request) uint64 {
- size := uint64(0)
- for _, e := range req.Entries {
- size += uint64(maxHeaderSize + len(e.Key) + len(e.Value) + crc32.Size)
- }
- return size
-}
-
-// write is thread-unsafe by design and should not be called concurrently.
-func (vlog *valueLog) write(reqs []*request) error {
- if vlog.db.opt.InMemory {
- return nil
- }
- // Validate writes before writing to the vlog, because we don't want to write partially
- // and then return an error.
- if err := vlog.validateWrites(reqs); err != nil {
- return err
- }
-
- vlog.filesLock.RLock()
- maxFid := vlog.maxFid
- curlf, ok := vlog.filesMap[maxFid]
- if !ok {
- var fids []uint32
- for fid := range vlog.filesMap {
- fids = append(fids, fid)
- }
- return errors.Errorf("Cannot find MaxFid: %d in filesMap: %+v", maxFid, fids)
- }
- vlog.filesLock.RUnlock()
-
- var buf bytes.Buffer
- flushWrites := func() error {
- if buf.Len() == 0 {
- return nil
- }
- vlog.opt.Debugf("Flushing buffer of size %d to vlog", buf.Len())
- n, err := curlf.fd.Write(buf.Bytes())
- if err != nil {
- return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path)
- }
- buf.Reset()
- y.NumWrites.Add(1)
- y.NumBytesWritten.Add(int64(n))
- vlog.opt.Debugf("Done")
- atomic.AddUint32(&vlog.writableLogOffset, uint32(n))
- atomic.StoreUint32(&curlf.size, vlog.writableLogOffset)
- return nil
- }
- toDisk := func() error {
- if err := flushWrites(); err != nil {
- return err
- }
- if vlog.woffset() > uint32(vlog.opt.ValueLogFileSize) ||
- vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries {
- if err := curlf.doneWriting(vlog.woffset()); err != nil {
- return err
- }
-
- newid := vlog.maxFid + 1
- y.AssertTruef(newid > 0, "newid has overflown uint32: %v", newid)
- newlf, err := vlog.createVlogFile(newid)
- if err != nil {
- return err
- }
- curlf = newlf
- atomic.AddInt32(&vlog.db.logRotates, 1)
- }
- return nil
- }
- for i := range reqs {
- b := reqs[i]
- b.Ptrs = b.Ptrs[:0]
- var written int
- for j := range b.Entries {
- e := b.Entries[j]
- if e.skipVlog {
- b.Ptrs = append(b.Ptrs, valuePointer{})
- continue
- }
- var p valuePointer
-
- p.Fid = curlf.fid
- // Use the offset including buffer length so far.
- p.Offset = vlog.woffset() + uint32(buf.Len())
- plen, err := curlf.encodeEntry(e, &buf, p.Offset) // Now encode the entry into buffer.
- if err != nil {
- return err
- }
- p.Len = uint32(plen)
- b.Ptrs = append(b.Ptrs, p)
- written++
-
- // It is possible that the size of the buffer grows beyond the max size of the value
- // log (this happens when a transaction contains entries with large value sizes) and
- // badger might run into out-of-memory errors. We flush the buffer here if its size
- // grows beyond the max value log size.
- if int64(buf.Len()) > vlog.db.opt.ValueLogFileSize {
- if err := flushWrites(); err != nil {
- return err
- }
- }
- }
- vlog.numEntriesWritten += uint32(written)
- // We write to disk here so that all entries that are part of the same transaction are
- // written to the same vlog file.
- writeNow :=
- vlog.woffset()+uint32(buf.Len()) > uint32(vlog.opt.ValueLogFileSize) ||
- vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries)
- if writeNow {
- if err := toDisk(); err != nil {
- return err
- }
- }
- }
- return toDisk()
-}
-
- // getFileRLocked gets the logFile and acquires an RLock() for the mmap. You must call
- // RUnlock on the file (if non-nil).
-func (vlog *valueLog) getFileRLocked(vp valuePointer) (*logFile, error) {
- vlog.filesLock.RLock()
- defer vlog.filesLock.RUnlock()
- ret, ok := vlog.filesMap[vp.Fid]
- if !ok {
- // log file has gone away, will need to retry the operation.
- return nil, ErrRetry
- }
-
- // Check for valid offset if we are reading from writable log.
- maxFid := vlog.maxFid
- if vp.Fid == maxFid {
- currentOffset := vlog.woffset()
- if vp.Offset >= currentOffset {
- return nil, errors.Errorf(
- "Invalid value pointer offset: %d greater than current offset: %d",
- vp.Offset, currentOffset)
- }
- }
-
- ret.lock.RLock()
- return ret, nil
-}
-
-// Read reads the value log at a given location.
-// TODO: Make this read private.
-func (vlog *valueLog) Read(vp valuePointer, s *y.Slice) ([]byte, func(), error) {
- buf, lf, err := vlog.readValueBytes(vp, s)
- // The log file is read-locked, so decide whether to unlock it immediately or let the
- // caller unlock it once it is done with the buffer.
- cb := vlog.getUnlockCallback(lf)
- if err != nil {
- return nil, cb, err
- }
-
- if vlog.opt.VerifyValueChecksum {
- hash := crc32.New(y.CastagnoliCrcTable)
- if _, err := hash.Write(buf[:len(buf)-crc32.Size]); err != nil {
- runCallback(cb)
- return nil, nil, errors.Wrapf(err, "failed to write hash for vp %+v", vp)
- }
- // Fetch checksum from the end of the buffer.
- checksum := buf[len(buf)-crc32.Size:]
- if hash.Sum32() != y.BytesToU32(checksum) {
- runCallback(cb)
- return nil, nil, errors.Wrapf(y.ErrChecksumMismatch, "value corrupted for vp: %+v", vp)
- }
- }
- var h header
- headerLen := h.Decode(buf)
- kv := buf[headerLen:]
- if lf.encryptionEnabled() {
- kv, err = lf.decryptKV(kv, vp.Offset)
- if err != nil {
- return nil, cb, err
- }
- }
- if uint32(len(kv)) < h.klen+h.vlen {
- vlog.db.opt.Logger.Errorf("Invalid read: vp: %+v", vp)
- return nil, nil, errors.Errorf("Invalid read: Len: %d read at:[%d:%d]",
- len(kv), h.klen, h.klen+h.vlen)
- }
- return kv[h.klen : h.klen+h.vlen], cb, nil
-}
-
- // getUnlockCallback returns a function that unlocks the logfile if the logfile is mmapped.
- // Otherwise, it unlocks the logfile immediately and returns nil.
-func (vlog *valueLog) getUnlockCallback(lf *logFile) func() {
- if lf == nil {
- return nil
- }
- if vlog.opt.ValueLogLoadingMode == options.MemoryMap {
- return lf.lock.RUnlock
- }
- lf.lock.RUnlock()
- return nil
-}
-
- // readValueBytes returns the vlog entry slice and the read-locked log file. The caller is
- // responsible for unlocking the logFile.
-func (vlog *valueLog) readValueBytes(vp valuePointer, s *y.Slice) ([]byte, *logFile, error) {
- lf, err := vlog.getFileRLocked(vp)
- if err != nil {
- return nil, nil, err
- }
-
- buf, err := lf.read(vp, s)
- return buf, lf, err
-}
-
-func (vlog *valueLog) pickLog(head valuePointer, tr trace.Trace) (files []*logFile) {
- vlog.filesLock.RLock()
- defer vlog.filesLock.RUnlock()
- fids := vlog.sortedFids()
- switch {
- case len(fids) <= 1:
- tr.LazyPrintf("Only one or less value log file.")
- return nil
- case head.Fid == 0:
- tr.LazyPrintf("Head pointer is at zero.")
- return nil
- }
-
- // Pick a candidate that contains the largest amount of discardable data
- candidate := struct {
- fid uint32
- discard int64
- }{math.MaxUint32, 0}
- vlog.lfDiscardStats.RLock()
- for _, fid := range fids {
- if fid >= head.Fid {
- break
- }
- if vlog.lfDiscardStats.m[fid] > candidate.discard {
- candidate.fid = fid
- candidate.discard = vlog.lfDiscardStats.m[fid]
- }
- }
- vlog.lfDiscardStats.RUnlock()
-
- if candidate.fid != math.MaxUint32 { // Found a candidate
- tr.LazyPrintf("Found candidate via discard stats: %v", candidate)
- files = append(files, vlog.filesMap[candidate.fid])
- } else {
- tr.LazyPrintf("Could not find candidate via discard stats. Randomly picking one.")
- }
-
- // Fallback to randomly picking a log file
- var idxHead int
- for i, fid := range fids {
- if fid == head.Fid {
- idxHead = i
- break
- }
- }
- if idxHead == 0 { // Not found or first file
- tr.LazyPrintf("Could not find any file.")
- return nil
- }
- idx := rand.Intn(idxHead) // Don’t include head.Fid. We pick a random file before it.
- if idx > 0 {
- idx = rand.Intn(idx + 1) // Another level of rand to favor smaller fids.
- }
- tr.LazyPrintf("Randomly chose fid: %d", fids[idx])
- files = append(files, vlog.filesMap[fids[idx]])
- return files
-}
-
-func discardEntry(e Entry, vs y.ValueStruct, db *DB) bool {
- if vs.Version != y.ParseTs(e.Key) {
- // Version not found. Discard.
- return true
- }
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
- return true
- }
- if (vs.Meta & bitValuePointer) == 0 {
- // Key also stores the value in LSM. Discard.
- return true
- }
- if (vs.Meta & bitFinTxn) > 0 {
- // Just a txn finish entry. Discard.
- return true
- }
- if bytes.HasPrefix(e.Key, badgerMove) {
- // Verify that the actual key entry, without the badger prefix, has not been deleted.
- // If this is not done, the badgerMove entry will be kept forever, moving from
- // vlog to vlog during rewrites.
- avs, err := db.get(e.Key[len(badgerMove):])
- if err != nil {
- return false
- }
- return avs.Version == 0
- }
- return false
-}
-
-func (vlog *valueLog) doRunGC(lf *logFile, discardRatio float64, tr trace.Trace) (err error) {
- // Update stats before exiting
- defer func() {
- if err == nil {
- vlog.lfDiscardStats.Lock()
- delete(vlog.lfDiscardStats.m, lf.fid)
- vlog.lfDiscardStats.Unlock()
- }
- }()
-
- type reason struct {
- total float64
- discard float64
- count int
- }
-
- fi, err := lf.fd.Stat()
- if err != nil {
- tr.LazyPrintf("Error while finding file size: %v", err)
- tr.SetError()
- return err
- }
-
- // Set up the sampling window sizes.
- sizeWindow := float64(fi.Size()) * 0.1 // 10% of the file as window.
- sizeWindowM := sizeWindow / (1 << 20) // in MBs.
- countWindow := int(float64(vlog.opt.ValueLogMaxEntries) * 0.01) // 1% of num entries.
- tr.LazyPrintf("Size window: %5.2f. Count window: %d.", sizeWindow, countWindow)
-
- // Pick a random start point for the log.
- skipFirstM := float64(rand.Int63n(fi.Size())) // Pick a random starting location.
- skipFirstM -= sizeWindow // Avoid hitting EOF by moving back by window.
- skipFirstM /= float64(mi) // Convert to MBs.
- tr.LazyPrintf("Skip first %5.2f MB of file of size: %d MB", skipFirstM, fi.Size()/mi)
- var skipped float64
-
- var r reason
- start := time.Now()
- y.AssertTrue(vlog.db != nil)
- s := new(y.Slice)
- var numIterations int
- _, err = vlog.iterate(lf, 0, func(e Entry, vp valuePointer) error {
- numIterations++
- esz := float64(vp.Len) / (1 << 20) // in MBs.
- if skipped < skipFirstM {
- skipped += esz
- return nil
- }
-
- // Sample until we reach the window sizes or exceed 10 seconds.
- if r.count > countWindow {
- tr.LazyPrintf("Stopping sampling after %d entries.", countWindow)
- return errStop
- }
- if r.total > sizeWindowM {
- tr.LazyPrintf("Stopping sampling after reaching window size.")
- return errStop
- }
- if time.Since(start) > 10*time.Second {
- tr.LazyPrintf("Stopping sampling after 10 seconds.")
- return errStop
- }
- r.total += esz
- r.count++
-
- vs, err := vlog.db.get(e.Key)
- if err != nil {
- return err
- }
- if discardEntry(e, vs, vlog.db) {
- r.discard += esz
- return nil
- }
-
- // Value is still present in value log.
- y.AssertTrue(len(vs.Value) > 0)
- vp.Decode(vs.Value)
-
- if vp.Fid > lf.fid {
- // Value is present in a later log. Discard.
- r.discard += esz
- return nil
- }
- if vp.Offset > e.offset {
- // Value is present in a later offset, but in the same log.
- r.discard += esz
- return nil
- }
- if vp.Fid == lf.fid && vp.Offset == e.offset {
- // This is still the active entry. This would need to be rewritten.
-
- } else {
- vlog.opt.Debugf("Reason=%+v\n", r)
- buf, lf, err := vlog.readValueBytes(vp, s)
- // We need to decide whether to unlock the log file immediately, based on the
- // loading mode. getUnlockCallback will take care of it.
- cb := vlog.getUnlockCallback(lf)
- if err != nil {
- runCallback(cb)
- return errStop
- }
- ne, err := lf.decodeEntry(buf, vp.Offset)
- if err != nil {
- runCallback(cb)
- return errStop
- }
- ne.print("Latest Entry Header in LSM")
- e.print("Latest Entry in Log")
- runCallback(cb)
- return errors.Errorf("This shouldn't happen. Latest Pointer:%+v. Meta:%v.",
- vp, vs.Meta)
- }
- return nil
- })
-
- if err != nil {
- tr.LazyPrintf("Error while iterating for RunGC: %v", err)
- tr.SetError()
- return err
- }
- tr.LazyPrintf("Fid: %d. Skipped: %5.2fMB Num iterations: %d. Data status=%+v\n",
- lf.fid, skipped, numIterations, r)
-
- // If we couldn't sample at least 1000 KV pairs or at least 75% of the window size,
- // and what we can discard is below the threshold, we should skip the rewrite.
- if (r.count < countWindow && r.total < sizeWindowM*0.75) || r.discard < discardRatio*r.total {
- tr.LazyPrintf("Skipping GC on fid: %d", lf.fid)
- return ErrNoRewrite
- }
- if err = vlog.rewrite(lf, tr); err != nil {
- return err
- }
- tr.LazyPrintf("Done rewriting.")
- return nil
-}
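The sampling setup above reads in one pass: at most 10% of the file by size, at most 1% of ValueLogMaxEntries by count, starting from a random point pulled back by one window so the scan does not run off the end. A tiny sketch of that arithmetic with assumed numbers:

package main

import (
    "fmt"
    "math/rand"
)

func main() {
    fileSize := int64(1 << 30) // assumed 1 GiB vlog file
    maxEntries := 1000000      // assumed ValueLogMaxEntries

    sizeWindow := float64(fileSize) * 0.1          // sample at most 10% of the file
    sizeWindowM := sizeWindow / (1 << 20)          // in MiB
    countWindow := int(float64(maxEntries) * 0.01) // at most 1% of the entries

    // Random start, pulled back by one window so sampling doesn't hit EOF.
    skipFirstM := (float64(rand.Int63n(fileSize)) - sizeWindow) / (1 << 20)

    fmt.Printf("size window: %.2f MiB, count window: %d, skip first: %.2f MiB\n",
        sizeWindowM, countWindow, skipFirstM)
}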
-
-func (vlog *valueLog) waitOnGC(lc *y.Closer) {
- defer lc.Done()
-
- <-lc.HasBeenClosed() // Wait for lc to be closed.
-
- // Wait for any GC in progress to finish, and prevent any further runGC calls by filling
- // up the channel of size 1.
- vlog.garbageCh <- struct{}{}
-}
-
-func (vlog *valueLog) runGC(discardRatio float64, head valuePointer) error {
- select {
- case vlog.garbageCh <- struct{}{}:
- // Pick a log file for GC.
- tr := trace.New("Badger.ValueLog", "GC")
- tr.SetMaxEvents(100)
- defer func() {
- tr.Finish()
- <-vlog.garbageCh
- }()
-
- var err error
- files := vlog.pickLog(head, tr)
- if len(files) == 0 {
- tr.LazyPrintf("PickLog returned zero results.")
- return ErrNoRewrite
- }
- tried := make(map[uint32]bool)
- for _, lf := range files {
- if _, done := tried[lf.fid]; done {
- continue
- }
- tried[lf.fid] = true
- err = vlog.doRunGC(lf, discardRatio, tr)
- if err == nil {
- return vlog.deleteMoveKeysFor(lf.fid, tr)
- }
- }
- return err
- default:
- return ErrRejected
- }
-}
-
-func (vlog *valueLog) updateDiscardStats(stats map[uint32]int64) {
- if vlog.opt.InMemory {
- return
- }
-
- select {
- case vlog.lfDiscardStats.flushChan <- stats:
- default:
- vlog.opt.Warningf("updateDiscardStats called: discard stats flushChan full, " +
- "returning without pushing to flushChan")
- }
-}
-
-func (vlog *valueLog) flushDiscardStats() {
- defer vlog.lfDiscardStats.closer.Done()
-
- mergeStats := func(stats map[uint32]int64) ([]byte, error) {
- vlog.lfDiscardStats.Lock()
- defer vlog.lfDiscardStats.Unlock()
- for fid, count := range stats {
- vlog.lfDiscardStats.m[fid] += count
- vlog.lfDiscardStats.updatesSinceFlush++
- }
-
- if vlog.lfDiscardStats.updatesSinceFlush > discardStatsFlushThreshold {
- encodedDS, err := json.Marshal(vlog.lfDiscardStats.m)
- if err != nil {
- return nil, err
- }
- vlog.lfDiscardStats.updatesSinceFlush = 0
- return encodedDS, nil
- }
- return nil, nil
- }
-
- process := func(stats map[uint32]int64) error {
- encodedDS, err := mergeStats(stats)
- if err != nil || encodedDS == nil {
- return err
- }
-
- entries := []*Entry{{
- Key: y.KeyWithTs(lfDiscardStatsKey, 1),
- Value: encodedDS,
- }}
- req, err := vlog.db.sendToWriteCh(entries)
- // No special handling of ErrBlockedWrites is required, as the error is just logged in
- // the for loop below.
- if err != nil {
- return errors.Wrapf(err, "failed to push discard stats to write channel")
- }
- return req.Wait()
- }
-
- closer := vlog.lfDiscardStats.closer
- for {
- select {
- case <-closer.HasBeenClosed():
- // For simplicity, just return without processing the stats already present in flushChan.
- return
- case stats := <-vlog.lfDiscardStats.flushChan:
- if err := process(stats); err != nil {
- vlog.opt.Errorf("unable to process discardstats with error: %s", err)
- }
- }
- }
-}
-
-// populateDiscardStats populates vlog.lfDiscardStats.
-// This function will be called while initializing valueLog.
-func (vlog *valueLog) populateDiscardStats() error {
- key := y.KeyWithTs(lfDiscardStatsKey, math.MaxUint64)
- var statsMap map[uint32]int64
- var val []byte
- var vp valuePointer
- for {
- vs, err := vlog.db.get(key)
- if err != nil {
- return err
- }
- // Value doesn't exist.
- if vs.Meta == 0 && len(vs.Value) == 0 {
- vlog.opt.Debugf("Value log discard stats empty")
- return nil
- }
- vp.Decode(vs.Value)
- // Entry stored in LSM tree.
- if vs.Meta&bitValuePointer == 0 {
- val = y.SafeCopy(val, vs.Value)
- break
- }
- // Read entry from value log.
- result, cb, err := vlog.Read(vp, new(y.Slice))
- runCallback(cb)
- val = y.SafeCopy(val, result)
- // The result is stored in val. We can break the loop from here.
- if err == nil {
- break
- }
- if err != ErrRetry {
- return err
- }
- // If we're at this point, it means we haven't found the value yet. If the current key already
- // has the badger move prefix, we should break out here since we've tried both the original key
- // and the key with the move prefix. "val" would be empty since we haven't found the value yet.
- if bytes.HasPrefix(key, badgerMove) {
- break
- }
- // If we're at this point, it means the discard stats key was moved by the GC and the actual
- // entry is the one prefixed by the badger move key.
- // Prepend the existing key with the badger move prefix and search for that key.
- key = append(badgerMove, key...)
- }
-
- if len(val) == 0 {
- return nil
- }
- if err := json.Unmarshal(val, &statsMap); err != nil {
- return errors.Wrapf(err, "failed to unmarshal discard stats")
- }
- vlog.opt.Debugf("Value Log Discard stats: %v", statsMap)
- vlog.lfDiscardStats.flushChan <- statsMap
- return nil
-}
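populateDiscardStats and flushDiscardStats round-trip the per-file discard counters through JSON under an internal key; encoding/json handles the integer-keyed map directly, so a minimal sketch of that round trip is just:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // fid -> discardable bytes, the shape of lfDiscardStats.m.
    stats := map[uint32]int64{1: 4096, 3: 1 << 20}

    encoded, err := json.Marshal(stats) // what gets written under lfDiscardStatsKey
    if err != nil {
        panic(err)
    }

    var decoded map[uint32]int64 // what populateDiscardStats unmarshals on open
    if err := json.Unmarshal(encoded, &decoded); err != nil {
        panic(err)
    }
    fmt.Println(decoded[3]) // 1048576
}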
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/checksum.go b/vendor/github.com/dgraph-io/badger/v2/y/checksum.go
deleted file mode 100644
index ab202484..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/checksum.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "hash/crc32"
-
- "github.com/dgraph-io/badger/v2/pb"
-
- "github.com/cespare/xxhash"
- "github.com/pkg/errors"
-)
-
-// ErrChecksumMismatch is returned at checksum mismatch.
-var ErrChecksumMismatch = errors.New("checksum mismatch")
-
-// CalculateChecksum calculates checksum for data using ct checksum type.
-func CalculateChecksum(data []byte, ct pb.Checksum_Algorithm) uint64 {
- switch ct {
- case pb.Checksum_CRC32C:
- return uint64(crc32.Checksum(data, CastagnoliCrcTable))
- case pb.Checksum_XXHash64:
- return xxhash.Sum64(data)
- default:
- panic("checksum type not supported")
- }
-}
-
-// VerifyChecksum validates the checksum for the data against the given expected checksum.
-func VerifyChecksum(data []byte, expected *pb.Checksum) error {
- actual := CalculateChecksum(data, expected.Algo)
- if actual != expected.Sum {
- return Wrapf(ErrChecksumMismatch, "actual: %d, expected: %d", actual, expected.Sum)
- }
- return nil
-}
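For reference, the two algorithms the deleted helper dispatches on can be exercised directly. This sketch computes both sums for the same payload, leaving out the pb.Checksum wrapper types:

package main

import (
    "fmt"
    "hash/crc32"

    "github.com/cespare/xxhash"
)

func main() {
    data := []byte("hello badger")

    // CRC32-Castagnoli, the pb.Checksum_CRC32C branch.
    castagnoli := crc32.MakeTable(crc32.Castagnoli)
    fmt.Println(uint64(crc32.Checksum(data, castagnoli)))

    // xxhash64, the pb.Checksum_XXHash64 branch.
    fmt.Println(xxhash.Sum64(data))
}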
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/encrypt.go b/vendor/github.com/dgraph-io/badger/v2/y/encrypt.go
deleted file mode 100644
index dbfe019f..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/encrypt.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "crypto/rand"
-)
-
- // XORBlock encrypts the given data with AES in CTR mode, XORing it with the keystream
- // derived from the key and IV. Since CTR mode is symmetric, the same call can be used
- // for both encryption and decryption. The IV must be of AES block size.
-func XORBlock(src, key, iv []byte) ([]byte, error) {
- block, err := aes.NewCipher(key)
- if err != nil {
- return nil, err
- }
- stream := cipher.NewCTR(block, iv)
- dst := make([]byte, len(src))
- stream.XORKeyStream(dst, src)
- return dst, nil
-}
-
- // GenerateIV generates a random IV of AES block size.
-func GenerateIV() ([]byte, error) {
- iv := make([]byte, aes.BlockSize)
- _, err := rand.Read(iv)
- return iv, err
-}
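Because CTR mode just XORs a keystream over the input, the same XORBlock call both encrypts and decrypts. A self-contained round-trip sketch; xorBlock reimplements the deleted helper locally so the snippet runs on its own:

package main

import (
    "bytes"
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "fmt"
)

// xorBlock mirrors the deleted y.XORBlock: AES-CTR keystream XORed with src.
func xorBlock(src, key, iv []byte) ([]byte, error) {
    block, err := aes.NewCipher(key)
    if err != nil {
        return nil, err
    }
    dst := make([]byte, len(src))
    cipher.NewCTR(block, iv).XORKeyStream(dst, src)
    return dst, nil
}

func main() {
    key := make([]byte, 32) // AES-256
    iv := make([]byte, aes.BlockSize)
    if _, err := rand.Read(key); err != nil {
        panic(err)
    }
    if _, err := rand.Read(iv); err != nil { // what GenerateIV does
        panic(err)
    }

    plain := []byte("value log entry")
    enc, _ := xorBlock(plain, key, iv)
    dec, _ := xorBlock(enc, key, iv)     // the same call decrypts
    fmt.Println(bytes.Equal(plain, dec)) // true
}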
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/error.go b/vendor/github.com/dgraph-io/badger/v2/y/error.go
deleted file mode 100644
index 59bb2835..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/error.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-// This file contains some functions for error handling. Note that we are moving
-// towards using x.Trace, i.e., rpc tracing using net/tracer. But for now, these
-// functions are useful for simple checks logged on one machine.
-// Some common use cases are:
-// (1) You receive an error from external lib, and would like to check/log fatal.
-// For this, use x.Check, x.Checkf. These will check for err != nil, which is
-// more common in Go. If you want to check for boolean being true, use
-// x.Assert, x.Assertf.
-// (2) You receive an error from external lib, and would like to pass on with some
-// stack trace information. In this case, use x.Wrap or x.Wrapf.
-// (3) You want to generate a new error with stack trace info. Use x.Errorf.
-
-import (
- "fmt"
- "log"
-
- "github.com/pkg/errors"
-)
-
-var debugMode = true
-
-// Check logs fatal if err != nil.
-func Check(err error) {
- if err != nil {
- log.Fatalf("%+v", Wrap(err))
- }
-}
-
- // Check2 acts as a convenience wrapper around Check, using the 2nd argument as error.
-func Check2(_ interface{}, err error) {
- Check(err)
-}
-
-// AssertTrue asserts that b is true. Otherwise, it would log fatal.
-func AssertTrue(b bool) {
- if !b {
- log.Fatalf("%+v", errors.Errorf("Assert failed"))
- }
-}
-
-// AssertTruef is AssertTrue with extra info.
-func AssertTruef(b bool, format string, args ...interface{}) {
- if !b {
- log.Fatalf("%+v", errors.Errorf(format, args...))
- }
-}
-
-// Wrap wraps errors from external lib.
-func Wrap(err error) error {
- if !debugMode {
- return err
- }
- return errors.Wrap(err, "")
-}
-
-// Wrapf is Wrap with extra info.
-func Wrapf(err error, format string, args ...interface{}) error {
- if !debugMode {
- if err == nil {
- return nil
- }
- return fmt.Errorf(format+" error: %+v", append(args, err)...)
- }
- return errors.Wrapf(err, format, args...)
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/event_log.go b/vendor/github.com/dgraph-io/badger/v2/y/event_log.go
deleted file mode 100644
index ba9dcb1f..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/event_log.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "golang.org/x/net/trace"
-
-var (
- NoEventLog trace.EventLog = nilEventLog{}
-)
-
-type nilEventLog struct{}
-
-func (nel nilEventLog) Printf(format string, a ...interface{}) {}
-
-func (nel nilEventLog) Errorf(format string, a ...interface{}) {}
-
-func (nel nilEventLog) Finish() {}
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/file_dsync.go b/vendor/github.com/dgraph-io/badger/v2/y/file_dsync.go
deleted file mode 100644
index ea4d9ab2..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/file_dsync.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build !dragonfly,!freebsd,!windows,!plan9
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "golang.org/x/sys/unix"
-
-func init() {
- datasyncFileFlag = unix.O_DSYNC
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/file_nodsync.go b/vendor/github.com/dgraph-io/badger/v2/y/file_nodsync.go
deleted file mode 100644
index 54a2184e..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/file_nodsync.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build dragonfly freebsd windows plan9
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "syscall"
-
-func init() {
- datasyncFileFlag = syscall.O_SYNC
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/iterator.go b/vendor/github.com/dgraph-io/badger/v2/y/iterator.go
deleted file mode 100644
index 6d0f677c..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/iterator.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "bytes"
- "encoding/binary"
-)
-
-// ValueStruct represents the value info that can be associated with a key, but also the internal
-// Meta field.
-type ValueStruct struct {
- Meta byte
- UserMeta byte
- ExpiresAt uint64
- Value []byte
-
- Version uint64 // This field is not serialized. Only for internal usage.
-}
-
-func sizeVarint(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-
-// EncodedSize is the size of the ValueStruct when encoded
-func (v *ValueStruct) EncodedSize() uint32 {
- sz := len(v.Value) + 2 // meta, usermeta.
- if v.ExpiresAt == 0 {
- return uint32(sz + 1)
- }
-
- enc := sizeVarint(v.ExpiresAt)
- return uint32(sz + enc)
-}
-
-// Decode uses the length of the slice to infer the length of the Value field.
-func (v *ValueStruct) Decode(b []byte) {
- v.Meta = b[0]
- v.UserMeta = b[1]
- var sz int
- v.ExpiresAt, sz = binary.Uvarint(b[2:])
- v.Value = b[2+sz:]
-}
-
-// Encode expects a slice of length at least v.EncodedSize().
-func (v *ValueStruct) Encode(b []byte) {
- b[0] = v.Meta
- b[1] = v.UserMeta
- sz := binary.PutUvarint(b[2:], v.ExpiresAt)
- copy(b[2+sz:], v.Value)
-}
-
-// EncodeTo should be kept in sync with the Encode function above. The reason
-// this function exists is to avoid creating byte arrays per key-value pair in
-// table/builder.go.
-func (v *ValueStruct) EncodeTo(buf *bytes.Buffer) {
- buf.WriteByte(v.Meta)
- buf.WriteByte(v.UserMeta)
- var enc [binary.MaxVarintLen64]byte
- sz := binary.PutUvarint(enc[:], v.ExpiresAt)
- buf.Write(enc[:sz])
- buf.Write(v.Value)
-}
-
-// Iterator is an interface for a basic iterator.
-type Iterator interface {
- Next()
- Rewind()
- Seek(key []byte)
- Key() []byte
- Value() ValueStruct
- Valid() bool
-
- // All iterators should be closed so that file garbage collection works.
- Close() error
-}
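The encoding above is two fixed bytes (Meta, UserMeta), a uvarint expiry, then the raw value, with the value length recovered from the slice length on decode. A standalone round-trip sketch of that layout; valueStruct here is a local copy, not the vendored type:

package main

import (
    "encoding/binary"
    "fmt"
)

type valueStruct struct {
    meta, userMeta byte
    expiresAt      uint64
    value          []byte
}

func (v *valueStruct) encodedSize() int {
    n := 1 // uvarint length of expiresAt
    for x := v.expiresAt; x >= 0x80; x >>= 7 {
        n++
    }
    return 2 + n + len(v.value)
}

func (v *valueStruct) encode(b []byte) {
    b[0], b[1] = v.meta, v.userMeta
    sz := binary.PutUvarint(b[2:], v.expiresAt)
    copy(b[2+sz:], v.value)
}

func (v *valueStruct) decode(b []byte) {
    v.meta, v.userMeta = b[0], b[1]
    var sz int
    v.expiresAt, sz = binary.Uvarint(b[2:])
    v.value = b[2+sz:] // length inferred from the slice, no value-length field
}

func main() {
    in := valueStruct{meta: 1, userMeta: 2, expiresAt: 1700000000, value: []byte("v")}
    buf := make([]byte, in.encodedSize())
    in.encode(buf)

    var out valueStruct
    out.decode(buf)
    fmt.Printf("%+v\n", out)
}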
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/metrics.go b/vendor/github.com/dgraph-io/badger/v2/y/metrics.go
deleted file mode 100644
index 742e1aea..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/metrics.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "expvar"
-
-var (
- // LSMSize has size of the LSM in bytes
- LSMSize *expvar.Map
- // VlogSize has size of the value log in bytes
- VlogSize *expvar.Map
- // PendingWrites tracks the number of pending writes.
- PendingWrites *expvar.Map
-
- // These are cumulative
-
- // NumReads has cumulative number of reads
- NumReads *expvar.Int
- // NumWrites has cumulative number of writes
- NumWrites *expvar.Int
- // NumBytesRead has cumulative number of bytes read
- NumBytesRead *expvar.Int
- // NumBytesWritten has cumulative number of bytes written
- NumBytesWritten *expvar.Int
- // NumLSMGets is number of LSM gets
- NumLSMGets *expvar.Map
- // NumLSMBloomHits is number of LSM bloom hits
- NumLSMBloomHits *expvar.Map
- // NumGets is number of gets
- NumGets *expvar.Int
- // NumPuts is number of puts
- NumPuts *expvar.Int
- // NumBlockedPuts is number of blocked puts
- NumBlockedPuts *expvar.Int
- // NumMemtableGets is number of memtable gets
- NumMemtableGets *expvar.Int
-)
-
-// These variables are global and have cumulative values for all kv stores.
-func init() {
- NumReads = expvar.NewInt("badger_v2_disk_reads_total")
- NumWrites = expvar.NewInt("badger_v2_disk_writes_total")
- NumBytesRead = expvar.NewInt("badger_v2_read_bytes")
- NumBytesWritten = expvar.NewInt("badger_v2_written_bytes")
- NumLSMGets = expvar.NewMap("badger_v2_lsm_level_gets_total")
- NumLSMBloomHits = expvar.NewMap("badger_v2_lsm_bloom_hits_total")
- NumGets = expvar.NewInt("badger_v2_gets_total")
- NumPuts = expvar.NewInt("badger_v2_puts_total")
- NumBlockedPuts = expvar.NewInt("badger_v2_blocked_puts_total")
- NumMemtableGets = expvar.NewInt("badger_v2_memtable_gets_total")
- LSMSize = expvar.NewMap("badger_v2_lsm_size_bytes")
- VlogSize = expvar.NewMap("badger_v2_vlog_size_bytes")
- PendingWrites = expvar.NewMap("badger_v2_pending_writes_total")
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/mmap.go b/vendor/github.com/dgraph-io/badger/v2/y/mmap.go
deleted file mode 100644
index 4a477af3..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/mmap.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
-)
-
-// Mmap uses the mmap system call to memory-map a file. If writable is true,
-// memory protection of the pages is set so that they may be written to as well.
-func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
- return mmap(fd, writable, size)
-}
-
-// Munmap unmaps a previously mapped slice.
-func Munmap(b []byte) error {
- return munmap(b)
-}
-
- // Madvise uses the madvise system call to give advice about the use of memory
-// when using a slice that is memory-mapped to a file. Set the readahead flag to
-// false if page references are expected in random order.
-func Madvise(b []byte, readahead bool) error {
- return madvise(b, readahead)
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/mmap_darwin.go b/vendor/github.com/dgraph-io/badger/v2/y/mmap_darwin.go
deleted file mode 100644
index 10b756ba..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/mmap_darwin.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// Mmap uses the mmap system call to memory-map a file. If writable is true,
-// memory protection of the pages is set so that they may be written to as well.
-func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
- mtype := unix.PROT_READ
- if writable {
- mtype |= unix.PROT_WRITE
- }
- return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
-}
-
-// Munmap unmaps a previously mapped slice.
-func munmap(b []byte) error {
- return unix.Munmap(b)
-}
-
-// This is required because the unix package does not support the madvise system call on OS X.
-func madvise(b []byte, readahead bool) error {
- advice := unix.MADV_NORMAL
- if !readahead {
- advice = unix.MADV_RANDOM
- }
-
- _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])),
- uintptr(len(b)), uintptr(advice))
- if e1 != 0 {
- return e1
- }
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/mmap_plan9.go b/vendor/github.com/dgraph-io/badger/v2/y/mmap_plan9.go
deleted file mode 100644
index 21db76bf..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/mmap_plan9.go
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
- "syscall"
-)
-
-// Mmap uses the mmap system call to memory-map a file. If writable is true,
-// memory protection of the pages is set so that they may be written to as well.
-func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
- return nil, syscall.EPLAN9
-}
-
-// Munmap unmaps a previously mapped slice.
-func munmap(b []byte) error {
- return syscall.EPLAN9
-}
-
- // Madvise uses the madvise system call to give advice about the use of memory
-// when using a slice that is memory-mapped to a file. Set the readahead flag to
-// false if page references are expected in random order.
-func madvise(b []byte, readahead bool) error {
- return syscall.EPLAN9
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/mmap_unix.go b/vendor/github.com/dgraph-io/badger/v2/y/mmap_unix.go
deleted file mode 100644
index 003f5972..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/mmap_unix.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build !windows,!darwin,!plan9
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-// Mmap uses the mmap system call to memory-map a file. If writable is true,
-// memory protection of the pages is set so that they may be written to as well.
-func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
- mtype := unix.PROT_READ
- if writable {
- mtype |= unix.PROT_WRITE
- }
- return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
-}
-
-// Munmap unmaps a previously mapped slice.
-func munmap(b []byte) error {
- return unix.Munmap(b)
-}
-
- // Madvise uses the madvise system call to give advice about the use of memory
-// when using a slice that is memory-mapped to a file. Set the readahead flag to
-// false if page references are expected in random order.
-func madvise(b []byte, readahead bool) error {
- flags := unix.MADV_NORMAL
- if !readahead {
- flags = unix.MADV_RANDOM
- }
- return unix.Madvise(b, flags)
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/mmap_windows.go b/vendor/github.com/dgraph-io/badger/v2/y/mmap_windows.go
deleted file mode 100644
index b2419af9..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/mmap_windows.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// +build windows
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "fmt"
- "os"
- "syscall"
- "unsafe"
-)
-
-func mmap(fd *os.File, write bool, size int64) ([]byte, error) {
- protect := syscall.PAGE_READONLY
- access := syscall.FILE_MAP_READ
-
- if write {
- protect = syscall.PAGE_READWRITE
- access = syscall.FILE_MAP_WRITE
- }
- fi, err := fd.Stat()
- if err != nil {
- return nil, err
- }
-
- // On Windows, we cannot mmap a file beyond its actual size.
- // So truncate the file to the size of the mmap.
- if fi.Size() < size {
- if err := fd.Truncate(size); err != nil {
- return nil, fmt.Errorf("truncate: %s", err)
- }
- }
-
- // Open a file mapping handle.
- sizehi := uint32(size >> 32)
- sizelo := uint32(size) & 0xffffffff
-
- handler, err := syscall.CreateFileMapping(syscall.Handle(fd.Fd()), nil,
- uint32(protect), sizehi, sizelo, nil)
- if err != nil {
- return nil, os.NewSyscallError("CreateFileMapping", err)
- }
-
- // Create the memory map.
- addr, err := syscall.MapViewOfFile(handler, uint32(access), 0, 0, uintptr(size))
- if addr == 0 {
- return nil, os.NewSyscallError("MapViewOfFile", err)
- }
-
- // Close mapping handle.
- if err := syscall.CloseHandle(syscall.Handle(handler)); err != nil {
- return nil, os.NewSyscallError("CloseHandle", err)
- }
-
- // Slice memory layout
- // Copied this snippet from golang/sys package
- var sl = struct {
- addr uintptr
- len int
- cap int
- }{addr, int(size), int(size)}
-
- // Use unsafe to turn sl into a []byte.
- data := *(*[]byte)(unsafe.Pointer(&sl))
-
- return data, nil
-}
-
-func munmap(b []byte) error {
- return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&b[0])))
-}
-
-func madvise(b []byte, readahead bool) error {
- // Do Nothing. We don’t care about this setting on Windows
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/watermark.go b/vendor/github.com/dgraph-io/badger/v2/y/watermark.go
deleted file mode 100644
index 1462cb73..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/watermark.go
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Copyright 2016-2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "container/heap"
- "context"
- "sync/atomic"
-)
-
-type uint64Heap []uint64
-
-func (u uint64Heap) Len() int { return len(u) }
-func (u uint64Heap) Less(i, j int) bool { return u[i] < u[j] }
-func (u uint64Heap) Swap(i, j int) { u[i], u[j] = u[j], u[i] }
-func (u *uint64Heap) Push(x interface{}) { *u = append(*u, x.(uint64)) }
-func (u *uint64Heap) Pop() interface{} {
- old := *u
- n := len(old)
- x := old[n-1]
- *u = old[0 : n-1]
- return x
-}
-
- // mark contains one or more indices, along with a done boolean to indicate the
-// status of the index: begin or done. It also contains waiters, who could be
-// waiting for the watermark to reach >= a certain index.
-type mark struct {
- // Either this is an (index, waiter) pair or (index, done) or (indices, done).
- index uint64
- waiter chan struct{}
- indices []uint64
- done bool // Set to true if the index is done.
-}
-
-// WaterMark is used to keep track of the minimum un-finished index. Typically, an index k becomes
-// finished or "done" according to a WaterMark once Done(k) has been called
-// 1. as many times as Begin(k) has, AND
-// 2. a positive number of times.
-//
-// An index may also become "done" by calling SetDoneUntil at a time such that it is not
-// inter-mingled with Begin/Done calls.
-//
-// Since doneUntil and lastIndex addresses are passed to sync/atomic packages, we ensure that they
-// are 64-bit aligned by putting them at the beginning of the structure.
-type WaterMark struct {
- doneUntil uint64
- lastIndex uint64
- Name string
- markCh chan mark
-}
-
-// Init initializes a WaterMark struct. MUST be called before using it.
-func (w *WaterMark) Init(closer *Closer) {
- w.markCh = make(chan mark, 100)
- go w.process(closer)
-}
-
-// Begin sets the last index to the given value.
-func (w *WaterMark) Begin(index uint64) {
- atomic.StoreUint64(&w.lastIndex, index)
- w.markCh <- mark{index: index, done: false}
-}
-
-// BeginMany works like Begin but accepts multiple indices.
-func (w *WaterMark) BeginMany(indices []uint64) {
- atomic.StoreUint64(&w.lastIndex, indices[len(indices)-1])
- w.markCh <- mark{index: 0, indices: indices, done: false}
-}
-
-// Done sets a single index as done.
-func (w *WaterMark) Done(index uint64) {
- w.markCh <- mark{index: index, done: true}
-}
-
-// DoneMany works like Done but accepts multiple indices.
-func (w *WaterMark) DoneMany(indices []uint64) {
- w.markCh <- mark{index: 0, indices: indices, done: true}
-}
-
-// DoneUntil returns the maximum index that has the property that all indices
-// less than or equal to it are done.
-func (w *WaterMark) DoneUntil() uint64 {
- return atomic.LoadUint64(&w.doneUntil)
-}
-
-// SetDoneUntil sets the maximum index that has the property that all indices
-// less than or equal to it are done.
-func (w *WaterMark) SetDoneUntil(val uint64) {
- atomic.StoreUint64(&w.doneUntil, val)
-}
-
-// LastIndex returns the last index for which Begin has been called.
-func (w *WaterMark) LastIndex() uint64 {
- return atomic.LoadUint64(&w.lastIndex)
-}
-
-// WaitForMark waits until the given index is marked as done.
-func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error {
- if w.DoneUntil() >= index {
- return nil
- }
- waitCh := make(chan struct{})
- w.markCh <- mark{index: index, waiter: waitCh}
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-waitCh:
- return nil
- }
-}
-
-// process is used to process the Mark channel. This is not thread-safe,
-// so only run one goroutine for process. One is sufficient, because
-// all goroutine ops use purely memory and cpu.
- // Each index has to emit at least one begin watermark in serial order, otherwise waiters
- // can get blocked indefinitely. Example: if we had a watermark at 100 and a waiter at 101,
- // and no watermark is emitted at index 101, the waiter would get stuck indefinitely, as it
- // can't decide whether the task at 101 has decided not to emit a watermark or simply hasn't
- // been scheduled yet.
-func (w *WaterMark) process(closer *Closer) {
- defer closer.Done()
-
- var indices uint64Heap
- // pending maps raft proposal index to the number of pending mutations for this proposal.
- pending := make(map[uint64]int)
- waiters := make(map[uint64][]chan struct{})
-
- heap.Init(&indices)
-
- processOne := func(index uint64, done bool) {
- // If not already done, then set. Otherwise, don't undo a done entry.
- prev, present := pending[index]
- if !present {
- heap.Push(&indices, index)
- }
-
- delta := 1
- if done {
- delta = -1
- }
- pending[index] = prev + delta
-
- // Update mark by going through all indices in order, checking if they have
- // been done. Stop at the first index that isn't done.
- doneUntil := w.DoneUntil()
- if doneUntil > index {
- AssertTruef(false, "Name: %s doneUntil: %d. Index: %d", w.Name, doneUntil, index)
- }
-
- until := doneUntil
- loops := 0
-
- for len(indices) > 0 {
- min := indices[0]
- if done := pending[min]; done > 0 {
- break // len(indices) will be > 0.
- }
- // Even if done is called multiple times causing it to become
- // negative, we should still pop the index.
- heap.Pop(&indices)
- delete(pending, min)
- until = min
- loops++
- }
-
- if until != doneUntil {
- AssertTrue(atomic.CompareAndSwapUint64(&w.doneUntil, doneUntil, until))
- }
-
- notifyAndRemove := func(idx uint64, toNotify []chan struct{}) {
- for _, ch := range toNotify {
- close(ch)
- }
- delete(waiters, idx) // Release the memory back.
- }
-
- if until-doneUntil <= uint64(len(waiters)) {
- // Issue #908 showed that if doneUntil is close to 2^60 while until is zero, this loop
- // can hog the CPU just iterating over integers, creating a busy-wait loop. So, only take
- // this path if until - doneUntil is less than the number of waiters.
- for idx := doneUntil + 1; idx <= until; idx++ {
- if toNotify, ok := waiters[idx]; ok {
- notifyAndRemove(idx, toNotify)
- }
- }
- } else {
- for idx, toNotify := range waiters {
- if idx <= until {
- notifyAndRemove(idx, toNotify)
- }
- }
- } // end of notifying waiters.
- }
-
- for {
- select {
- case <-closer.HasBeenClosed():
- return
- case mark := <-w.markCh:
- if mark.waiter != nil {
- doneUntil := atomic.LoadUint64(&w.doneUntil)
- if doneUntil >= mark.index {
- close(mark.waiter)
- } else {
- ws, ok := waiters[mark.index]
- if !ok {
- waiters[mark.index] = []chan struct{}{mark.waiter}
- } else {
- waiters[mark.index] = append(ws, mark.waiter)
- }
- }
- } else {
- if mark.index > 0 {
- processOne(mark.index, mark.done)
- }
- for _, index := range mark.indices {
- processOne(index, mark.done)
- }
- }
- }
- }
-}
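If the vendored y package were still importable, a minimal usage sketch of the watermark might look like this. Begin registers pending indices, Done retires them, and DoneUntil only advances past an index once everything at or below it is done; SignalAndWait is assumed from its uses elsewhere in this diff.

package main

import (
    "context"
    "fmt"

    "github.com/dgraph-io/badger/v2/y"
)

func main() {
    closer := y.NewCloser(1) // one count for the watermark's process goroutine
    w := &y.WaterMark{Name: "example"}
    w.Init(closer)

    w.Begin(1)
    w.Begin(2)
    w.Done(1) // 1 is finished; 2 is still pending

    // Blocks until index 1 is marked done, so DoneUntil() >= 1 afterwards.
    if err := w.WaitForMark(context.Background(), 1); err != nil {
        panic(err)
    }
    fmt.Println(w.DoneUntil() >= 1) // true; it cannot pass 1 while 2 is pending

    w.Done(2)
    closer.SignalAndWait() // stop the process goroutine
}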
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/y.go b/vendor/github.com/dgraph-io/badger/v2/y/y.go
deleted file mode 100644
index 554a413e..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/y.go
+++ /dev/null
@@ -1,516 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "hash/crc32"
- "io"
- "math"
- "os"
- "reflect"
- "sync"
- "time"
- "unsafe"
-
- "github.com/pkg/errors"
-)
-
-var (
- // ErrEOF indicates an end of file when trying to read from a memory mapped file
- // and encountering the end of slice.
- ErrEOF = errors.New("End of mapped region")
-)
-
-const (
- // Sync indicates that O_DSYNC should be set on the underlying file,
- // ensuring that data writes do not return until the data is flushed
- // to disk.
- Sync = 1 << iota
- // ReadOnly opens the underlying file on a read-only basis.
- ReadOnly
-)
-
-var (
- // This is O_DSYNC (datasync) on platforms that support it -- see file_unix.go
- datasyncFileFlag = 0x0
-
- // CastagnoliCrcTable is a CRC32 polynomial table
- CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli)
-
- // Dummy channel for nil closers.
- dummyCloserChan = make(chan struct{})
-)
-
-// OpenExistingFile opens an existing file, errors if it doesn't exist.
-func OpenExistingFile(filename string, flags uint32) (*os.File, error) {
- openFlags := os.O_RDWR
- if flags&ReadOnly != 0 {
- openFlags = os.O_RDONLY
- }
-
- if flags&Sync != 0 {
- openFlags |= datasyncFileFlag
- }
- return os.OpenFile(filename, openFlags, 0)
-}
-
- // CreateSyncedFile creates a new file (using O_EXCL), erroring if it already exists.
-func CreateSyncedFile(filename string, sync bool) (*os.File, error) {
- flags := os.O_RDWR | os.O_CREATE | os.O_EXCL
- if sync {
- flags |= datasyncFileFlag
- }
- return os.OpenFile(filename, flags, 0600)
-}
-
-// OpenSyncedFile creates the file if one doesn't exist.
-func OpenSyncedFile(filename string, sync bool) (*os.File, error) {
- flags := os.O_RDWR | os.O_CREATE
- if sync {
- flags |= datasyncFileFlag
- }
- return os.OpenFile(filename, flags, 0600)
-}
-
-// OpenTruncFile opens the file with O_RDWR | O_CREATE | O_TRUNC
-func OpenTruncFile(filename string, sync bool) (*os.File, error) {
- flags := os.O_RDWR | os.O_CREATE | os.O_TRUNC
- if sync {
- flags |= datasyncFileFlag
- }
- return os.OpenFile(filename, flags, 0600)
-}
-
-// SafeCopy does append(a[:0], src...).
-func SafeCopy(a, src []byte) []byte {
- return append(a[:0], src...)
-}
-
-// Copy copies a byte slice and returns the copied slice.
-func Copy(a []byte) []byte {
- b := make([]byte, len(a))
- copy(b, a)
- return b
-}
-
-// KeyWithTs generates a new key by appending ts to key.
-func KeyWithTs(key []byte, ts uint64) []byte {
- out := make([]byte, len(key)+8)
- copy(out, key)
- binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
- return out
-}
-
-// ParseTs parses the timestamp from the key bytes.
-func ParseTs(key []byte) uint64 {
- if len(key) <= 8 {
- return 0
- }
- return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:])
-}
-
-// CompareKeys compares the keys ignoring the timestamp suffix, and compares the
-// timestamps only if the key portions are equal.
-// Note that "a" would sort higher than "aa" if we used bytes.Compare on the full keys.
-// All keys must carry a timestamp suffix.
-func CompareKeys(key1, key2 []byte) int {
- if cmp := bytes.Compare(key1[:len(key1)-8], key2[:len(key2)-8]); cmp != 0 {
- return cmp
- }
- return bytes.Compare(key1[len(key1)-8:], key2[len(key2)-8:])
-}
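
KeyWithTs appends the version as math.MaxUint64-ts in big-endian, so for the same user key a higher timestamp compares lower and the newest version sorts first. A runnable illustration (reimplementing the two helpers locally):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math"
)

func keyWithTs(key []byte, ts uint64) []byte {
	out := make([]byte, len(key)+8)
	copy(out, key)
	binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
	return out
}

func parseTs(key []byte) uint64 {
	return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:])
}

func main() {
	newer := keyWithTs([]byte("foo"), 10)
	older := keyWithTs([]byte("foo"), 3)
	fmt.Println(bytes.Compare(newer, older)) // -1: version 10 sorts before version 3
	fmt.Println(parseTs(newer))              // 10
}
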
-
-// ParseKey parses the actual key from the key bytes.
-func ParseKey(key []byte) []byte {
- if key == nil {
- return nil
- }
-
- return key[:len(key)-8]
-}
-
-// SameKey checks for key equality ignoring the version timestamp suffix.
-func SameKey(src, dst []byte) bool {
- if len(src) != len(dst) {
- return false
- }
- return bytes.Equal(ParseKey(src), ParseKey(dst))
-}
-
-// Slice holds a reusable buf; it reallocates only if you request a larger size than ever before.
-// One drawback: with n distinct sizes requested in random order, it may reallocate log(n) times.
-type Slice struct {
- buf []byte
-}
-
-// Resize reuses the Slice's buffer (or makes a new one) and returns a slice in that buffer of
-// length sz.
-func (s *Slice) Resize(sz int) []byte {
- if cap(s.buf) < sz {
- s.buf = make([]byte, sz)
- }
- return s.buf[0:sz]
-}
-
-// FixedDuration returns a string representation of the given duration in
-// hours, minutes, and seconds.
-func FixedDuration(d time.Duration) string {
- str := fmt.Sprintf("%02ds", int(d.Seconds())%60)
- if d >= time.Minute {
- str = fmt.Sprintf("%02dm", int(d.Minutes())%60) + str
- }
- if d >= time.Hour {
- str = fmt.Sprintf("%02dh", int(d.Hours())) + str
- }
- return str
-}
-
-// Closer holds the two things we need to close a goroutine and wait for it to finish: a chan
-// to tell the goroutine to shut down, and a WaitGroup with which to wait for it to finish shutting
-// down.
-type Closer struct {
- closed chan struct{}
- waiting sync.WaitGroup
- closeOnce sync.Once
-}
-
-// NewCloser constructs a new Closer, with an initial count on the WaitGroup.
-func NewCloser(initial int) *Closer {
- ret := &Closer{closed: make(chan struct{})}
- ret.waiting.Add(initial)
- return ret
-}
-
-// AddRunning Add()'s delta to the WaitGroup.
-func (lc *Closer) AddRunning(delta int) {
- lc.waiting.Add(delta)
-}
-
-// Signal signals the HasBeenClosed signal.
-func (lc *Closer) Signal() {
- // TODO(ibrahim): Change Signal to return an error on the next badger breaking change.
- lc.closeOnce.Do(func() {
- close(lc.closed)
- })
-}
-
-// HasBeenClosed gets signaled when Signal() is called.
-func (lc *Closer) HasBeenClosed() <-chan struct{} {
- if lc == nil {
- return dummyCloserChan
- }
- return lc.closed
-}
-
-// Done calls Done() on the WaitGroup.
-func (lc *Closer) Done() {
- if lc == nil {
- return
- }
- lc.waiting.Done()
-}
-
-// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done
-// calls to balance out.)
-func (lc *Closer) Wait() {
- lc.waiting.Wait()
-}
-
-// SignalAndWait calls Signal(), then Wait().
-func (lc *Closer) SignalAndWait() {
- lc.Signal()
- lc.Wait()
-}
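
A minimal usage sketch for Closer, assuming the type above is in scope: the owner signals shutdown and waits for the worker's Done.

func runWorker() {
	c := NewCloser(1) // WaitGroup starts at 1 for the goroutine below
	go func() {
		defer c.Done()
		<-c.HasBeenClosed() // blocks until Signal() is called
		// ...clean up here...
	}()
	c.SignalAndWait() // tell the worker to stop, then wait for its Done()
}
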
-
-// Throttle allows a limited number of workers to run at a time. It also
-// provides a mechanism to check for errors encountered by workers and wait for
-// them to finish.
-type Throttle struct {
- once sync.Once
- wg sync.WaitGroup
- ch chan struct{}
- errCh chan error
- finishErr error
-}
-
-// NewThrottle creates a new throttle with a max number of workers.
-func NewThrottle(max int) *Throttle {
- return &Throttle{
- ch: make(chan struct{}, max),
- errCh: make(chan error, max),
- }
-}
-
-// Do should be called by workers before they start working. It blocks if the
-// maximum number of workers are already working. If it detects an error from
-// previously Done workers, it returns that error.
-func (t *Throttle) Do() error {
- for {
- select {
- case t.ch <- struct{}{}:
- t.wg.Add(1)
- return nil
- case err := <-t.errCh:
- if err != nil {
- return err
- }
- }
- }
-}
-
-// Done should be called by workers when they finish working. They can also
-// pass the error status of work done.
-func (t *Throttle) Done(err error) {
- if err != nil {
- t.errCh <- err
- }
- select {
- case <-t.ch:
- default:
- panic("Throttle Do Done mismatch")
- }
- t.wg.Done()
-}
-
-// Finish waits until all workers have finished working. It returns any error passed by Done.
-// If Finish is called multiple times, it waits for workers to finish only once (the first time);
-// subsequent calls return the same error found on the first call.
-func (t *Throttle) Finish() error {
- t.once.Do(func() {
- t.wg.Wait()
- close(t.ch)
- close(t.errCh)
- for err := range t.errCh {
- if err != nil {
- t.finishErr = err
- return
- }
- }
- })
-
- return t.finishErr
-}
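
A usage sketch for Throttle, assuming the type above is in scope: Do gates admission, each worker reports through Done, and Finish collects the first error.

func runJobs(jobs []func() error) error {
	t := NewThrottle(4) // at most 4 jobs in flight
	for _, job := range jobs {
		if err := t.Do(); err != nil { // blocks while 4 workers are busy
			return err // a previously Done worker failed
		}
		go func(job func() error) {
			t.Done(job()) // report this worker's result
		}(job)
	}
	return t.Finish() // wait for everyone; returns the first error, if any
}
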
-
-// U32ToBytes converts the given Uint32 to bytes
-func U32ToBytes(v uint32) []byte {
- var uBuf [4]byte
- binary.BigEndian.PutUint32(uBuf[:], v)
- return uBuf[:]
-}
-
-// BytesToU32 converts the given byte slice to uint32
-func BytesToU32(b []byte) uint32 {
- return binary.BigEndian.Uint32(b)
-}
-
-// U32SliceToBytes converts the given Uint32 slice to byte slice
-func U32SliceToBytes(u32s []uint32) []byte {
- if len(u32s) == 0 {
- return nil
- }
- var b []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- hdr.Len = len(u32s) * 4
- hdr.Cap = hdr.Len
- hdr.Data = uintptr(unsafe.Pointer(&u32s[0]))
- return b
-}
-
-// BytesToU32Slice converts the given byte slice to uint32 slice
-func BytesToU32Slice(b []byte) []uint32 {
- if len(b) == 0 {
- return nil
- }
- var u32s []uint32
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&u32s))
- hdr.Len = len(b) / 4
- hdr.Cap = hdr.Len
- hdr.Data = uintptr(unsafe.Pointer(&b[0]))
- return u32s
-}
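
Note the asymmetry between these helpers: U32ToBytes/BytesToU32 copy with big-endian encoding, while U32SliceToBytes/BytesToU32Slice reinterpret the backing array in native byte order with zero copy, so input and output alias the same memory. A quick round-trip, assuming the functions above are in scope:

u := []uint32{1, 2, 3}
b := U32SliceToBytes(u)    // no copy: b points at u's backing array
u2 := BytesToU32Slice(b)   // aliases the same memory again
fmt.Println(len(b), u2[2]) // 12 3
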
-
-// page struct contains one underlying buffer.
-type page struct {
- buf []byte
-}
-
-// PageBuffer consists of many pages. A page is a wrapper over []byte. PageBuffer can act as a
-// replacement for bytes.Buffer. Instead of having a single underlying buffer, it has multiple
-// underlying buffers, and hence avoids any copy during relocation (as happens in bytes.Buffer).
-// PageBuffer allocates memory in pages. Once a page is full, it allocates a page with double the
-// size of the previous page. Its functions are not thread safe.
-type PageBuffer struct {
- pages []*page
-
- length int // Length of PageBuffer.
- nextPageSize int // Size of next page to be allocated.
-}
-
-// NewPageBuffer returns a new PageBuffer with first page having size pageSize.
-func NewPageBuffer(pageSize int) *PageBuffer {
- b := &PageBuffer{}
- b.pages = append(b.pages, &page{buf: make([]byte, 0, pageSize)})
- b.nextPageSize = pageSize * 2
- return b
-}
-
-// Write writes data to PageBuffer b. It returns the number of bytes written and any error encountered.
-func (b *PageBuffer) Write(data []byte) (int, error) {
- dataLen := len(data)
- for {
- cp := b.pages[len(b.pages)-1] // Current page.
-
- n := copy(cp.buf[len(cp.buf):cap(cp.buf)], data)
- cp.buf = cp.buf[:len(cp.buf)+n]
- b.length += n
-
- if len(data) == n {
- break
- }
- data = data[n:]
-
- b.pages = append(b.pages, &page{buf: make([]byte, 0, b.nextPageSize)})
- b.nextPageSize *= 2
- }
-
- return dataLen, nil
-}
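
Since each new page doubles the previous size, a PageBuffer started at 4 bytes absorbs a 10-byte write as a full 4-byte page plus a cap-8 second page, with no copying between pages. A usage sketch, assuming the type above:

b := NewPageBuffer(4)
n, _ := b.Write([]byte("0123456789")) // 4 bytes fill page 1, 6 land in a cap-8 page 2
fmt.Println(n, b.Len())               // 10 10
fmt.Println(string(b.Bytes()))        // 0123456789 (Bytes copies into one slice)
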
-
-// WriteByte writes a single byte to PageBuffer and returns any encountered error.
-func (b *PageBuffer) WriteByte(data byte) error {
- _, err := b.Write([]byte{data})
- return err
-}
-
-// Len returns length of PageBuffer.
-func (b *PageBuffer) Len() int {
- return b.length
-}
-
-// pageForOffset returns pageIdx and startIdx for the offset.
-func (b *PageBuffer) pageForOffset(offset int) (int, int) {
- AssertTrue(offset < b.length)
-
- var pageIdx, startIdx, sizeNow int
- for i := 0; i < len(b.pages); i++ {
- cp := b.pages[i]
-
- if sizeNow+len(cp.buf)-1 < offset {
- sizeNow += len(cp.buf)
- } else {
- pageIdx = i
- startIdx = offset - sizeNow
- break
- }
- }
-
- return pageIdx, startIdx
-}
-
-// Truncate truncates PageBuffer to length n.
-func (b *PageBuffer) Truncate(n int) {
- pageIdx, startIdx := b.pageForOffset(n)
- // For simplicity of the code, reject the extra pages. (These pages could be kept for reuse.)
- b.pages = b.pages[:pageIdx+1]
- cp := b.pages[len(b.pages)-1]
- cp.buf = cp.buf[:startIdx]
- b.length = n
-}
-
-// Bytes returns whole Buffer data as single []byte.
-func (b *PageBuffer) Bytes() []byte {
- buf := make([]byte, b.length)
- written := 0
- for i := 0; i < len(b.pages); i++ {
- written += copy(buf[written:], b.pages[i].buf)
- }
-
- return buf
-}
-
-// WriteTo writes whole buffer to w. It returns number of bytes written and any error encountered.
-func (b *PageBuffer) WriteTo(w io.Writer) (int64, error) {
- written := int64(0)
- for i := 0; i < len(b.pages); i++ {
- n, err := w.Write(b.pages[i].buf)
- written += int64(n)
- if err != nil {
- return written, err
- }
- }
-
- return written, nil
-}
-
-// NewReaderAt returns a reader which starts reading from offset in page buffer.
-func (b *PageBuffer) NewReaderAt(offset int) *PageBufferReader {
- pageIdx, startIdx := b.pageForOffset(offset)
-
- return &PageBufferReader{
- buf: b,
- pageIdx: pageIdx,
- startIdx: startIdx,
- }
-}
-
-// PageBufferReader is a reader for PageBuffer.
-type PageBufferReader struct {
- buf *PageBuffer // Underlying page buffer.
- pageIdx int // Idx of page from where it will start reading.
- startIdx int // Idx inside page - buf.pages[pageIdx] from where it will start reading.
-}
-
-// Read reads up to len(p) bytes. It returns the number of bytes read and any error encountered.
-func (r *PageBufferReader) Read(p []byte) (int, error) {
- // Check if there is enough to Read.
- pc := len(r.buf.pages)
-
- read := 0
- for r.pageIdx < pc && read < len(p) {
- cp := r.buf.pages[r.pageIdx] // Current Page.
- endIdx := len(cp.buf) // Last Idx up to which we can read from this page.
-
- n := copy(p[read:], cp.buf[r.startIdx:endIdx])
- read += n
- r.startIdx += n
-
- // Instead of len(cp.buf), we compare with cap(cp.buf). This ensures that we move to the
- // next page only when we have read all of its data. Reading from the last page is an edge
- // case: we don't want to move past the last page until it is full to its capacity.
- if r.startIdx >= cap(cp.buf) {
- // We should move to next page.
- r.pageIdx++
- r.startIdx = 0
- continue
- }
-
- // When the last page is not full to its capacity and we have read all its data up
- // to its length, just break out of the loop.
- if r.pageIdx == pc-1 {
- break
- }
- }
-
- if read == 0 {
- return read, io.EOF
- }
-
- return read, nil
-}
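
Reading back goes through NewReaderAt, which resolves an absolute offset to a (page, index) pair and then copies across page boundaries as needed. A sketch, assuming the types above:

b := NewPageBuffer(4)
_, _ = b.Write([]byte("hello world")) // spans a cap-4 page and a cap-8 page
r := b.NewReaderAt(6)                 // offset 6 lands inside the second page
p := make([]byte, 5)
n, err := r.Read(p)
fmt.Println(n, err, string(p[:n])) // 5 <nil> world
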
diff --git a/vendor/github.com/dgraph-io/badger/v2/y/zstd.go b/vendor/github.com/dgraph-io/badger/v2/y/zstd.go
deleted file mode 100644
index 57018680..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/y/zstd.go
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "sync"
-
- "github.com/klauspost/compress/zstd"
-)
-
-var (
- decoder *zstd.Decoder
- encoder *zstd.Encoder
-
- encOnce, decOnce sync.Once
-)
-
-// ZSTDDecompress decompresses a block using ZSTD algorithm.
-func ZSTDDecompress(dst, src []byte) ([]byte, error) {
- decOnce.Do(func() {
- var err error
- decoder, err = zstd.NewReader(nil)
- Check(err)
- })
- return decoder.DecodeAll(src, dst[:0])
-}
-
-// ZSTDCompress compresses a block using ZSTD algorithm.
-func ZSTDCompress(dst, src []byte, compressionLevel int) ([]byte, error) {
- encOnce.Do(func() {
- var err error
- level := zstd.EncoderLevelFromZstd(compressionLevel)
- encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(level))
- Check(err)
- })
- return encoder.EncodeAll(src, dst[:0]), nil
-}
-
-// ZSTDCompressBound returns the worst case size needed for a destination buffer.
-// The klauspost ZSTD library does not provide an API for the compression bound. This
-// calculation is based on the DataDog ZSTD library.
-// See https://pkg.go.dev/github.com/DataDog/zstd#CompressBound
-func ZSTDCompressBound(srcSize int) int {
- lowLimit := 128 << 10 // 128 kB
- var margin int
- if srcSize < lowLimit {
- margin = (lowLimit - srcSize) >> 11
- }
- return srcSize + (srcSize >> 8) + margin
-}
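
For reference, the klauspost encoder/decoder that the helpers above lazily initialize can be exercised directly; and for a 1 KiB input the bound works out to 1024 + (1024 >> 8) + ((128*1024 - 1024) >> 11) = 1024 + 4 + 63 = 1091 bytes. A runnable round-trip using the library API:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil) // nil writer: we only call EncodeAll
	if err != nil {
		panic(err)
	}
	dec, err := zstd.NewReader(nil) // nil reader: we only call DecodeAll
	if err != nil {
		panic(err)
	}
	src := []byte("hello hello hello hello hello")
	compressed := enc.EncodeAll(src, nil)
	out, err := dec.DecodeAll(compressed, nil)
	fmt.Println(err == nil, string(out) == string(src)) // true true
}
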
diff --git a/vendor/github.com/dgraph-io/badger/value.go b/vendor/github.com/dgraph-io/badger/value.go
deleted file mode 100644
index 53d60e0a..00000000
--- a/vendor/github.com/dgraph-io/badger/value.go
+++ /dev/null
@@ -1,1661 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bufio"
- "bytes"
- "encoding/binary"
- "encoding/json"
- "fmt"
- "hash/crc32"
- "io"
- "io/ioutil"
- "math"
- "math/rand"
- "os"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/options"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
- "golang.org/x/net/trace"
-)
-
-// Values have their first byte being byteData or byteDelete. This helps us distinguish between
-// a key that has never been seen and a key that has been explicitly deleted.
-const (
- bitDelete byte = 1 << 0 // Set if the key has been deleted.
- bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key.
- bitDiscardEarlierVersions byte = 1 << 2 // Set if earlier versions can be discarded.
- // Set if item shouldn't be discarded via compactions (used by merge operator)
- bitMergeEntry byte = 1 << 3
- // The MSB 2 bits are for transactions.
- bitTxn byte = 1 << 6 // Set if the entry is part of a txn.
- bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log.
-
- mi int64 = 1 << 20
-
- // The number of updates after which discard map should be flushed into badger.
- discardStatsFlushThreshold = 100
-)
-
-type logFile struct {
- path string
- // This is a lock on the log file. It guards the fd’s value, the file’s
- // existence and the file’s memory map.
- //
- // Use shared ownership when reading/writing the file or memory map, use
- // exclusive ownership to open/close the descriptor, unmap or remove the file.
- lock sync.RWMutex
- fd *os.File
- fid uint32
- fmap []byte
- size uint32
- loadingMode options.FileLoadingMode
-}
-
-func (lf *logFile) mmap(size int64) (err error) {
- if lf.loadingMode != options.MemoryMap {
- // Nothing to do
- return nil
- }
- lf.fmap, err = y.Mmap(lf.fd, false, size)
- if err == nil {
- err = y.Madvise(lf.fmap, false) // Disable readahead
- }
- return err
-}
-
-func (lf *logFile) munmap() (err error) {
- if lf.loadingMode != options.MemoryMap || len(lf.fmap) == 0 {
- // Nothing to do
- return nil
- }
-
- if err := y.Munmap(lf.fmap); err != nil {
- return errors.Wrapf(err, "Unable to munmap value log: %q", lf.path)
- }
- // This is important. We should set the map to nil because the munmap
- // system call doesn't change the length or capacity of the fmap slice.
- lf.fmap = nil
- return nil
-}
-
-// Acquire lock on mmap/file if you are calling this
-func (lf *logFile) read(p valuePointer, s *y.Slice) (buf []byte, err error) {
- var nbr int64
- offset := p.Offset
- if lf.loadingMode == options.FileIO {
- buf = s.Resize(int(p.Len))
- var n int
- n, err = lf.fd.ReadAt(buf, int64(offset))
- nbr = int64(n)
- } else {
- // Do not convert size to uint32, because the lf.fmap can be of size
- // 4GB, which overflows uint32 during conversion, making the size 0,
- // causing the read to fail with ErrEOF. See issue #585.
- size := int64(len(lf.fmap))
- valsz := p.Len
- lfsz := atomic.LoadUint32(&lf.size)
- if int64(offset) >= size || int64(offset+valsz) > size ||
- // Ensure that the read is within the file's actual size. It might be possible that
- // the offset+valsz length is beyond the file's actual size. This could happen when
- // dropAll and iterations are running simultaneously.
- int64(offset+valsz) > int64(lfsz) {
- err = y.ErrEOF
- } else {
- buf = lf.fmap[offset : offset+valsz]
- nbr = int64(valsz)
- }
- }
- y.NumReads.Add(1)
- y.NumBytesRead.Add(nbr)
- return buf, err
-}
-
-func (lf *logFile) doneWriting(offset uint32) error {
- // Sync before acquiring lock. (We call this from write() and thus know we have shared access
- // to the fd.)
- if err := y.FileSync(lf.fd); err != nil {
- return errors.Wrapf(err, "Unable to sync value log: %q", lf.path)
- }
-
- // Previously, we acquired a lock here on lf.lock because we invalidated the file
- // descriptor by reopening it as read-only. Now we don't invalidate the fd, but we unmap,
- // truncate and remap it. That creates a window where the mmap is no longer valid while
- // someone might be reading it, causing segfaults. Therefore, we need a lock here again.
- lf.lock.Lock()
- defer lf.lock.Unlock()
-
- // Unmap file before we truncate it. Windows cannot truncate a file that is mmapped.
- if err := lf.munmap(); err != nil {
- return errors.Wrapf(err, "failed to munmap vlog file %s", lf.fd.Name())
- }
-
- // TODO: Confirm if we need to run a file sync after truncation.
- // Truncation must run after unmapping, because Windows cannot truncate a file that is mmapped.
- if err := lf.fd.Truncate(int64(offset)); err != nil {
- return errors.Wrapf(err, "Unable to truncate file: %q", lf.path)
- }
-
- fstat, err := lf.fd.Stat()
- if err != nil {
- return errors.Wrapf(err, "Unable to check stat for %q", lf.path)
- }
- sz := fstat.Size()
- if sz == 0 {
- // File is empty. We don't need to mmap it. Return.
- return nil
- }
- y.AssertTrue(sz <= math.MaxUint32)
- lf.size = uint32(sz)
- if err = lf.mmap(sz); err != nil {
- _ = lf.fd.Close()
- return errors.Wrapf(err, "Unable to map file: %q", fstat.Name())
- }
- // Previously we used to close the file after it was written and reopen it in read-only mode.
- // We no longer open files in read-only mode. We keep all vlog files open in read-write mode.
- return nil
-}
-
-// You must hold lf.lock to sync()
-func (lf *logFile) sync() error {
- return y.FileSync(lf.fd)
-}
-
-var errStop = errors.New("Stop iteration")
-var errTruncate = errors.New("Do truncate")
-var errDeleteVlogFile = errors.New("Delete vlog file")
-
-type logEntry func(e Entry, vp valuePointer) error
-
-type safeRead struct {
- k []byte
- v []byte
-
- recordOffset uint32
-}
-
-func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
- var hbuf [headerBufSize]byte
- var err error
-
- hash := crc32.New(y.CastagnoliCrcTable)
- tee := io.TeeReader(reader, hash)
- if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
- return nil, err
- }
-
- var h header
- h.Decode(hbuf[:])
- if h.klen > uint32(1<<16) { // Key length must fit in a uint16.
- return nil, errTruncate
- }
- kl := int(h.klen)
- if cap(r.k) < kl {
- r.k = make([]byte, 2*kl)
- }
- vl := int(h.vlen)
- if cap(r.v) < vl {
- r.v = make([]byte, 2*vl)
- }
-
- e := &Entry{}
- e.offset = r.recordOffset
- e.Key = r.k[:kl]
- e.Value = r.v[:vl]
-
- if _, err = io.ReadFull(tee, e.Key); err != nil {
- if err == io.EOF {
- err = errTruncate
- }
- return nil, err
- }
- if _, err = io.ReadFull(tee, e.Value); err != nil {
- if err == io.EOF {
- err = errTruncate
- }
- return nil, err
- }
- var crcBuf [4]byte
- if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
- if err == io.EOF {
- err = errTruncate
- }
- return nil, err
- }
- crc := binary.BigEndian.Uint32(crcBuf[:])
- if crc != hash.Sum32() {
- return nil, errTruncate
- }
- e.meta = h.meta
- e.UserMeta = h.userMeta
- e.ExpiresAt = h.expiresAt
- return e, nil
-}
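
safeRead.Entry implies the on-disk record layout: a fixed-size header, then key, then value, then a big-endian CRC32 (Castagnoli) covering header+key+value via the TeeReader. A small sketch of just the checksum step (the record bytes are stand-ins):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	table := crc32.MakeTable(crc32.Castagnoli)
	record := []byte("header|key|value") // stand-in for header+key+value bytes
	sum := crc32.Checksum(record, table)

	var crcBuf [4]byte
	binary.BigEndian.PutUint32(crcBuf[:], sum) // what follows the value on disk
	ok := binary.BigEndian.Uint32(crcBuf[:]) == sum
	fmt.Println(ok) // true; a mismatch makes Entry return errTruncate
}
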
-
-// iterate iterates over the log file. It does not allocate new memory for every kv pair.
-// Therefore, the kv pair is only valid for the duration of the fn call.
-func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
- fi, err := lf.fd.Stat()
- if err != nil {
- return 0, err
- }
- if int64(offset) == fi.Size() {
- // We're at the end of the file already. No need to do anything.
- return offset, nil
- }
- if vlog.opt.ReadOnly {
- // We're not at the end of the file. We'd need to replay the entries, or
- // possibly truncate the file.
- return 0, ErrReplayNeeded
- }
- if int64(offset) > fi.Size() {
- // Return 0, which would truncate the entire file. This was the original behavior before
- // commit 7539f0a ("Fix windows dataloss issue", #1134) was merged.
- return 0, nil
- }
- // We're not at the end of the file. Let's Seek to the offset and start reading.
- if _, err := lf.fd.Seek(int64(offset), io.SeekStart); err != nil {
- return 0, errFile(err, lf.path, "Unable to seek")
- }
-
- reader := bufio.NewReader(lf.fd)
- read := &safeRead{
- k: make([]byte, 10),
- v: make([]byte, 10),
- recordOffset: offset,
- }
-
- var lastCommit uint64
- var validEndOffset uint32 = offset
- for {
- e, err := read.Entry(reader)
- if err == io.EOF {
- break
- } else if err == io.ErrUnexpectedEOF || err == errTruncate {
- break
- } else if err != nil {
- return 0, err
- } else if e == nil {
- continue
- }
-
- var vp valuePointer
- vp.Len = uint32(headerBufSize + len(e.Key) + len(e.Value) + crc32.Size)
- read.recordOffset += vp.Len
-
- vp.Offset = e.offset
- vp.Fid = lf.fid
-
- if e.meta&bitTxn > 0 {
- txnTs := y.ParseTs(e.Key)
- if lastCommit == 0 {
- lastCommit = txnTs
- }
- if lastCommit != txnTs {
- break
- }
-
- } else if e.meta&bitFinTxn > 0 {
- txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
- if err != nil || lastCommit != txnTs {
- break
- }
- // Got the end of txn. Now we can store them.
- lastCommit = 0
- validEndOffset = read.recordOffset
-
- } else {
- if lastCommit != 0 {
- // This is most likely an entry which was moved as part of GC.
- // We shouldn't get this entry in the middle of a transaction.
- break
- }
- validEndOffset = read.recordOffset
- }
-
- if err := fn(*e, vp); err != nil {
- if err == errStop {
- break
- }
- return 0, errFile(err, lf.path, "Iteration function")
- }
- }
- return validEndOffset, nil
-}
-
-func (vlog *valueLog) rewrite(f *logFile, tr trace.Trace) error {
- maxFid := atomic.LoadUint32(&vlog.maxFid)
- y.AssertTruef(uint32(f.fid) < maxFid, "fid to move: %d. Current max fid: %d", f.fid, maxFid)
- tr.LazyPrintf("Rewriting fid: %d", f.fid)
-
- wb := make([]*Entry, 0, 1000)
- var size int64
-
- y.AssertTrue(vlog.db != nil)
- var count, moved int
- fe := func(e Entry) error {
- count++
- if count%100000 == 0 {
- tr.LazyPrintf("Processing entry %d", count)
- }
-
- vs, err := vlog.db.get(e.Key)
- if err != nil {
- return err
- }
- if discardEntry(e, vs, vlog.db) {
- return nil
- }
-
- // Value is still present in value log.
- if len(vs.Value) == 0 {
- return errors.Errorf("Empty value: %+v", vs)
- }
- var vp valuePointer
- vp.Decode(vs.Value)
-
- // If the entry found from the LSM Tree points to a newer vlog file, don't do anything.
- if vp.Fid > f.fid {
- return nil
- }
- // If the entry found from the LSM Tree points to an offset greater than the one
- // read from vlog, don't do anything.
- if vp.Offset > e.offset {
- return nil
- }
- // If the entry read from LSM Tree and vlog file point to the same vlog file and offset,
- // insert them back into the DB.
- // NOTE: It might be possible that the entry read from the LSM Tree points to
- // an older vlog file. See the comments in the else part.
- if vp.Fid == f.fid && vp.Offset == e.offset {
- moved++
- // This new entry only contains the key, and a pointer to the value.
- ne := new(Entry)
- ne.meta = 0 // Remove all bits. Different keyspace doesn't need these bits.
- ne.UserMeta = e.UserMeta
- ne.ExpiresAt = e.ExpiresAt
-
- // Create a new key in a separate keyspace, prefixed by moveKey. We are not
- // allowed to rewrite an older version of key in the LSM tree, because then this older
- // version would be at the top of the LSM tree. To work correctly, reads expect the
- // latest versions to be at the top, and the older versions at the bottom.
- if bytes.HasPrefix(e.Key, badgerMove) {
- ne.Key = append([]byte{}, e.Key...)
- } else {
- ne.Key = make([]byte, len(badgerMove)+len(e.Key))
- n := copy(ne.Key, badgerMove)
- copy(ne.Key[n:], e.Key)
- }
-
- ne.Value = append([]byte{}, e.Value...)
- es := int64(ne.estimateSize(vlog.opt.ValueThreshold))
- // Consider size of value as well while considering the total size
- // of the batch. There have been reports of high memory usage in
- // rewrite because we don't consider the value size. See #1292.
- es += int64(len(e.Value))
-
- // Ensure length and size of wb is within transaction limits.
- if int64(len(wb)+1) >= vlog.opt.maxBatchCount ||
- size+es >= vlog.opt.maxBatchSize {
- tr.LazyPrintf("request has %d entries, size %d", len(wb), size)
- if err := vlog.db.batchSet(wb); err != nil {
- return err
- }
- size = 0
- wb = wb[:0]
- }
- wb = append(wb, ne)
- size += es
- } else {
- // It might be possible that the entry read from LSM Tree points to an older vlog file.
- // This can happen in the following situation. Assume DB is opened with
- // numberOfVersionsToKeep=1
- //
- // Now, if we have ONLY one key in the system, "FOO", which has been updated 3 times and
- // the same key has been garbage collected 3 times, we'll have 3 versions of the move key
- // for the same key "FOO".
- // NOTE: moveKeyi is the move key with version i.
- // Assume we have 3 move keys in L0.
- // - moveKey1 (points to vlog file 10),
- // - moveKey2 (points to vlog file 14) and
- // - moveKey3 (points to vlog file 15).
-
- // Also, assume there is another move key "moveKey1" (pointing to vlog file 6; this is
- // also a move key for key "FOO") on an upper level (say, level 3). The move key
- // "moveKey1" on level 0 was inserted because vlog file 6 was GCed.
- //
- // Here's what the arrangement looks like
- // L0 => (moveKey1 => vlog10), (moveKey2 => vlog14), (moveKey3 => vlog15)
- // L1 => ....
- // L2 => ....
- // L3 => (moveKey1 => vlog6)
- //
- // When L0 compaction runs, it keeps only moveKey3 because the number of versions
- // to keep is set to 1. (we've dropped moveKey1's latest version)
- //
- // The new arrangement of keys is
- // L0 => ....
- // L1 => (moveKey3 => vlog15)
- // L2 => ....
- // L3 => (moveKey1 => vlog6)
- //
- // Now if we try to GC vlog file 10, the entry read from vlog file will point to vlog10
- // but the entry read from LSM Tree will point to vlog6. The move key read from LSM tree
- // will point to vlog6 because we've asked for version 1 of the move key.
- //
- // This might seem like an issue but it's not really an issue because the user has set
- // the number of versions to keep to 1 and the latest version of moveKey points to the
- // correct vlog file and offset. The stale move key on L3 will be eventually dropped by
- // compaction because there are newer versions in the upper levels.
- }
- return nil
- }
-
- _, err := vlog.iterate(f, 0, func(e Entry, vp valuePointer) error {
- return fe(e)
- })
- if err != nil {
- return err
- }
-
- tr.LazyPrintf("request has %d entries, size %d", len(wb), size)
- batchSize := 1024
- var loops int
- for i := 0; i < len(wb); {
- loops++
- if batchSize == 0 {
- vlog.db.opt.Warningf("We shouldn't reach batch size of zero.")
- return ErrNoRewrite
- }
- end := i + batchSize
- if end > len(wb) {
- end = len(wb)
- }
- if err := vlog.db.batchSet(wb[i:end]); err != nil {
- if err == ErrTxnTooBig {
- // Decrease the batch size to half.
- batchSize = batchSize / 2
- tr.LazyPrintf("Dropped batch size to %d", batchSize)
- continue
- }
- return err
- }
- i += batchSize
- }
- tr.LazyPrintf("Processed %d entries in %d loops", len(wb), loops)
- tr.LazyPrintf("Total entries: %d. Moved: %d", count, moved)
- tr.LazyPrintf("Removing fid: %d", f.fid)
- var deleteFileNow bool
- // Entries written to LSM. Remove the older file now.
- {
- vlog.filesLock.Lock()
- // Just a sanity-check.
- if _, ok := vlog.filesMap[f.fid]; !ok {
- vlog.filesLock.Unlock()
- return errors.Errorf("Unable to find fid: %d", f.fid)
- }
- if vlog.iteratorCount() == 0 {
- delete(vlog.filesMap, f.fid)
- deleteFileNow = true
- } else {
- vlog.filesToBeDeleted = append(vlog.filesToBeDeleted, f.fid)
- }
- vlog.filesLock.Unlock()
- }
-
- if deleteFileNow {
- if err := vlog.deleteLogFile(f); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (vlog *valueLog) deleteMoveKeysFor(fid uint32, tr trace.Trace) error {
- db := vlog.db
- var result []*Entry
- var count, pointers uint64
- tr.LazyPrintf("Iterating over move keys to find invalids for fid: %d", fid)
- err := db.View(func(txn *Txn) error {
- opt := DefaultIteratorOptions
- opt.InternalAccess = true
- opt.PrefetchValues = false
- itr := txn.NewIterator(opt)
- defer itr.Close()
-
- for itr.Seek(badgerMove); itr.ValidForPrefix(badgerMove); itr.Next() {
- count++
- item := itr.Item()
- if item.meta&bitValuePointer == 0 {
- continue
- }
- pointers++
- var vp valuePointer
- vp.Decode(item.vptr)
- if vp.Fid == fid {
- e := &Entry{Key: y.KeyWithTs(item.Key(), item.Version()), meta: bitDelete}
- result = append(result, e)
- }
- }
- return nil
- })
- if err != nil {
- tr.LazyPrintf("Got error while iterating move keys: %v", err)
- tr.SetError()
- return err
- }
- tr.LazyPrintf("Num total move keys: %d. Num pointers: %d", count, pointers)
- tr.LazyPrintf("Number of invalid move keys found: %d", len(result))
- batchSize := 10240
- for i := 0; i < len(result); {
- end := i + batchSize
- if end > len(result) {
- end = len(result)
- }
- if err := db.batchSet(result[i:end]); err != nil {
- if err == ErrTxnTooBig {
- batchSize /= 2
- tr.LazyPrintf("Dropped batch size to %d", batchSize)
- continue
- }
- tr.LazyPrintf("Error while doing batchSet: %v", err)
- tr.SetError()
- return err
- }
- i += batchSize
- }
- tr.LazyPrintf("Move keys deletion done.")
- return nil
-}
-
-func (vlog *valueLog) incrIteratorCount() {
- atomic.AddInt32(&vlog.numActiveIterators, 1)
-}
-
-func (vlog *valueLog) iteratorCount() int {
- return int(atomic.LoadInt32(&vlog.numActiveIterators))
-}
-
-func (vlog *valueLog) decrIteratorCount() error {
- num := atomic.AddInt32(&vlog.numActiveIterators, -1)
- if num != 0 {
- return nil
- }
-
- vlog.filesLock.Lock()
- lfs := make([]*logFile, 0, len(vlog.filesToBeDeleted))
- for _, id := range vlog.filesToBeDeleted {
- lfs = append(lfs, vlog.filesMap[id])
- delete(vlog.filesMap, id)
- }
- vlog.filesToBeDeleted = nil
- vlog.filesLock.Unlock()
-
- for _, lf := range lfs {
- if err := vlog.deleteLogFile(lf); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (vlog *valueLog) deleteLogFile(lf *logFile) error {
- if lf == nil {
- return nil
- }
- lf.lock.Lock()
- defer lf.lock.Unlock()
-
- path := vlog.fpath(lf.fid)
- if err := lf.munmap(); err != nil {
- _ = lf.fd.Close()
- return err
- }
- lf.fmap = nil
- if err := lf.fd.Close(); err != nil {
- return err
- }
- return os.Remove(path)
-}
-
-func (vlog *valueLog) dropAll() (int, error) {
- // We don't want to block dropAll on any pending transactions. So, don't worry about iterator
- // count.
- var count int
- deleteAll := func() error {
- vlog.filesLock.Lock()
- defer vlog.filesLock.Unlock()
- for _, lf := range vlog.filesMap {
- if err := vlog.deleteLogFile(lf); err != nil {
- return err
- }
- count++
- }
- vlog.filesMap = make(map[uint32]*logFile)
- return nil
- }
- if err := deleteAll(); err != nil {
- return count, err
- }
-
- vlog.db.opt.Infof("Value logs deleted. Creating value log file: 0")
- if _, err := vlog.createVlogFile(0); err != nil {
- return count, err
- }
- atomic.StoreUint32(&vlog.maxFid, 0)
- return count, nil
-}
-
-// lfDiscardStats keeps track of the amount of data that could be discarded for
-// a given logfile.
-type lfDiscardStats struct {
- sync.RWMutex
- m map[uint32]int64
- flushChan chan map[uint32]int64
- closer *y.Closer
- updatesSinceFlush int
-}
-
-type valueLog struct {
- dirPath string
- elog trace.EventLog
-
- // guards our view of which files exist, which are to be deleted, and how many active iterators there are
- filesLock sync.RWMutex
- filesMap map[uint32]*logFile
- filesToBeDeleted []uint32
- // A refcount of iterators -- when this hits zero, we can delete the filesToBeDeleted.
- numActiveIterators int32
-
- db *DB
- maxFid uint32 // accessed via atomics.
- writableLogOffset uint32 // read by read, written by write. Must access via atomics.
- numEntriesWritten uint32
- opt Options
-
- garbageCh chan struct{}
- lfDiscardStats *lfDiscardStats
-}
-
-func vlogFilePath(dirPath string, fid uint32) string {
- return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
-}
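
vlogFilePath zero-pads the fid to six digits and joins it to the directory with os.PathSeparator, so, assuming a Unix separator:

fmt.Println(vlogFilePath("/data/badger", 7)) // /data/badger/000007.vlog
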
-
-func (vlog *valueLog) fpath(fid uint32) string {
- return vlogFilePath(vlog.dirPath, fid)
-}
-
-func (vlog *valueLog) populateFilesMap() error {
- vlog.filesMap = make(map[uint32]*logFile)
-
- files, err := ioutil.ReadDir(vlog.dirPath)
- if err != nil {
- return errFile(err, vlog.dirPath, "Unable to open log dir.")
- }
-
- found := make(map[uint64]struct{})
- for _, file := range files {
- if !strings.HasSuffix(file.Name(), ".vlog") {
- continue
- }
- fsz := len(file.Name())
- fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
- if err != nil {
- return errFile(err, file.Name(), "Unable to parse log id.")
- }
- if _, ok := found[fid]; ok {
- return errFile(err, file.Name(), "Duplicate file found. Please delete one.")
- }
- found[fid] = struct{}{}
-
- lf := &logFile{
- fid: uint32(fid),
- path: vlog.fpath(uint32(fid)),
- loadingMode: vlog.opt.ValueLogLoadingMode,
- }
- vlog.filesMap[uint32(fid)] = lf
- if vlog.maxFid < uint32(fid) {
- vlog.maxFid = uint32(fid)
- }
- }
- return nil
-}
-
-func (vlog *valueLog) createVlogFile(fid uint32) (*logFile, error) {
- path := vlog.fpath(fid)
- lf := &logFile{
- fid: fid,
- path: path,
- loadingMode: vlog.opt.ValueLogLoadingMode,
- }
- // writableLogOffset is only written by the write func, and read by the Read func.
- // To avoid a race condition, all reads and updates to this variable must be
- // done via atomics.
- atomic.StoreUint32(&vlog.writableLogOffset, 0)
- vlog.numEntriesWritten = 0
-
- var err error
- if lf.fd, err = y.CreateSyncedFile(path, vlog.opt.SyncWrites); err != nil {
- return nil, errFile(err, lf.path, "Create value log file")
- }
-
- removeFile := func() {
- // Remove the file so that we don't get an error when createVlogFile is
- // called for the same fid again. This could happen if there is a
- // transient error because of which we couldn't create a new file,
- // and a second attempt to create the file succeeds.
- y.Check(os.Remove(lf.fd.Name()))
- }
-
- if err = syncDir(vlog.dirPath); err != nil {
- removeFile()
- return nil, errFile(err, vlog.dirPath, "Sync value log dir")
- }
-
- if err = lf.mmap(2 * vlog.opt.ValueLogFileSize); err != nil {
- removeFile()
- return nil, errFile(err, lf.path, "Mmap value log file")
- }
-
- vlog.filesLock.Lock()
- vlog.filesMap[fid] = lf
- vlog.filesLock.Unlock()
-
- return lf, nil
-}
-
-func errFile(err error, path string, msg string) error {
- return fmt.Errorf("%s. Path=%s. Error=%v", msg, path, err)
-}
-
-func (vlog *valueLog) replayLog(lf *logFile, offset uint32, replayFn logEntry) error {
- fi, err := lf.fd.Stat()
- if err != nil {
- return errFile(err, lf.path, "Unable to run file.Stat")
- }
-
- // Alright, let's iterate now.
- endOffset, err := vlog.iterate(lf, offset, replayFn)
- if err != nil {
- return errFile(err, lf.path, "Unable to replay logfile")
- }
- if int64(endOffset) == fi.Size() {
- return nil
- }
-
- // End offset is different from file size. So, we should truncate the file
- // to that size.
- y.AssertTrue(int64(endOffset) <= fi.Size())
- if !vlog.opt.Truncate {
- return ErrTruncateNeeded
- }
-
- // The entire file should be truncated (i.e. it should be deleted).
- // If fid == maxFid then it's okay to truncate the entire file since it will be
- // used for future additions. Also, it's okay if the last file has size zero.
- // We mmap 2*opt.ValueLogFileSize for the last file. See the vlog.open() function.
- if endOffset == 0 && lf.fid != vlog.maxFid {
- return errDeleteVlogFile
- }
- if err := lf.fd.Truncate(int64(endOffset)); err != nil {
- return errFile(err, lf.path, fmt.Sprintf(
- "Truncation needed at offset %d. Can be done manually as well.", endOffset))
- }
- return nil
-}
-
-// init initializes the value log struct. This initialization needs to happen
-// before compactions start.
-func (vlog *valueLog) init(db *DB) {
- vlog.opt = db.opt
- vlog.db = db
- vlog.dirPath = vlog.opt.ValueDir
- vlog.elog = y.NoEventLog
- if db.opt.EventLogging {
- vlog.elog = trace.NewEventLog("Badger", "Valuelog")
- }
- vlog.garbageCh = make(chan struct{}, 1) // Only allow one GC at a time.
- vlog.lfDiscardStats = &lfDiscardStats{
- m: make(map[uint32]int64),
- closer: y.NewCloser(1),
- flushChan: make(chan map[uint32]int64, 16),
- }
-}
-
-func (vlog *valueLog) open(db *DB, ptr valuePointer, replayFn logEntry) error {
- go vlog.flushDiscardStats()
- if err := vlog.populateFilesMap(); err != nil {
- return err
- }
- // If no files are found, then create a new file.
- if len(vlog.filesMap) == 0 {
- _, err := vlog.createVlogFile(0)
- return err
- }
-
- fids := vlog.sortedFids()
- for _, fid := range fids {
- lf, ok := vlog.filesMap[fid]
- y.AssertTrue(ok)
- var flags uint32
- switch {
- case vlog.opt.ReadOnly:
- // If we have read only, we don't need SyncWrites.
- flags |= y.ReadOnly
- // Set sync flag.
- case vlog.opt.SyncWrites:
- flags |= y.Sync
- }
-
- // We cannot mmap the files upfront here. Windows does not like mmapped files to be
- // truncated. We might need to truncate files during a replay.
- if err := lf.open(vlog.fpath(fid), flags); err != nil {
- return err
- }
- // This file is before the value head pointer. So, we don't need to
- // replay it, and can just open it in readonly mode.
- if fid < ptr.Fid {
- // Mmap the file here, we don't need to replay it.
- if err := lf.mmap(int64(lf.size)); err != nil {
- return err
- }
- continue
- }
-
- var offset uint32
- if fid == ptr.Fid {
- offset = ptr.Offset + ptr.Len
- }
- vlog.db.opt.Infof("Replaying file id: %d at offset: %d\n", fid, offset)
- now := time.Now()
- // Replay and possible truncation done. Now we can open the file as per
- // user specified options.
- if err := vlog.replayLog(lf, offset, replayFn); err != nil {
- // Log file is corrupted. Delete it.
- if err == errDeleteVlogFile {
- delete(vlog.filesMap, fid)
- // Close the fd of the file before deleting the file, otherwise Windows complains.
- if err := lf.fd.Close(); err != nil {
- return errors.Wrapf(err, "failed to close vlog file %s", lf.fd.Name())
- }
- path := vlog.fpath(lf.fid)
- if err := os.Remove(path); err != nil {
- return y.Wrapf(err, "failed to delete empty value log file: %q", path)
- }
- continue
- }
- return err
- }
- vlog.db.opt.Infof("Replay took: %s\n", time.Since(now))
- if fid < vlog.maxFid {
- // This file has been replayed. It can now be mmapped.
- // For maxFid, the mmap would be done by the specially written code below.
- if err := lf.mmap(int64(lf.size)); err != nil {
- return err
- }
- }
- }
-
- // Seek to the end to start writing.
- last, ok := vlog.filesMap[vlog.maxFid]
- y.AssertTrue(ok)
- lastOffset, err := last.fd.Seek(0, io.SeekEnd)
- if err != nil {
- return errFile(err, last.path, "file.Seek to end")
- }
- vlog.writableLogOffset = uint32(lastOffset)
-
- // Update the head to point to the updated tail. Otherwise, even after doing a successful
- // replay and closing the DB, the value log head does not get updated, which causes the replay
- // to happen repeatedly.
- vlog.db.vhead = valuePointer{Fid: vlog.maxFid, Offset: uint32(lastOffset)}
-
- // Map the file if needed. When we create a file, it is automatically mapped.
- if err = last.mmap(2 * db.opt.ValueLogFileSize); err != nil {
- return errFile(err, last.path, "Map log file")
- }
- if err := vlog.populateDiscardStats(); err != nil {
- // Print the error and continue. We don't want to prevent the value log from opening if
- // there's an error fetching the discard stats.
- db.opt.Errorf("Failed to populate discard stats: %s", err)
- }
- return nil
-}
-
-func (lf *logFile) open(path string, flags uint32) error {
- var err error
- if lf.fd, err = y.OpenExistingFile(path, flags); err != nil {
- return y.Wrapf(err, "Error while opening file in logfile %s", path)
- }
-
- fi, err := lf.fd.Stat()
- if err != nil {
- return errFile(err, lf.path, "Unable to run file.Stat")
- }
- sz := fi.Size()
- y.AssertTruef(
- sz <= math.MaxUint32,
- "file size: %d greater than %d",
- uint32(sz), uint32(math.MaxUint32),
- )
- lf.size = uint32(sz)
- return nil
-}
-
-func (vlog *valueLog) Close() error {
- // close flushDiscardStats.
- vlog.lfDiscardStats.closer.SignalAndWait()
-
- vlog.elog.Printf("Stopping garbage collection of values.")
- defer vlog.elog.Finish()
-
- var err error
- for id, f := range vlog.filesMap {
- f.lock.Lock() // We won’t release the lock.
- if munmapErr := f.munmap(); munmapErr != nil && err == nil {
- err = munmapErr
- }
-
- maxFid := atomic.LoadUint32(&vlog.maxFid)
- if !vlog.opt.ReadOnly && id == maxFid {
- // truncate writable log file to correct offset.
- if truncErr := f.fd.Truncate(
- int64(vlog.woffset())); truncErr != nil && err == nil {
- err = truncErr
- }
- }
-
- if closeErr := f.fd.Close(); closeErr != nil && err == nil {
- err = closeErr
- }
- }
- return err
-}
-
-// sortedFids returns the file ids that are not pending deletion, sorted. Assumes we have shared
-// access to filesMap.
-func (vlog *valueLog) sortedFids() []uint32 {
- toBeDeleted := make(map[uint32]struct{})
- for _, fid := range vlog.filesToBeDeleted {
- toBeDeleted[fid] = struct{}{}
- }
- ret := make([]uint32, 0, len(vlog.filesMap))
- for fid := range vlog.filesMap {
- if _, ok := toBeDeleted[fid]; !ok {
- ret = append(ret, fid)
- }
- }
- sort.Slice(ret, func(i, j int) bool {
- return ret[i] < ret[j]
- })
- return ret
-}
-
-type request struct {
- // Input values
- Entries []*Entry
- // Output values and wait group stuff below
- Ptrs []valuePointer
- Wg sync.WaitGroup
- Err error
- ref int32
-}
-
-func (req *request) reset() {
- req.Entries = req.Entries[:0]
- req.Ptrs = req.Ptrs[:0]
- req.Wg = sync.WaitGroup{}
- req.Err = nil
- req.ref = 0
-}
-
-func (req *request) IncrRef() {
- atomic.AddInt32(&req.ref, 1)
-}
-
-func (req *request) DecrRef() {
- nRef := atomic.AddInt32(&req.ref, -1)
- if nRef > 0 {
- return
- }
- req.Entries = nil
- requestPool.Put(req)
-}
-
-func (req *request) Wait() error {
- req.Wg.Wait()
- err := req.Err
- req.DecrRef() // DecrRef after writing to DB.
- return err
-}
-
-type requests []*request
-
-func (reqs requests) DecrRef() {
- for _, req := range reqs {
- req.DecrRef()
- }
-}
-
-func (reqs requests) IncrRef() {
- for _, req := range reqs {
- req.IncrRef()
- }
-}
-
-// sync syncs the content of the latest value log file to disk. Syncing of the value log
-// directory is not required here, as it happens every time a value log file rotation happens
-// (see the createVlogFile function); during rotation, the previous value log file also gets
-// synced to disk. It only syncs the file if fid >= vlog.maxFid. In some cases, such as replay
-// (while opening the db), it might be called with fid < vlog.maxFid. To sync irrespective of
-// the file id, just call it with math.MaxUint32.
-func (vlog *valueLog) sync(fid uint32) error {
- if vlog.opt.SyncWrites {
- return nil
- }
-
- vlog.filesLock.RLock()
- maxFid := atomic.LoadUint32(&vlog.maxFid)
- // During replay it is possible to get a sync call with fid less than maxFid.
- // Because the older file has already been synced, we can return from here.
- if fid < maxFid || len(vlog.filesMap) == 0 {
- vlog.filesLock.RUnlock()
- return nil
- }
- curlf := vlog.filesMap[maxFid]
- // Sometimes it is possible that vlog.maxFid has been increased but the file creation
- // with the same id is still in progress when this function is called. In those cases
- // the entry for the file might not be present in vlog.filesMap.
- if curlf == nil {
- vlog.filesLock.RUnlock()
- return nil
- }
- curlf.lock.RLock()
- vlog.filesLock.RUnlock()
-
- err := curlf.sync()
- curlf.lock.RUnlock()
- return err
-}
-
-func (vlog *valueLog) woffset() uint32 {
- return atomic.LoadUint32(&vlog.writableLogOffset)
-}
-
-// write is thread-unsafe by design and should not be called concurrently.
-func (vlog *valueLog) write(reqs []*request) error {
- vlog.filesLock.RLock()
- maxFid := atomic.LoadUint32(&vlog.maxFid)
- curlf := vlog.filesMap[maxFid]
- vlog.filesLock.RUnlock()
-
- var buf bytes.Buffer
- flushWrites := func() error {
- if buf.Len() == 0 {
- return nil
- }
- vlog.elog.Printf("Flushing buffer of size %d to vlog", buf.Len())
- n, err := curlf.fd.Write(buf.Bytes())
- if err != nil {
- return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path)
- }
- buf.Reset()
- y.NumWrites.Add(1)
- y.NumBytesWritten.Add(int64(n))
- vlog.elog.Printf("Done")
- atomic.AddUint32(&vlog.writableLogOffset, uint32(n))
- atomic.StoreUint32(&curlf.size, vlog.writableLogOffset)
- return nil
- }
- toDisk := func() error {
- if err := flushWrites(); err != nil {
- return err
- }
- if vlog.woffset() > uint32(vlog.opt.ValueLogFileSize) ||
- vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries {
- if err := curlf.doneWriting(vlog.woffset()); err != nil {
- return err
- }
-
- newid := atomic.AddUint32(&vlog.maxFid, 1)
- y.AssertTruef(newid > 0, "newid has overflown uint32: %v", newid)
- newlf, err := vlog.createVlogFile(newid)
- if err != nil {
- return err
- }
- curlf = newlf
- atomic.AddInt32(&vlog.db.logRotates, 1)
- }
- return nil
- }
-
- for i := range reqs {
- b := reqs[i]
- b.Ptrs = b.Ptrs[:0]
- var written int
- for j := range b.Entries {
- e := b.Entries[j]
- if e.skipVlog {
- b.Ptrs = append(b.Ptrs, valuePointer{})
- continue
- }
- var p valuePointer
-
- p.Fid = curlf.fid
- // Use the offset including buffer length so far.
- p.Offset = vlog.woffset() + uint32(buf.Len())
- plen, err := encodeEntry(e, &buf) // Now encode the entry into buffer.
- if err != nil {
- return err
- }
- p.Len = uint32(plen)
- b.Ptrs = append(b.Ptrs, p)
- written++
-
- // It is possible that the size of the buffer grows beyond the max size of the value
- // log (this happens when a transaction contains entries with large value sizes) and
- // badger might run into out of memory errors. We flush the buffer here if its size
- // grows beyond the max value log size.
- if int64(buf.Len()) > vlog.db.opt.ValueLogFileSize {
- if err := flushWrites(); err != nil {
- return err
- }
- }
- }
- vlog.numEntriesWritten += uint32(written)
- // We write to disk here so that all entries that are part of the same transaction are
- // written to the same vlog file.
- writeNow :=
- vlog.woffset()+uint32(buf.Len()) > uint32(vlog.opt.ValueLogFileSize) ||
- vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries)
- if writeNow {
- if err := toDisk(); err != nil {
- return err
- }
- }
- }
- return toDisk()
-}
-
-// getFileRLocked gets the logFile and acquires an RLock() for the mmap. You must call RUnlock
-// on the file (if non-nil).
-func (vlog *valueLog) getFileRLocked(fid uint32) (*logFile, error) {
- vlog.filesLock.RLock()
- defer vlog.filesLock.RUnlock()
- ret, ok := vlog.filesMap[fid]
- if !ok {
- // log file has gone away, will need to retry the operation.
- return nil, ErrRetry
- }
- ret.lock.RLock()
- return ret, nil
-}
-
-// Read reads the value log at a given location.
-// TODO: Make this read private.
-func (vlog *valueLog) Read(vp valuePointer, s *y.Slice) ([]byte, func(), error) {
- // Check for valid offset if we are reading from writable log.
- maxFid := atomic.LoadUint32(&vlog.maxFid)
- if vp.Fid == maxFid && vp.Offset >= vlog.woffset() {
- return nil, nil, errors.Errorf(
- "Invalid value pointer offset: %d greater than current offset: %d",
- vp.Offset, vlog.woffset())
- }
-
- buf, cb, err := vlog.readValueBytes(vp, s)
- if err != nil {
- return nil, cb, err
- }
-
- if vlog.opt.VerifyValueChecksum {
- hash := crc32.New(y.CastagnoliCrcTable)
- if _, err := hash.Write(buf[:len(buf)-crc32.Size]); err != nil {
- runCallback(cb)
- return nil, nil, errors.Wrapf(err, "failed to write hash for vp %+v", vp)
- }
- // Fetch checksum from the end of the buffer.
- checksum := buf[len(buf)-crc32.Size:]
- res := binary.BigEndian.Uint32(checksum)
- if hash.Sum32() != res {
- runCallback(cb)
- return nil, nil, errors.Errorf("checksum mismatch Error: value corrupted for vp: %+v", vp)
- }
- }
- var h header
- h.Decode(buf)
- n := uint32(headerBufSize) + h.klen
- return buf[n : n+h.vlen], cb, nil
-}
-
-func (vlog *valueLog) readValueBytes(vp valuePointer, s *y.Slice) ([]byte, func(), error) {
- lf, err := vlog.getFileRLocked(vp.Fid)
- if err != nil {
- return nil, nil, err
- }
-
- buf, err := lf.read(vp, s)
- if vlog.opt.ValueLogLoadingMode == options.MemoryMap {
- return buf, lf.lock.RUnlock, err
- }
- // If we are using File I/O we unlock the file immediately
- // and return an empty function as callback.
- lf.lock.RUnlock()
- return buf, nil, err
-}
-
-// Test helper
-func valueBytesToEntry(buf []byte) (e Entry) {
- var h header
- h.Decode(buf)
- n := uint32(headerBufSize)
-
- e.Key = buf[n : n+h.klen]
- n += h.klen
- e.meta = h.meta
- e.UserMeta = h.userMeta
- e.Value = buf[n : n+h.vlen]
- return
-}
-
-func (vlog *valueLog) pickLog(head valuePointer, tr trace.Trace) (files []*logFile) {
- vlog.filesLock.RLock()
- defer vlog.filesLock.RUnlock()
- fids := vlog.sortedFids()
- if len(fids) <= 1 {
- tr.LazyPrintf("Only one or less value log file.")
- return nil
- } else if head.Fid == 0 {
- tr.LazyPrintf("Head pointer is at zero.")
- return nil
- }
-
- // Pick a candidate that contains the largest amount of discardable data
- candidate := struct {
- fid uint32
- discard int64
- }{math.MaxUint32, 0}
- vlog.lfDiscardStats.RLock()
- for _, fid := range fids {
- if fid >= head.Fid {
- break
- }
- if vlog.lfDiscardStats.m[fid] > candidate.discard {
- candidate.fid = fid
- candidate.discard = vlog.lfDiscardStats.m[fid]
- }
- }
- vlog.lfDiscardStats.RUnlock()
-
- if candidate.fid != math.MaxUint32 { // Found a candidate
- tr.LazyPrintf("Found candidate via discard stats: %v", candidate)
- files = append(files, vlog.filesMap[candidate.fid])
- } else {
- tr.LazyPrintf("Could not find candidate via discard stats. Randomly picking one.")
- }
-
- // Fallback to randomly picking a log file
- var idxHead int
- for i, fid := range fids {
- if fid == head.Fid {
- idxHead = i
- break
- }
- }
- if idxHead == 0 { // Not found or first file
- tr.LazyPrintf("Could not find any file.")
- return nil
- }
- idx := rand.Intn(idxHead) // Don’t include head.Fid. We pick a random file before it.
- if idx > 0 {
- idx = rand.Intn(idx + 1) // Another level of rand to favor smaller fids.
- }
- tr.LazyPrintf("Randomly chose fid: %d", fids[idx])
- files = append(files, vlog.filesMap[fids[idx]])
- return files
-}
-
-func discardEntry(e Entry, vs y.ValueStruct, db *DB) bool {
- if vs.Version != y.ParseTs(e.Key) {
- // Version not found. Discard.
- return true
- }
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
- return true
- }
- if (vs.Meta & bitValuePointer) == 0 {
- // Key also stores the value in LSM. Discard.
- return true
- }
- if (vs.Meta & bitFinTxn) > 0 {
- // Just a txn finish entry. Discard.
- return true
- }
- if bytes.HasPrefix(e.Key, badgerMove) {
- // Verify that the actual key entry, without the badgerMove prefix, has not been deleted.
- // If this is not done, the badgerMove entry will be kept forever, moving from
- // vlog to vlog during rewrites.
- avs, err := db.get(e.Key[len(badgerMove):])
- if err != nil {
- return false
- }
- return avs.Version == 0
- }
- return false
-}
-
-func (vlog *valueLog) doRunGC(lf *logFile, discardRatio float64, tr trace.Trace) (err error) {
- // Update stats before exiting
- defer func() {
- if err == nil {
- vlog.lfDiscardStats.Lock()
- delete(vlog.lfDiscardStats.m, lf.fid)
- vlog.lfDiscardStats.Unlock()
- }
- }()
-
- type reason struct {
- total float64
- discard float64
- count int
- }
-
- fi, err := lf.fd.Stat()
- if err != nil {
- tr.LazyPrintf("Error while finding file size: %v", err)
- tr.SetError()
- return err
- }
-
- // Set up the sampling window sizes.
- sizeWindow := float64(fi.Size()) * 0.1 // 10% of the file as window.
- sizeWindowM := sizeWindow / (1 << 20) // in MBs.
- countWindow := int(float64(vlog.opt.ValueLogMaxEntries) * 0.01) // 1% of num entries.
- tr.LazyPrintf("Size window: %5.2f. Count window: %d.", sizeWindow, countWindow)
-
- // Pick a random start point for the log.
- skipFirstM := float64(rand.Int63n(fi.Size())) // Pick a random starting location.
- skipFirstM -= sizeWindow // Avoid hitting EOF by moving back by window.
- skipFirstM /= float64(mi) // Convert to MBs.
- tr.LazyPrintf("Skip first %5.2f MB of file of size: %d MB", skipFirstM, fi.Size()/mi)
- var skipped float64
-
- var r reason
- start := time.Now()
- y.AssertTrue(vlog.db != nil)
- s := new(y.Slice)
- var numIterations int
- _, err = vlog.iterate(lf, 0, func(e Entry, vp valuePointer) error {
- numIterations++
- esz := float64(vp.Len) / (1 << 20) // in MBs.
- if skipped < skipFirstM {
- skipped += esz
- return nil
- }
-
- // Sample until we reach the window sizes or exceed 10 seconds.
- if r.count > countWindow {
- tr.LazyPrintf("Stopping sampling after %d entries.", countWindow)
- return errStop
- }
- if r.total > sizeWindowM {
- tr.LazyPrintf("Stopping sampling after reaching window size.")
- return errStop
- }
- if time.Since(start) > 10*time.Second {
- tr.LazyPrintf("Stopping sampling after 10 seconds.")
- return errStop
- }
- r.total += esz
- r.count++
-
- vs, err := vlog.db.get(e.Key)
- if err != nil {
- return err
- }
- if discardEntry(e, vs, vlog.db) {
- r.discard += esz
- return nil
- }
-
- // Value is still present in value log.
- y.AssertTrue(len(vs.Value) > 0)
- vp.Decode(vs.Value)
-
- if vp.Fid > lf.fid {
- // Value is present in a later log. Discard.
- r.discard += esz
- return nil
- }
- if vp.Offset > e.offset {
- // Value is present in a later offset, but in the same log.
- r.discard += esz
- return nil
- }
- if vp.Fid == lf.fid && vp.Offset == e.offset {
- // This is still the active entry. This would need to be rewritten.
-
- } else {
- vlog.elog.Printf("Reason=%+v\n", r)
-
- buf, cb, err := vlog.readValueBytes(vp, s)
- if err != nil {
- return errStop
- }
- ne := valueBytesToEntry(buf)
- ne.offset = vp.Offset
- ne.print("Latest Entry Header in LSM")
- e.print("Latest Entry in Log")
- runCallback(cb)
- return errors.Errorf("This shouldn't happen. Latest Pointer:%+v. Meta:%v.",
- vp, vs.Meta)
- }
- return nil
- })
-
- if err != nil {
- tr.LazyPrintf("Error while iterating for RunGC: %v", err)
- tr.SetError()
- return err
- }
- tr.LazyPrintf("Fid: %d. Skipped: %5.2fMB Num iterations: %d. Data status=%+v\n",
- lf.fid, skipped, numIterations, r)
-
- // If we couldn't sample at least 1000 KV pairs or at least 75% of the window size,
- // and what we can discard is below the threshold, we should skip the rewrite.
- if (r.count < countWindow && r.total < sizeWindowM*0.75) || r.discard < discardRatio*r.total {
- tr.LazyPrintf("Skipping GC on fid: %d", lf.fid)
- return ErrNoRewrite
- }
- if err = vlog.rewrite(lf, tr); err != nil {
- return err
- }
- tr.LazyPrintf("Done rewriting.")
- return nil
-}
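
To make the skip condition above concrete: for a 100 MB file the size window is 10 MB, and with ValueLogMaxEntries = 1000000 the count window is 10000 entries. A sketch of just the final decision, with illustrative numbers:

countWindow, sizeWindowM := 10000, 10.0 // entries, MB
discardRatio := 0.5
r := struct {
	total, discard float64
	count          int
}{total: 8.0, discard: 4.5, count: 12000}
skip := (r.count < countWindow && r.total < sizeWindowM*0.75) ||
	r.discard < discardRatio*r.total
fmt.Println(skip) // false: enough entries sampled and 4.5 >= 0.5*8.0, so GC rewrites
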
-
-func (vlog *valueLog) waitOnGC(lc *y.Closer) {
- defer lc.Done()
-
- <-lc.HasBeenClosed() // Wait for lc to be closed.
-
-	// Wait for any GC in progress to finish, and block any further GC runs by
-	// filling up the channel of size 1.
- vlog.garbageCh <- struct{}{}
-}
-
-func (vlog *valueLog) runGC(discardRatio float64, head valuePointer) error {
- select {
- case vlog.garbageCh <- struct{}{}:
- // Pick a log file for GC.
- tr := trace.New("Badger.ValueLog", "GC")
- tr.SetMaxEvents(100)
- defer func() {
- tr.Finish()
- <-vlog.garbageCh
- }()
-
- var err error
- files := vlog.pickLog(head, tr)
- if len(files) == 0 {
- tr.LazyPrintf("PickLog returned zero results.")
- return ErrNoRewrite
- }
- tried := make(map[uint32]bool)
- for _, lf := range files {
- if _, done := tried[lf.fid]; done {
- continue
- }
- tried[lf.fid] = true
- err = vlog.doRunGC(lf, discardRatio, tr)
- if err == nil {
- return vlog.deleteMoveKeysFor(lf.fid, tr)
- }
- }
- return err
- default:
- return ErrRejected
- }
-}
-
-func (vlog *valueLog) updateDiscardStats(stats map[uint32]int64) {
- select {
- case vlog.lfDiscardStats.flushChan <- stats:
- default:
- vlog.opt.Warningf("updateDiscardStats called: discard stats flushChan full, " +
- "returning without pushing to flushChan")
- }
-}
-
-func (vlog *valueLog) flushDiscardStats() {
- defer vlog.lfDiscardStats.closer.Done()
-
- mergeStats := func(stats map[uint32]int64) ([]byte, error) {
- vlog.lfDiscardStats.Lock()
- defer vlog.lfDiscardStats.Unlock()
- for fid, count := range stats {
- vlog.lfDiscardStats.m[fid] += count
- vlog.lfDiscardStats.updatesSinceFlush++
- }
-
- if vlog.lfDiscardStats.updatesSinceFlush > discardStatsFlushThreshold {
- encodedDS, err := json.Marshal(vlog.lfDiscardStats.m)
- if err != nil {
- return nil, err
- }
- vlog.lfDiscardStats.updatesSinceFlush = 0
- return encodedDS, nil
- }
- return nil, nil
- }
-
- process := func(stats map[uint32]int64) error {
- encodedDS, err := mergeStats(stats)
- if err != nil || encodedDS == nil {
- return err
- }
-
- entries := []*Entry{{
- Key: y.KeyWithTs(lfDiscardStatsKey, 1),
- Value: encodedDS,
- }}
- req, err := vlog.db.sendToWriteCh(entries)
-		// No special handling of ErrBlockedWrites is required here, as err is
-		// simply logged in the for loop below.
- if err != nil {
- return errors.Wrapf(err, "failed to push discard stats to write channel")
- }
- return req.Wait()
- }
-
- closer := vlog.lfDiscardStats.closer
- for {
- select {
- case <-closer.HasBeenClosed():
-			// For simplicity, return without processing the stats already queued in flushChan.
- return
- case stats := <-vlog.lfDiscardStats.flushChan:
- if err := process(stats); err != nil {
- vlog.opt.Errorf("unable to process discardstats with error: %s", err)
- }
- }
- }
-}
-
-// populateDiscardStats populates vlog.lfDiscardStats.
-// This function will be called while initializing valueLog.
-func (vlog *valueLog) populateDiscardStats() error {
- key := y.KeyWithTs(lfDiscardStatsKey, math.MaxUint64)
- var statsMap map[uint32]int64
- var val []byte
- var vp valuePointer
- for {
- vs, err := vlog.db.get(key)
- if err != nil {
- return err
- }
- // Value doesn't exist.
- if vs.Meta == 0 && len(vs.Value) == 0 {
- vlog.opt.Debugf("Value log discard stats empty")
- return nil
- }
- vp.Decode(vs.Value)
- // Entry stored in LSM tree.
- if vs.Meta&bitValuePointer == 0 {
- val = y.SafeCopy(val, vs.Value)
- break
- }
- // Read entry from value log.
- result, cb, err := vlog.Read(vp, new(y.Slice))
- runCallback(cb)
- val = y.SafeCopy(val, result)
- // The result is stored in val. We can break the loop from here.
- if err == nil {
- break
- }
- if err != ErrRetry {
- return err
- }
-		// If we've reached this point, we haven't found the value yet. If the current
-		// key already has the badger move prefix, break here: we've tried both the
-		// original key and the prefixed one, and "val" is still empty.
- if bytes.HasPrefix(key, badgerMove) {
- break
- }
-		// Reaching this point means the discard stats key was moved by the GC, and the
-		// actual entry is the one prefixed by the badger move key.
-		// Prepend the badger move prefix to the key and retry the lookup.
- key = append(badgerMove, key...)
- }
-
- if len(val) == 0 {
- return nil
- }
- if err := json.Unmarshal(val, &statsMap); err != nil {
- return errors.Wrapf(err, "failed to unmarshal discard stats")
- }
- vlog.opt.Debugf("Value Log Discard stats: %v", statsMap)
- vlog.lfDiscardStats.flushChan <- statsMap
- return nil
-}
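The skip condition at the end of doRunGC compresses into a small decision rule. A minimal sketch of that rule (type and function names here are illustrative, not part of badger's API):

package main

import "fmt"

// gcSample summarizes one sampling pass over a value log file.
type gcSample struct {
	totalMB   float64 // size of sampled entries, in MB
	discardMB float64 // size of sampled entries that are stale
	count     int     // number of sampled entries
}

// shouldRewrite mirrors doRunGC's skip condition: the sample must be large
// enough (by count or by size) and the stale share must reach discardRatio.
func shouldRewrite(s gcSample, countWindow int, sizeWindowMB, discardRatio float64) bool {
	sampledEnough := s.count >= countWindow || s.totalMB >= sizeWindowMB*0.75
	return sampledEnough && s.discardMB >= discardRatio*s.totalMB
}

func main() {
	s := gcSample{totalMB: 90, discardMB: 60, count: 12000}
	fmt.Println(shouldRewrite(s, 10000, 100, 0.5)) // true: enough samples, ~67% stale
}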
diff --git a/vendor/github.com/dgraph-io/badger/y/error.go b/vendor/github.com/dgraph-io/badger/y/error.go
deleted file mode 100644
index 59bb2835..00000000
--- a/vendor/github.com/dgraph-io/badger/y/error.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-// This file contains some functions for error handling. Note that we are moving
-// towards using x.Trace, i.e., RPC tracing via net/trace. But for now, these
-// functions are useful for simple checks logged on one machine.
-// Some common use cases are:
-// (1) You receive an error from external lib, and would like to check/log fatal.
-// For this, use x.Check, x.Checkf. These will check for err != nil, which is
-// more common in Go. If you want to check for boolean being true, use
-// x.Assert, x.Assertf.
-// (2) You receive an error from external lib, and would like to pass on with some
-// stack trace information. In this case, use x.Wrap or x.Wrapf.
-// (3) You want to generate a new error with stack trace info. Use x.Errorf.
-
-import (
- "fmt"
- "log"
-
- "github.com/pkg/errors"
-)
-
-var debugMode = true
-
-// Check logs fatal if err != nil.
-func Check(err error) {
- if err != nil {
- log.Fatalf("%+v", Wrap(err))
- }
-}
-
-// Check2 acts as a convenience wrapper around Check, using the 2nd argument as the error.
-func Check2(_ interface{}, err error) {
- Check(err)
-}
-
-// AssertTrue asserts that b is true. Otherwise, it logs fatal.
-func AssertTrue(b bool) {
- if !b {
- log.Fatalf("%+v", errors.Errorf("Assert failed"))
- }
-}
-
-// AssertTruef is AssertTrue with extra info.
-func AssertTruef(b bool, format string, args ...interface{}) {
- if !b {
- log.Fatalf("%+v", errors.Errorf(format, args...))
- }
-}
-
-// Wrap wraps errors from external lib.
-func Wrap(err error) error {
- if !debugMode {
- return err
- }
- return errors.Wrap(err, "")
-}
-
-// Wrapf is Wrap with extra info.
-func Wrapf(err error, format string, args ...interface{}) error {
- if !debugMode {
- if err == nil {
- return nil
- }
- return fmt.Errorf(format+" error: %+v", append(args, err)...)
- }
- return errors.Wrapf(err, format, args...)
-}
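These helpers are badger's fail-fast error handling. A minimal usage sketch (the file name is illustrative):

package main

import (
	"os"

	"github.com/dgraph-io/badger/y"
)

func main() {
	// Check logs fatal, with a stack trace in debug mode, if the error is non-nil.
	f, err := os.Open("example.txt")
	y.Check(y.Wrapf(err, "opening %q", "example.txt"))
	defer f.Close()

	// Check2 is handy when only the error of a two-value return matters.
	y.Check2(f.Seek(0, 0))
}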
diff --git a/vendor/github.com/dgraph-io/badger/y/event_log.go b/vendor/github.com/dgraph-io/badger/y/event_log.go
deleted file mode 100644
index ba9dcb1f..00000000
--- a/vendor/github.com/dgraph-io/badger/y/event_log.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "golang.org/x/net/trace"
-
-var (
- NoEventLog trace.EventLog = nilEventLog{}
-)
-
-type nilEventLog struct{}
-
-func (nel nilEventLog) Printf(format string, a ...interface{}) {}
-
-func (nel nilEventLog) Errorf(format string, a ...interface{}) {}
-
-func (nel nilEventLog) Finish() {}
diff --git a/vendor/github.com/dgraph-io/badger/y/file_dsync.go b/vendor/github.com/dgraph-io/badger/y/file_dsync.go
deleted file mode 100644
index ea4d9ab2..00000000
--- a/vendor/github.com/dgraph-io/badger/y/file_dsync.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build !dragonfly,!freebsd,!windows,!plan9
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "golang.org/x/sys/unix"
-
-func init() {
- datasyncFileFlag = unix.O_DSYNC
-}
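The datasyncFileFlag set by this init is OR'd into the open flags by the Open*File helpers in y.go, further down in this diff. On platforms that define O_DSYNC, the effect is roughly equivalent to this sketch (file name illustrative):

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// O_DSYNC makes each write return only after the data (though not
	// necessarily all metadata) has reached stable storage.
	f, err := os.OpenFile("wal.log", os.O_RDWR|os.O_CREATE|unix.O_DSYNC, 0o600)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := f.Write([]byte("entry")); err != nil {
		log.Fatal(err)
	}
}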
diff --git a/vendor/github.com/dgraph-io/badger/y/file_nodsync.go b/vendor/github.com/dgraph-io/badger/y/file_nodsync.go
deleted file mode 100644
index 54a2184e..00000000
--- a/vendor/github.com/dgraph-io/badger/y/file_nodsync.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build dragonfly freebsd windows plan9
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "syscall"
-
-func init() {
- datasyncFileFlag = syscall.O_SYNC
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/file_sync.go b/vendor/github.com/dgraph-io/badger/y/file_sync.go
deleted file mode 100644
index 19016ef6..00000000
--- a/vendor/github.com/dgraph-io/badger/y/file_sync.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// +build !darwin go1.12
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "os"
-
-// FileSync calls os.File.Sync with the right parameters.
-// This function can be removed once we stop supporting Go 1.11
-// on MacOS.
-//
-// More info: https://golang.org/issue/26650.
-func FileSync(f *os.File) error { return f.Sync() }
diff --git a/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go b/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go
deleted file mode 100644
index 01c79f23..00000000
--- a/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build darwin,!go1.12
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
- "syscall"
-)
-
-// FileSync calls os.File.Sync with the right parameters.
-// This function can be removed once we stop supporting Go 1.11
-// on MacOS.
-//
-// More info: https://golang.org/issue/26650.
-func FileSync(f *os.File) error {
- _, _, err := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), syscall.F_FULLFSYNC, 0)
- if err == 0 {
- return nil
- }
- return err
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/iterator.go b/vendor/github.com/dgraph-io/badger/y/iterator.go
deleted file mode 100644
index d3142c05..00000000
--- a/vendor/github.com/dgraph-io/badger/y/iterator.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "bytes"
- "encoding/binary"
-)
-
-// ValueStruct represents the value info associated with a key, along with the
-// internal Meta field.
-type ValueStruct struct {
- Meta byte
- UserMeta byte
- ExpiresAt uint64
- Value []byte
-
- Version uint64 // This field is not serialized. Only for internal usage.
-}
-
-func sizeVarint(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-
-// EncodedSize is the size of the ValueStruct when encoded.
-func (v *ValueStruct) EncodedSize() uint16 {
- sz := len(v.Value) + 2 // meta, usermeta.
- if v.ExpiresAt == 0 {
- return uint16(sz + 1)
- }
-
- enc := sizeVarint(v.ExpiresAt)
- return uint16(sz + enc)
-}
-
-// Decode uses the length of the slice to infer the length of the Value field.
-func (v *ValueStruct) Decode(b []byte) {
- v.Meta = b[0]
- v.UserMeta = b[1]
- var sz int
- v.ExpiresAt, sz = binary.Uvarint(b[2:])
- v.Value = b[2+sz:]
-}
-
-// Encode expects a slice of length at least v.EncodedSize().
-func (v *ValueStruct) Encode(b []byte) {
- b[0] = v.Meta
- b[1] = v.UserMeta
- sz := binary.PutUvarint(b[2:], v.ExpiresAt)
- copy(b[2+sz:], v.Value)
-}
-
-// EncodeTo should be kept in sync with the Encode function above. The reason
-// this function exists is to avoid creating byte arrays per key-value pair in
-// table/builder.go.
-func (v *ValueStruct) EncodeTo(buf *bytes.Buffer) {
- buf.WriteByte(v.Meta)
- buf.WriteByte(v.UserMeta)
- var enc [binary.MaxVarintLen64]byte
- sz := binary.PutUvarint(enc[:], v.ExpiresAt)
- buf.Write(enc[:sz])
- buf.Write(v.Value)
-}
-
-// Iterator is an interface for a basic iterator.
-type Iterator interface {
- Next()
- Rewind()
- Seek(key []byte)
- Key() []byte
- Value() ValueStruct
- Valid() bool
-
- // All iterators should be closed so that file garbage collection works.
- Close() error
-}
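A round trip through EncodedSize/Encode/Decode, as a sketch:

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/y"
)

func main() {
	v := y.ValueStruct{Meta: 1, UserMeta: 2, ExpiresAt: 1234, Value: []byte("hello")}

	// Encode into a buffer sized by EncodedSize.
	buf := make([]byte, v.EncodedSize())
	v.Encode(buf)

	// Decode infers the length of Value from the length of the slice.
	var out y.ValueStruct
	out.Decode(buf)
	fmt.Printf("%s exp=%d\n", out.Value, out.ExpiresAt) // hello exp=1234
}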
diff --git a/vendor/github.com/dgraph-io/badger/y/metrics.go b/vendor/github.com/dgraph-io/badger/y/metrics.go
deleted file mode 100644
index 2de17d10..00000000
--- a/vendor/github.com/dgraph-io/badger/y/metrics.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "expvar"
-
-var (
- // LSMSize has size of the LSM in bytes
- LSMSize *expvar.Map
- // VlogSize has size of the value log in bytes
- VlogSize *expvar.Map
- // PendingWrites tracks the number of pending writes.
- PendingWrites *expvar.Map
-
- // These are cumulative
-
- // NumReads has cumulative number of reads
- NumReads *expvar.Int
- // NumWrites has cumulative number of writes
- NumWrites *expvar.Int
- // NumBytesRead has cumulative number of bytes read
- NumBytesRead *expvar.Int
- // NumBytesWritten has cumulative number of bytes written
- NumBytesWritten *expvar.Int
-	// NumLSMGets is the number of LSM gets
-	NumLSMGets *expvar.Map
-	// NumLSMBloomHits is the number of LSM bloom hits
-	NumLSMBloomHits *expvar.Map
- // NumGets is number of gets
- NumGets *expvar.Int
- // NumPuts is number of puts
- NumPuts *expvar.Int
- // NumBlockedPuts is number of blocked puts
- NumBlockedPuts *expvar.Int
- // NumMemtableGets is number of memtable gets
- NumMemtableGets *expvar.Int
-)
-
-// These variables are global and have cumulative values for all kv stores.
-func init() {
- NumReads = expvar.NewInt("badger_disk_reads_total")
- NumWrites = expvar.NewInt("badger_disk_writes_total")
- NumBytesRead = expvar.NewInt("badger_read_bytes")
- NumBytesWritten = expvar.NewInt("badger_written_bytes")
- NumLSMGets = expvar.NewMap("badger_lsm_level_gets_total")
- NumLSMBloomHits = expvar.NewMap("badger_lsm_bloom_hits_total")
- NumGets = expvar.NewInt("badger_gets_total")
- NumPuts = expvar.NewInt("badger_puts_total")
- NumBlockedPuts = expvar.NewInt("badger_blocked_puts_total")
- NumMemtableGets = expvar.NewInt("badger_memtable_gets_total")
- LSMSize = expvar.NewMap("badger_lsm_size_bytes")
- VlogSize = expvar.NewMap("badger_vlog_size_bytes")
- PendingWrites = expvar.NewMap("badger_pending_writes_total")
-}
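Because these are standard expvar variables, they can be read in-process by name or served as JSON on /debug/vars. A sketch (the blank import stands in for whatever already pulls in this package's init):

package main

import (
	"expvar"
	"fmt"
	"log"
	"net/http"

	_ "github.com/dgraph-io/badger" // registers the badger_* expvars via init()
)

func main() {
	// Individual metrics can be read in-process by name.
	if v := expvar.Get("badger_disk_reads_total"); v != nil {
		fmt.Println("disk reads:", v.String())
	}

	// Importing expvar registers a JSON handler at /debug/vars on the default mux.
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}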
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap.go b/vendor/github.com/dgraph-io/badger/y/mmap.go
deleted file mode 100644
index 4a477af3..00000000
--- a/vendor/github.com/dgraph-io/badger/y/mmap.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
-)
-
-// Mmap uses the mmap system call to memory-map a file. If writable is true,
-// memory protection of the pages is set so that they may be written to as well.
-func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
- return mmap(fd, writable, size)
-}
-
-// Munmap unmaps a previously mapped slice.
-func Munmap(b []byte) error {
- return munmap(b)
-}
-
-// Madvise uses the madvise system call to give advice about the use of memory
-// when using a slice that is memory-mapped to a file. Set the readahead flag to
-// false if page references are expected in random order.
-func Madvise(b []byte, readahead bool) error {
- return madvise(b, readahead)
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_darwin.go b/vendor/github.com/dgraph-io/badger/y/mmap_darwin.go
deleted file mode 100644
index 10b756ba..00000000
--- a/vendor/github.com/dgraph-io/badger/y/mmap_darwin.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// Mmap uses the mmap system call to memory-map a file. If writable is true,
-// memory protection of the pages is set so that they may be written to as well.
-func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
- mtype := unix.PROT_READ
- if writable {
- mtype |= unix.PROT_WRITE
- }
- return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
-}
-
-// Munmap unmaps a previously mapped slice.
-func munmap(b []byte) error {
- return unix.Munmap(b)
-}
-
-// This is required because the unix package does not support the madvise system call on OS X.
-func madvise(b []byte, readahead bool) error {
- advice := unix.MADV_NORMAL
- if !readahead {
- advice = unix.MADV_RANDOM
- }
-
- _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])),
- uintptr(len(b)), uintptr(advice))
- if e1 != 0 {
- return e1
- }
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_plan9.go b/vendor/github.com/dgraph-io/badger/y/mmap_plan9.go
deleted file mode 100644
index 21db76bf..00000000
--- a/vendor/github.com/dgraph-io/badger/y/mmap_plan9.go
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
- "syscall"
-)
-
-// Mmap uses the mmap system call to memory-map a file. If writable is true,
-// memory protection of the pages is set so that they may be written to as well.
-func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
- return nil, syscall.EPLAN9
-}
-
-// Munmap unmaps a previously mapped slice.
-func munmap(b []byte) error {
- return syscall.EPLAN9
-}
-
-// Madvise uses the madvise system call to give advice about the use of memory
-// when using a slice that is memory-mapped to a file. Set the readahead flag to
-// false if page references are expected in random order.
-func madvise(b []byte, readahead bool) error {
- return syscall.EPLAN9
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_unix.go b/vendor/github.com/dgraph-io/badger/y/mmap_unix.go
deleted file mode 100644
index 003f5972..00000000
--- a/vendor/github.com/dgraph-io/badger/y/mmap_unix.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build !windows,!darwin,!plan9
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-// Mmap uses the mmap system call to memory-map a file. If writable is true,
-// memory protection of the pages is set so that they may be written to as well.
-func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
- mtype := unix.PROT_READ
- if writable {
- mtype |= unix.PROT_WRITE
- }
- return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
-}
-
-// Munmap unmaps a previously mapped slice.
-func munmap(b []byte) error {
- return unix.Munmap(b)
-}
-
-// Madvise uses the madvise system call to give advice about the use of memory
-// when using a slice that is memory-mapped to a file. Set the readahead flag to
-// false if page references are expected in random order.
-func madvise(b []byte, readahead bool) error {
- flags := unix.MADV_NORMAL
- if !readahead {
- flags = unix.MADV_RANDOM
- }
- return unix.Madvise(b, flags)
-}
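Typical use of the Mmap/Munmap/Madvise wrappers, as a sketch (file name illustrative): map the file read-only, then disable readahead because lookups are random.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/dgraph-io/badger/y"
)

func main() {
	f, err := os.Open("data.vlog")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// Map the whole file read-only.
	buf, err := y.Mmap(f, false, fi.Size())
	if err != nil {
		log.Fatal(err)
	}
	defer y.Munmap(buf)

	// Hint random access so the kernel does not waste effort on readahead.
	if err := y.Madvise(buf, false); err != nil {
		log.Fatal(err)
	}
	fmt.Println("mapped", len(buf), "bytes")
}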
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_windows.go b/vendor/github.com/dgraph-io/badger/y/mmap_windows.go
deleted file mode 100644
index b2419af9..00000000
--- a/vendor/github.com/dgraph-io/badger/y/mmap_windows.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// +build windows
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "fmt"
- "os"
- "syscall"
- "unsafe"
-)
-
-func mmap(fd *os.File, write bool, size int64) ([]byte, error) {
- protect := syscall.PAGE_READONLY
- access := syscall.FILE_MAP_READ
-
- if write {
- protect = syscall.PAGE_READWRITE
- access = syscall.FILE_MAP_WRITE
- }
- fi, err := fd.Stat()
- if err != nil {
- return nil, err
- }
-
-	// On Windows, we cannot mmap a file beyond its actual size,
-	// so truncate the file to the size of the mmap.
- if fi.Size() < size {
- if err := fd.Truncate(size); err != nil {
- return nil, fmt.Errorf("truncate: %s", err)
- }
- }
-
- // Open a file mapping handle.
-	sizehi := uint32(size >> 32)
-	sizelo := uint32(size) & 0xffffffff
-
-	handler, err := syscall.CreateFileMapping(syscall.Handle(fd.Fd()), nil,
-		uint32(protect), sizehi, sizelo, nil)
- if err != nil {
- return nil, os.NewSyscallError("CreateFileMapping", err)
- }
-
- // Create the memory map.
- addr, err := syscall.MapViewOfFile(handler, uint32(access), 0, 0, uintptr(size))
- if addr == 0 {
- return nil, os.NewSyscallError("MapViewOfFile", err)
- }
-
- // Close mapping handle.
- if err := syscall.CloseHandle(syscall.Handle(handler)); err != nil {
- return nil, os.NewSyscallError("CloseHandle", err)
- }
-
- // Slice memory layout
- // Copied this snippet from golang/sys package
- var sl = struct {
- addr uintptr
- len int
- cap int
- }{addr, int(size), int(size)}
-
- // Use unsafe to turn sl into a []byte.
- data := *(*[]byte)(unsafe.Pointer(&sl))
-
- return data, nil
-}
-
-func munmap(b []byte) error {
- return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&b[0])))
-}
-
-func madvise(b []byte, readahead bool) error {
-	// Do nothing. We don't care about this setting on Windows.
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/watermark.go b/vendor/github.com/dgraph-io/badger/y/watermark.go
deleted file mode 100644
index 2ff70b38..00000000
--- a/vendor/github.com/dgraph-io/badger/y/watermark.go
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright 2016-2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "container/heap"
- "context"
- "sync/atomic"
-
- "golang.org/x/net/trace"
-)
-
-type uint64Heap []uint64
-
-func (u uint64Heap) Len() int { return len(u) }
-func (u uint64Heap) Less(i, j int) bool { return u[i] < u[j] }
-func (u uint64Heap) Swap(i, j int) { u[i], u[j] = u[j], u[i] }
-func (u *uint64Heap) Push(x interface{}) { *u = append(*u, x.(uint64)) }
-func (u *uint64Heap) Pop() interface{} {
- old := *u
- n := len(old)
- x := old[n-1]
- *u = old[0 : n-1]
- return x
-}
-
-// mark contains one or more indices, along with a done boolean to indicate the
-// status of the index: begin or done. It also contains waiters, which could be
-// waiting for the watermark to reach >= a certain index.
-type mark struct {
- // Either this is an (index, waiter) pair or (index, done) or (indices, done).
- index uint64
- waiter chan struct{}
- indices []uint64
- done bool // Set to true if the index is done.
-}
-
-// WaterMark is used to keep track of the minimum un-finished index. Typically, an index k becomes
-// finished or "done" according to a WaterMark once Done(k) has been called
-// 1. as many times as Begin(k) has, AND
-// 2. a positive number of times.
-//
-// An index may also become "done" by calling SetDoneUntil at a time such that it is not
-// inter-mingled with Begin/Done calls.
-//
-// Since doneUntil and lastIndex addresses are passed to sync/atomic packages, we ensure that they
-// are 64-bit aligned by putting them at the beginning of the structure.
-type WaterMark struct {
- doneUntil uint64
- lastIndex uint64
- Name string
- markCh chan mark
- elog trace.EventLog
-}
-
-// Init initializes a WaterMark struct. MUST be called before using it.
-func (w *WaterMark) Init(closer *Closer, eventLogging bool) {
- w.markCh = make(chan mark, 100)
- if eventLogging {
- w.elog = trace.NewEventLog("Watermark", w.Name)
- } else {
- w.elog = NoEventLog
- }
- go w.process(closer)
-}
-
-// Begin sets the last index to the given value.
-func (w *WaterMark) Begin(index uint64) {
- atomic.StoreUint64(&w.lastIndex, index)
- w.markCh <- mark{index: index, done: false}
-}
-
-// BeginMany works like Begin but accepts multiple indices.
-func (w *WaterMark) BeginMany(indices []uint64) {
- atomic.StoreUint64(&w.lastIndex, indices[len(indices)-1])
- w.markCh <- mark{index: 0, indices: indices, done: false}
-}
-
-// Done sets a single index as done.
-func (w *WaterMark) Done(index uint64) {
- w.markCh <- mark{index: index, done: true}
-}
-
-// DoneMany works like Done but accepts multiple indices.
-func (w *WaterMark) DoneMany(indices []uint64) {
- w.markCh <- mark{index: 0, indices: indices, done: true}
-}
-
-// DoneUntil returns the maximum index that has the property that all indices
-// less than or equal to it are done.
-func (w *WaterMark) DoneUntil() uint64 {
- return atomic.LoadUint64(&w.doneUntil)
-}
-
-// SetDoneUntil sets the maximum index that has the property that all indices
-// less than or equal to it are done.
-func (w *WaterMark) SetDoneUntil(val uint64) {
- atomic.StoreUint64(&w.doneUntil, val)
-}
-
-// LastIndex returns the last index for which Begin has been called.
-func (w *WaterMark) LastIndex() uint64 {
- return atomic.LoadUint64(&w.lastIndex)
-}
-
-// WaitForMark waits until the given index is marked as done.
-func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error {
- if w.DoneUntil() >= index {
- return nil
- }
- waitCh := make(chan struct{})
- w.markCh <- mark{index: index, waiter: waitCh}
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-waitCh:
- return nil
- }
-}
-
-// process is used to process the Mark channel. This is not thread-safe,
-// so only run one goroutine for process. One is sufficient, because
-// all goroutine ops use purely memory and cpu.
-// Each index has to emit at least one begin watermark in serial order, otherwise
-// waiters can get blocked indefinitely. Example: if we have a watermark at 100 and
-// a waiter at 101, and no watermark is ever emitted at index 101, the waiter gets
-// stuck indefinitely: it cannot tell whether the task at 101 decided not to emit a
-// watermark or simply hasn't been scheduled yet.
-func (w *WaterMark) process(closer *Closer) {
- defer closer.Done()
-
- var indices uint64Heap
- // pending maps raft proposal index to the number of pending mutations for this proposal.
- pending := make(map[uint64]int)
- waiters := make(map[uint64][]chan struct{})
-
- heap.Init(&indices)
- var loop uint64
-
- processOne := func(index uint64, done bool) {
- // If not already done, then set. Otherwise, don't undo a done entry.
- prev, present := pending[index]
- if !present {
- heap.Push(&indices, index)
- }
-
- delta := 1
- if done {
- delta = -1
- }
- pending[index] = prev + delta
-
- loop++
- if len(indices) > 0 && loop%10000 == 0 {
- min := indices[0]
- w.elog.Printf("WaterMark %s: Done entry %4d. Size: %4d Watermark: %-4d Looking for: "+
- "%-4d. Value: %d\n", w.Name, index, len(indices), w.DoneUntil(), min, pending[min])
- }
-
- // Update mark by going through all indices in order; and checking if they have
- // been done. Stop at the first index, which isn't done.
- doneUntil := w.DoneUntil()
- if doneUntil > index {
- AssertTruef(false, "Name: %s doneUntil: %d. Index: %d", w.Name, doneUntil, index)
- }
-
- until := doneUntil
- loops := 0
-
- for len(indices) > 0 {
- min := indices[0]
- if done := pending[min]; done > 0 {
- break // len(indices) will be > 0.
- }
- // Even if done is called multiple times causing it to become
- // negative, we should still pop the index.
- heap.Pop(&indices)
- delete(pending, min)
- until = min
- loops++
- }
-
- if until != doneUntil {
- AssertTrue(atomic.CompareAndSwapUint64(&w.doneUntil, doneUntil, until))
- w.elog.Printf("%s: Done until %d. Loops: %d\n", w.Name, until, loops)
- }
-
- notifyAndRemove := func(idx uint64, toNotify []chan struct{}) {
- for _, ch := range toNotify {
- close(ch)
- }
- delete(waiters, idx) // Release the memory back.
- }
-
- if until-doneUntil <= uint64(len(waiters)) {
-			// Issue #908 showed that if doneUntil is close to 2^60 while until is zero,
-			// this loop can hog the CPU just iterating over integers in a busy wait. So,
-			// only take this path if until - doneUntil is less than the number of waiters.
- for idx := doneUntil + 1; idx <= until; idx++ {
- if toNotify, ok := waiters[idx]; ok {
- notifyAndRemove(idx, toNotify)
- }
- }
- } else {
- for idx, toNotify := range waiters {
- if idx <= until {
- notifyAndRemove(idx, toNotify)
- }
- }
- } // end of notifying waiters.
- }
-
- for {
- select {
- case <-closer.HasBeenClosed():
- return
- case mark := <-w.markCh:
- if mark.waiter != nil {
- doneUntil := atomic.LoadUint64(&w.doneUntil)
- if doneUntil >= mark.index {
- close(mark.waiter)
- } else {
- ws, ok := waiters[mark.index]
- if !ok {
- waiters[mark.index] = []chan struct{}{mark.waiter}
- } else {
- waiters[mark.index] = append(ws, mark.waiter)
- }
- }
- } else {
- if mark.index > 0 {
- processOne(mark.index, mark.done)
- }
- for _, index := range mark.indices {
- processOne(index, mark.done)
- }
- }
- }
- }
-}
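A sketch of the WaterMark lifecycle: indices are begun in serial order, finished in any order, and WaitForMark blocks until everything up to the given index is done.

package main

import (
	"context"
	"fmt"

	"github.com/dgraph-io/badger/y"
)

func main() {
	closer := y.NewCloser(1) // one count for WaterMark's process goroutine
	w := &y.WaterMark{Name: "example"}
	w.Init(closer, false) // event logging disabled

	// Begin indices in serial order; Done may arrive out of order.
	for i := uint64(1); i <= 3; i++ {
		w.Begin(i)
	}
	w.Done(2)
	w.Done(1)
	w.Done(3)

	// Blocks until indices 1..3 are all done.
	if err := w.WaitForMark(context.Background(), 3); err != nil {
		fmt.Println("wait failed:", err)
	}
	fmt.Println("done until:", w.DoneUntil()) // 3

	closer.SignalAndWait() // stop the process goroutine
}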
diff --git a/vendor/github.com/dgraph-io/badger/y/y.go b/vendor/github.com/dgraph-io/badger/y/y.go
deleted file mode 100644
index e594b708..00000000
--- a/vendor/github.com/dgraph-io/badger/y/y.go
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "hash/crc32"
- "math"
- "os"
- "sync"
- "time"
-
- "github.com/pkg/errors"
-)
-
-// ErrEOF indicates an end-of-file condition when reading from a memory-mapped
-// file and encountering the end of the slice.
-var ErrEOF = errors.New("End of mapped region")
-
-const (
- // Sync indicates that O_DSYNC should be set on the underlying file,
- // ensuring that data writes do not return until the data is flushed
- // to disk.
- Sync = 1 << iota
- // ReadOnly opens the underlying file on a read-only basis.
- ReadOnly
-)
-
-var (
- // This is O_DSYNC (datasync) on platforms that support it -- see file_unix.go
- datasyncFileFlag = 0x0
-
- // CastagnoliCrcTable is a CRC32 polynomial table
- CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli)
-
- // Dummy channel for nil closers.
- dummyCloserChan = make(chan struct{})
-)
-
-// OpenExistingFile opens an existing file, returning an error if it doesn't exist.
-func OpenExistingFile(filename string, flags uint32) (*os.File, error) {
- openFlags := os.O_RDWR
- if flags&ReadOnly != 0 {
- openFlags = os.O_RDONLY
- }
-
- if flags&Sync != 0 {
- openFlags |= datasyncFileFlag
- }
- return os.OpenFile(filename, openFlags, 0)
-}
-
-// CreateSyncedFile creates a new file (using O_EXCL), returning an error if it already exists.
-func CreateSyncedFile(filename string, sync bool) (*os.File, error) {
- flags := os.O_RDWR | os.O_CREATE | os.O_EXCL
- if sync {
- flags |= datasyncFileFlag
- }
- return os.OpenFile(filename, flags, 0600)
-}
-
-// OpenSyncedFile creates the file if one doesn't exist.
-func OpenSyncedFile(filename string, sync bool) (*os.File, error) {
- flags := os.O_RDWR | os.O_CREATE
- if sync {
- flags |= datasyncFileFlag
- }
- return os.OpenFile(filename, flags, 0600)
-}
-
-// OpenTruncFile opens the file with O_RDWR | O_CREATE | O_TRUNC
-func OpenTruncFile(filename string, sync bool) (*os.File, error) {
- flags := os.O_RDWR | os.O_CREATE | os.O_TRUNC
- if sync {
- flags |= datasyncFileFlag
- }
- return os.OpenFile(filename, flags, 0600)
-}
-
-// SafeCopy does append(a[:0], src...).
-func SafeCopy(a, src []byte) []byte {
- return append(a[:0], src...)
-}
-
-// Copy copies a byte slice and returns the copied slice.
-func Copy(a []byte) []byte {
- b := make([]byte, len(a))
- copy(b, a)
- return b
-}
-
-// KeyWithTs generates a new key by appending ts to key.
-func KeyWithTs(key []byte, ts uint64) []byte {
- out := make([]byte, len(key)+8)
- copy(out, key)
- binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
- return out
-}
-
-// ParseTs parses the timestamp from the key bytes.
-func ParseTs(key []byte) uint64 {
- if len(key) <= 8 {
- return 0
- }
- return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:])
-}
-
-// CompareKeys compares keys ignoring the timestamp suffix, and compares the
-// timestamps only when the keys themselves are equal.
-// Under a plain bytes.Compare, "a"+ts would sort higher than "aa"+ts.
-// All keys must carry a timestamp suffix.
-func CompareKeys(key1, key2 []byte) int {
- AssertTrue(len(key1) > 8 && len(key2) > 8)
- if cmp := bytes.Compare(key1[:len(key1)-8], key2[:len(key2)-8]); cmp != 0 {
- return cmp
- }
- return bytes.Compare(key1[len(key1)-8:], key2[len(key2)-8:])
-}
-
-// ParseKey parses the actual key from the key bytes.
-func ParseKey(key []byte) []byte {
- if key == nil {
- return nil
- }
-
- AssertTrue(len(key) > 8)
- return key[:len(key)-8]
-}
-
-// SameKey checks for key equality ignoring the version timestamp suffix.
-func SameKey(src, dst []byte) bool {
- if len(src) != len(dst) {
- return false
- }
- return bytes.Equal(ParseKey(src), ParseKey(dst))
-}
-
-// Slice holds a reusable buf and reallocates if you request a larger size than ever
-// before. Caveat: with n distinct sizes requested in random order, it can reallocate log(n) times.
-type Slice struct {
- buf []byte
-}
-
-// Resize reuses the Slice's buffer (or makes a new one) and returns a slice in that buffer of
-// length sz.
-func (s *Slice) Resize(sz int) []byte {
- if cap(s.buf) < sz {
- s.buf = make([]byte, sz)
- }
- return s.buf[0:sz]
-}
-
-// FixedDuration returns a string representation of the given duration with the
-// hours, minutes, and seconds.
-func FixedDuration(d time.Duration) string {
- str := fmt.Sprintf("%02ds", int(d.Seconds())%60)
- if d >= time.Minute {
- str = fmt.Sprintf("%02dm", int(d.Minutes())%60) + str
- }
- if d >= time.Hour {
- str = fmt.Sprintf("%02dh", int(d.Hours())) + str
- }
- return str
-}
-
-// Closer holds the two things we need to close a goroutine and wait for it to finish: a chan
-// to tell the goroutine to shut down, and a WaitGroup with which to wait for it to finish shutting
-// down.
-type Closer struct {
- closed chan struct{}
- waiting sync.WaitGroup
-}
-
-// NewCloser constructs a new Closer, with an initial count on the WaitGroup.
-func NewCloser(initial int) *Closer {
- ret := &Closer{closed: make(chan struct{})}
- ret.waiting.Add(initial)
- return ret
-}
-
-// AddRunning Add()'s delta to the WaitGroup.
-func (lc *Closer) AddRunning(delta int) {
- lc.waiting.Add(delta)
-}
-
-// Signal signals the HasBeenClosed signal.
-func (lc *Closer) Signal() {
- close(lc.closed)
-}
-
-// HasBeenClosed gets signaled when Signal() is called.
-func (lc *Closer) HasBeenClosed() <-chan struct{} {
- if lc == nil {
- return dummyCloserChan
- }
- return lc.closed
-}
-
-// Done calls Done() on the WaitGroup.
-func (lc *Closer) Done() {
- if lc == nil {
- return
- }
- lc.waiting.Done()
-}
-
-// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done
-// calls to balance out.)
-func (lc *Closer) Wait() {
- lc.waiting.Wait()
-}
-
-// SignalAndWait calls Signal(), then Wait().
-func (lc *Closer) SignalAndWait() {
- lc.Signal()
- lc.Wait()
-}
-
-// Throttle allows a limited number of workers to run at a time. It also
-// provides a mechanism to check for errors encountered by workers and wait for
-// them to finish.
-type Throttle struct {
- once sync.Once
- wg sync.WaitGroup
- ch chan struct{}
- errCh chan error
- finishErr error
-}
-
-// NewThrottle creates a new throttle with a max number of workers.
-func NewThrottle(max int) *Throttle {
- return &Throttle{
- ch: make(chan struct{}, max),
- errCh: make(chan error, max),
- }
-}
-
-// Do should be called by workers before they start working. It blocks if the
-// maximum number of workers are already working. If it detects an error from a
-// previously Done worker, it returns that error.
-func (t *Throttle) Do() error {
- for {
- select {
- case t.ch <- struct{}{}:
- t.wg.Add(1)
- return nil
- case err := <-t.errCh:
- if err != nil {
- return err
- }
- }
- }
-}
-
-// Done should be called by workers when they finish working. They can also
-// pass the error status of work done.
-func (t *Throttle) Done(err error) {
- if err != nil {
- t.errCh <- err
- }
- select {
- case <-t.ch:
- default:
- panic("Throttle Do Done mismatch")
- }
- t.wg.Done()
-}
-
-// Finish waits until all workers have finished working. It returns any error passed
-// to Done. If Finish is called multiple times, it waits for the workers only once (the
-// first time); subsequent calls return the same error found on the first call.
-func (t *Throttle) Finish() error {
- t.once.Do(func() {
- t.wg.Wait()
- close(t.ch)
- close(t.errCh)
- for err := range t.errCh {
- if err != nil {
- t.finishErr = err
- return
- }
- }
- })
-
- return t.finishErr
-}
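The timestamped-key helpers store the timestamp inverted, so under CompareKeys newer versions of the same key sort first. A sketch:

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/y"
)

func main() {
	k10 := y.KeyWithTs([]byte("user42"), 10)
	k5 := y.KeyWithTs([]byte("user42"), 5)

	fmt.Println(y.ParseTs(k10))             // 10
	fmt.Println(string(y.ParseKey(k10)))    // user42
	fmt.Println(y.SameKey(k10, k5))         // true: same key, different versions
	fmt.Println(y.CompareKeys(k10, k5) < 0) // true: ts=10 sorts before ts=5
}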
diff --git a/vendor/github.com/dgraph-io/ristretto/.deepsource.toml b/vendor/github.com/dgraph-io/ristretto/.deepsource.toml
deleted file mode 100644
index 40609eff..00000000
--- a/vendor/github.com/dgraph-io/ristretto/.deepsource.toml
+++ /dev/null
@@ -1,17 +0,0 @@
-version = 1
-
-test_patterns = [
- '**/*_test.go'
-]
-
-exclude_patterns = [
-
-]
-
-[[analyzers]]
-name = 'go'
-enabled = true
-
-
- [analyzers.meta]
- import_path = 'github.com/dgraph-io/ristretto'
diff --git a/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md b/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md
deleted file mode 100644
index 2c985510..00000000
--- a/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
-and this project will adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html) starting v1.0.0.
-
-## Unreleased
-
-### Changed
-
-### Added
-
-### Fixed
-
-## [0.0.3] - 2020-07-06
-
-[0.0.3]: https://github.com/dgraph-io/ristretto/compare/v0.0.2..v0.0.3
-
-### Changed
-
-### Added
-
-### Fixed
-
-- z: use MemHashString and xxhash.Sum64String ([#153][])
-- Check conflict key before updating expiration map. ([#154][])
-- Fix race condition in Cache.Clear ([#133][])
-- Improve handling of updated items ([#168][])
-- Fix droppedSets count while updating the item ([#171][])
-
-## [0.0.2] - 2020-02-24
-
-[0.0.2]: https://github.com/dgraph-io/ristretto/compare/v0.0.1..v0.0.2
-
-### Added
-
-- Sets with TTL. ([#122][])
-
-### Fixed
-
-- Fix the way metrics are handled for deletions. ([#111][])
-- Support nil `*Cache` values in `Clear` and `Close`. ([#119][])
-- Delete item immediately. ([#113][])
-- Remove key from policy after TTL eviction. ([#130][])
-
-[#111]: https://github.com/dgraph-io/ristretto/issues/111
-[#113]: https://github.com/dgraph-io/ristretto/issues/113
-[#119]: https://github.com/dgraph-io/ristretto/issues/119
-[#122]: https://github.com/dgraph-io/ristretto/issues/122
-[#130]: https://github.com/dgraph-io/ristretto/issues/130
-
-## 0.0.1
-
-First release. Basic cache functionality based on a LFU policy.
diff --git a/vendor/github.com/dgraph-io/ristretto/LICENSE b/vendor/github.com/dgraph-io/ristretto/LICENSE
deleted file mode 100644
index d9a10c0d..00000000
--- a/vendor/github.com/dgraph-io/ristretto/LICENSE
+++ /dev/null
@@ -1,176 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/dgraph-io/ristretto/README.md b/vendor/github.com/dgraph-io/ristretto/README.md
deleted file mode 100644
index 7a97f289..00000000
--- a/vendor/github.com/dgraph-io/ristretto/README.md
+++ /dev/null
@@ -1,211 +0,0 @@
-# Ristretto
-[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/dgraph-io/ristretto)
-[![Go Report Card](https://img.shields.io/badge/go%20report-A%2B-brightgreen)](https://goreportcard.com/report/github.com/dgraph-io/ristretto)
-[![Coverage](https://img.shields.io/badge/coverage-100%25-brightgreen)](https://gocover.io/github.com/dgraph-io/ristretto)
-![Tests](https://github.com/dgraph-io/ristretto/workflows/tests/badge.svg)
-
-Ristretto is a fast, concurrent cache library built with a focus on performance and correctness.
-
-The motivation to build Ristretto comes from the need for a contention-free
-cache in [Dgraph][].
-
-**Use [Discuss Issues](https://discuss.dgraph.io/tags/c/issues/35/ristretto/40) for reporting issues about this repository.**
-
-[Dgraph]: https://github.com/dgraph-io/dgraph
-
-## Features
-
-* **High Hit Ratios** - with our unique admission/eviction policy pairing, Ristretto's performance is best in class.
- * **Eviction: SampledLFU** - on par with exact LRU and better performance on Search and Database traces.
- * **Admission: TinyLFU** - extra performance with little memory overhead (12 bits per counter).
-* **Fast Throughput** - we use a variety of techniques for managing contention and the result is excellent throughput.
-* **Cost-Based Eviction** - any large new item deemed valuable can evict multiple smaller items (cost could be anything).
-* **Fully Concurrent** - you can use as many goroutines as you want with little throughput degradation.
-* **Metrics** - optional performance metrics for throughput, hit ratios, and other stats.
-* **Simple API** - just figure out your ideal `Config` values and you're off and running.
-
-## Status
-
-Ristretto is usable but still under active development. We expect it to be production ready in the near future.
-
-## Table of Contents
-
-* [Usage](#Usage)
- * [Example](#Example)
- * [Config](#Config)
- * [NumCounters](#Config)
- * [MaxCost](#Config)
- * [BufferItems](#Config)
- * [Metrics](#Config)
- * [OnEvict](#Config)
- * [KeyToHash](#Config)
- * [Cost](#Config)
-* [Benchmarks](#Benchmarks)
- * [Hit Ratios](#Hit-Ratios)
- * [Search](#Search)
- * [Database](#Database)
- * [Looping](#Looping)
- * [CODASYL](#CODASYL)
- * [Throughput](#Throughput)
- * [Mixed](#Mixed)
- * [Read](#Read)
- * [Write](#Write)
-* [FAQ](#FAQ)
-
-## Usage
-
-### Example
-
-```go
-func main() {
- cache, err := ristretto.NewCache(&ristretto.Config{
- NumCounters: 1e7, // number of keys to track frequency of (10M).
- MaxCost: 1 << 30, // maximum cost of cache (1GB).
- BufferItems: 64, // number of keys per Get buffer.
- })
- if err != nil {
- panic(err)
- }
-
- // set a value with a cost of 1
- cache.Set("key", "value", 1)
-
- // wait for value to pass through buffers
- time.Sleep(10 * time.Millisecond)
-
- value, found := cache.Get("key")
- if !found {
- panic("missing value")
- }
- fmt.Println(value)
- cache.Del("key")
-}
-```
-
-### Config
-
-The `Config` struct is passed to `NewCache` when creating Ristretto instances (see the example above).
-
-**NumCounters** `int64`
-
-NumCounters is the number of 4-bit access counters to keep for admission and eviction. We've seen good performance when setting this to 10x the number of items you expect to keep in the cache when full.
-
-For example, if you expect each item to have a cost of 1 and MaxCost is 100, set NumCounters to 1,000. Or, if you use variable cost values but expect the cache to hold around 10,000 items when full, set NumCounters to 100,000. The important thing is the *number of unique items* in the full cache, not necessarily the MaxCost value.
-
-**MaxCost** `int64`
-
-MaxCost is the cost budget against which eviction decisions are made. For example, if MaxCost is 100 and a new item with a cost of 1 increases total cache cost to 101, 1 item will be evicted.
-
-MaxCost can also be used to denote the max size in bytes. For example, if MaxCost is 1,000,000 (1MB) and the cache is full with 1,000 1KB items, a new 5KB item (that's accepted) would cause five 1KB items to be evicted.
-
-MaxCost could be anything as long as it matches how you're using the cost values when calling Set.
-
-**BufferItems** `int64`
-
-BufferItems is the size of the Get buffers. The best value we've found for this is 64.
-
-If for some reason you see Get performance decreasing with lots of contention (you shouldn't), try increasing this value in increments of 64. This is a fine-tuning mechanism and you probably won't have to touch this.
-
-**Metrics** `bool`
-
-Metrics is true when you want real-time logging of a variety of stats. This is a Config flag because collecting them costs roughly 10% of throughput.
-
-**OnEvict** `func(item *Item)`
-
-OnEvict is called for every eviction.
-
-**KeyToHash** `func(key interface{}) (uint64, uint64)`
-
-KeyToHash is the hashing algorithm used for every key. If this is nil, Ristretto has a variety of [defaults depending on the underlying interface type](https://github.com/dgraph-io/ristretto/blob/master/z/z.go#L19-L41).
-
-Note that if you want 128-bit hashes you should use both returned `uint64`
-values; otherwise just fill the first value and return `0` for the second,
-and it will behave like any 64-bit hash.
-
-**Cost** `func(value interface{}) int64`
-
-Cost is an optional function you can pass to the Config in order to evaluate
-item cost at runtime, and only for the Set calls that aren't dropped (this is
-useful if calculating item cost is particularly expensive and you don't want to
-waste time on items that will be dropped anyway).
-
-To signal to Ristretto that you'd like to use this Cost function:
-
-1. Set the Cost field to a non-nil function.
-2. When calling Set for new items or item updates, use a `cost` of 0.
-
-## Benchmarks
-
-The benchmarks can be found at https://github.com/dgraph-io/benchmarks/tree/master/cachebench/ristretto.
-
-### Hit Ratios
-
-#### Search
-
-This trace is described as "disk read accesses initiated by a large commercial
-search engine in response to various web search requests."
-
-*(hit ratio graph omitted)*
-
-#### Database
-
-This trace is described as "a database server running at a commercial site
-running an ERP application on top of a commercial database."
-
-*(hit ratio graph omitted)*
-
-#### Looping
-
-This trace demonstrates a looping access pattern.
-
-*(hit ratio graph omitted)*
-
-#### CODASYL
-
-This trace is described as "references to a CODASYL database for a one hour
-period."
-
-*(hit ratio graph omitted)*
-
-### Throughput
-
-All throughput benchmarks were run on an Intel Core i7-8700K (3.7GHz) with 16GB
-of RAM.
-
-#### Mixed
-
-*(throughput graph omitted)*
-
-#### Read
-
-*(throughput graph omitted)*
-
-#### Write
-
-*(throughput graph omitted)*
-
-## FAQ
-
-### How are you achieving this performance? What shortcuts are you taking?
-
-We go into detail in the [Ristretto blog post](https://blog.dgraph.io/post/introducing-ristretto-high-perf-go-cache/), but in short: our throughput performance can be attributed to a mix of batching and eventual consistency. Our hit ratio performance is mostly due to an excellent [admission policy](https://arxiv.org/abs/1512.00727) and SampledLFU eviction policy.
-
-As for "shortcuts," the only thing Ristretto does that could be construed as one is dropping some Set calls. That means a Set call for a new item (updates are guaranteed) isn't guaranteed to make it into the cache. The new item could be dropped at two points: when passing through the Set buffer or when passing through the admission policy. However, this doesn't affect hit ratios much at all as we expect the most popular items to be Set multiple times and eventually make it in the cache.
-
-### Is Ristretto distributed?
-
-No, it's just like any other Go library that you can import into your project and use in a single process.
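
To make the Config.Cost contract described above concrete, here is a minimal sketch against the vendored API; the key name, byte-length costing, and size values are illustrative assumptions, and the short sleep mirrors the buffering caveat from the Example section:

```go
package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/ristretto"
)

func main() {
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 1e4,     // ~10x the expected number of items when full.
		MaxCost:     1 << 20, // 1MB budget; the Cost func below prices items in bytes.
		BufferItems: 64,
		Cost: func(value interface{}) int64 {
			// Price string values by byte length; anything else costs 1.
			if s, ok := value.(string); ok {
				return int64(len(s))
			}
			return 1
		},
	})
	if err != nil {
		panic(err)
	}

	cache.Set("greeting", "hello world", 0) // cost 0 => Cost runs for this item.
	time.Sleep(10 * time.Millisecond)       // let the Set pass through buffers.

	if v, found := cache.Get("greeting"); found {
		fmt.Println(v)
	}
}
```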
diff --git a/vendor/github.com/dgraph-io/ristretto/cache.go b/vendor/github.com/dgraph-io/ristretto/cache.go
deleted file mode 100644
index 10ff7a8a..00000000
--- a/vendor/github.com/dgraph-io/ristretto/cache.go
+++ /dev/null
@@ -1,648 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Ristretto is a fast, fixed size, in-memory cache with a dual focus on
-// throughput and hit ratio performance. You can easily add Ristretto to an
-// existing system and keep the most valuable data where you need it.
-package ristretto
-
-import (
- "bytes"
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/ristretto/z"
-)
-
-var (
- // TODO: find the optimal value for this or make it configurable
- setBufSize = 32 * 1024
-)
-
-type itemCallback func(*Item)
-
-// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission
-// policy and a Sampled LFU eviction policy. You can use the same Cache instance
-// from as many goroutines as you want.
-type Cache struct {
- // store is the central concurrent hashmap where key-value items are stored.
- store store
- // policy determines what gets let in to the cache and what gets kicked out.
- policy policy
- // getBuf is a custom ring buffer implementation that gets pushed to when
- // keys are read.
- getBuf *ringBuffer
- // setBuf is a buffer allowing us to batch/drop Sets during times of high
- // contention.
- setBuf chan *Item
- // onEvict is called for item evictions.
- onEvict itemCallback
- // onReject is called when an item is rejected via admission policy.
- onReject itemCallback
- // onExit is called whenever a value goes out of scope from the cache.
- onExit func(interface{})
- // KeyToHash function is used to customize the key hashing algorithm.
- // Each key will be hashed using the provided function. If keyToHash value
- // is not set, the default keyToHash function is used.
- keyToHash func(interface{}) (uint64, uint64)
- // stop is used to stop the processItems goroutine.
- stop chan struct{}
- // cost calculates cost from a value.
- cost func(value interface{}) int64
- // cleanupTicker is used to periodically check for entries whose TTL has passed.
- cleanupTicker *time.Ticker
- // Metrics contains a running log of important statistics like hits, misses,
- // and dropped items.
- Metrics *Metrics
-}
-
-// Config is passed to NewCache for creating new Cache instances.
-type Config struct {
- // NumCounters determines the number of counters (keys) to keep that hold
- // access frequency information. It's generally a good idea to have more
- // counters than the max cache capacity, as this will improve eviction
- // accuracy and subsequent hit ratios.
- //
- // For example, if you expect your cache to hold 1,000,000 items when full,
- // NumCounters should be 10,000,000 (10x). Each counter takes up 4 bits, so
- // keeping 10,000,000 counters would require 5MB of memory.
- NumCounters int64
- // MaxCost can be considered as the cache capacity, in whatever units you
- // choose to use.
- //
- // For example, if you want the cache to have a max capacity of 100MB, you
- // would set MaxCost to 100,000,000 and pass an item's number of bytes as
- // the `cost` parameter for calls to Set. If new items are accepted, the
- // eviction process will take care of making room for the new item and not
- // overflowing the MaxCost value.
- MaxCost int64
- // BufferItems determines the size of Get buffers.
- //
- // Unless you have a rare use case, using `64` as the BufferItems value
- // results in good performance.
- BufferItems int64
- // Metrics determines whether cache statistics are kept during the cache's
- // lifetime. There *is* some overhead to keeping statistics, so you should
- // only set this flag to true when testing or throughput performance isn't a
- // major factor.
- Metrics bool
- // OnEvict is called for every eviction and passes the hashed key, value,
- // and cost to the function.
- OnEvict func(item *Item)
- // OnReject is called for every rejection done via the policy.
- OnReject func(item *Item)
- // OnExit is called whenever a value is removed from cache. This can be
- // used to do manual memory deallocation. Would also be called on eviction
- // and rejection of the value.
- OnExit func(val interface{})
- // KeyToHash function is used to customize the key hashing algorithm.
- // Each key will be hashed using the provided function. If keyToHash value
- // is not set, the default keyToHash function is used.
- KeyToHash func(key interface{}) (uint64, uint64)
- // Cost evaluates a value and outputs a corresponding cost. This function
- // is run after Set is called for a new item or an item update with a cost
- // param of 0.
- Cost func(value interface{}) int64
-}
-
-type itemFlag byte
-
-const (
- itemNew itemFlag = iota
- itemDelete
- itemUpdate
-)
-
-// Item is passed to setBuf so items can eventually be added to the cache.
-type Item struct {
- flag itemFlag
- Key uint64
- Conflict uint64
- Value interface{}
- Cost int64
- Expiration time.Time
- wg *sync.WaitGroup
-}
-
-// NewCache returns a new Cache instance, along with an error if the configuration is invalid.
-func NewCache(config *Config) (*Cache, error) {
- switch {
- case config.NumCounters == 0:
- return nil, errors.New("NumCounters can't be zero")
- case config.MaxCost == 0:
- return nil, errors.New("MaxCost can't be zero")
- case config.BufferItems == 0:
- return nil, errors.New("BufferItems can't be zero")
- }
- policy := newPolicy(config.NumCounters, config.MaxCost)
- cache := &Cache{
- store: newStore(),
- policy: policy,
- getBuf: newRingBuffer(policy, config.BufferItems),
- setBuf: make(chan *Item, setBufSize),
- keyToHash: config.KeyToHash,
- stop: make(chan struct{}),
- cost: config.Cost,
- cleanupTicker: time.NewTicker(time.Duration(bucketDurationSecs) * time.Second / 2),
- }
- cache.onExit = func(val interface{}) {
- if config.OnExit != nil && val != nil {
- config.OnExit(val)
- }
- }
- cache.onEvict = func(item *Item) {
- if config.OnEvict != nil {
- config.OnEvict(item)
- }
- cache.onExit(item.Value)
- }
- cache.onReject = func(item *Item) {
- if config.OnReject != nil {
- config.OnReject(item)
- }
- cache.onExit(item.Value)
- }
- if cache.keyToHash == nil {
- cache.keyToHash = z.KeyToHash
- }
- if config.Metrics {
- cache.collectMetrics()
- }
- // NOTE: benchmarks seem to show that performance decreases the more
- // goroutines we have running cache.processItems(), so 1 should
- // usually be sufficient
- go cache.processItems()
- return cache, nil
-}
-
-func (c *Cache) Wait() {
- if c == nil {
- return
- }
- wg := &sync.WaitGroup{}
- wg.Add(1)
- c.setBuf <- &Item{wg: wg}
- wg.Wait()
-}
-
-// Get returns the value (if any) and a boolean representing whether the
-// value was found or not. The value can be nil and the boolean can be true at
-// the same time.
-func (c *Cache) Get(key interface{}) (interface{}, bool) {
- if c == nil || key == nil {
- return nil, false
- }
- keyHash, conflictHash := c.keyToHash(key)
- c.getBuf.Push(keyHash)
- value, ok := c.store.Get(keyHash, conflictHash)
- if ok {
- c.Metrics.add(hit, keyHash, 1)
- } else {
- c.Metrics.add(miss, keyHash, 1)
- }
- return value, ok
-}
-
-// Set attempts to add the key-value item to the cache. If it returns false,
-// then the Set was dropped and the key-value item isn't added to the cache. If
-// it returns true, there's still a chance it could be dropped by the policy if
-// it's determined that the key-value item isn't worth keeping, but otherwise the
-// item will be added and other items will be evicted in order to make room.
-//
-// To dynamically evaluate the item's cost using the Config.Cost function, set
-// the cost parameter to 0 and the Cost function will be run when needed in
-// order to find the item's true cost.
-func (c *Cache) Set(key, value interface{}, cost int64) bool {
- return c.SetWithTTL(key, value, cost, 0*time.Second)
-}
-
-// SetWithTTL works like Set but adds a key-value pair to the cache that will expire
-// after the specified TTL (time to live) has passed. A zero value means the value never
-// expires, which is identical to calling Set. A negative value is a no-op and the value
-// is discarded.
-func (c *Cache) SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool {
- if c == nil || key == nil {
- return false
- }
-
- var expiration time.Time
- switch {
- case ttl == 0:
- // No expiration.
- break
- case ttl < 0:
- // Treat this as a no-op.
- return false
- default:
- expiration = time.Now().Add(ttl)
- }
-
- keyHash, conflictHash := c.keyToHash(key)
- i := &Item{
- flag: itemNew,
- Key: keyHash,
- Conflict: conflictHash,
- Value: value,
- Cost: cost,
- Expiration: expiration,
- }
- // cost is eventually updated. The expiration must also be immediately updated
- // to prevent items from being prematurely removed from the map.
- if prev, ok := c.store.Update(i); ok {
- c.onExit(prev)
- i.flag = itemUpdate
- }
- // Attempt to send item to policy.
- select {
- case c.setBuf <- i:
- return true
- default:
- if i.flag == itemUpdate {
- // Return true if this was an update operation since we've already
- // updated the store. For all the other operations (set/delete), we
- // return false which means the item was not inserted.
- return true
- }
- c.Metrics.add(dropSets, keyHash, 1)
- return false
- }
-}
-
-// Del deletes the key-value item from the cache if it exists.
-func (c *Cache) Del(key interface{}) {
- if c == nil || key == nil {
- return
- }
- keyHash, conflictHash := c.keyToHash(key)
- // Delete immediately.
- _, prev := c.store.Del(keyHash, conflictHash)
- c.onExit(prev)
- // If we've set an item, it would be applied slightly later.
- // So we must push the same item to `setBuf` with the deletion flag.
- // This ensures that if a set is followed by a delete, it will be
- // applied in the correct order.
- c.setBuf <- &Item{
- flag: itemDelete,
- Key: keyHash,
- Conflict: conflictHash,
- }
-}
-
-// Close stops all goroutines and closes all channels.
-func (c *Cache) Close() {
- if c == nil || c.stop == nil {
- return
- }
- // Block until the processItems goroutine has returned.
- c.stop <- struct{}{}
- close(c.stop)
- c.stop = nil
- close(c.setBuf)
- c.policy.Close()
-}
-
-// Clear empties the hashmap and zeroes all policy counters. Note that this is
-// not an atomic operation (but that shouldn't be a problem as it's assumed that
-// Set/Get calls won't be occurring until after this).
-func (c *Cache) Clear() {
- if c == nil {
- return
- }
- // Block until the processItems goroutine has returned.
- c.stop <- struct{}{}
-
- // Clear out the setBuf channel.
-loop:
- for {
- select {
- case i := <-c.setBuf:
- if i.flag != itemUpdate {
- // In itemUpdate, the value is already set in the store. So, no need to call
- // onEvict here.
- c.onEvict(i)
- }
- default:
- break loop
- }
- }
-
- // Clear value hashmap and policy data.
- c.policy.Clear()
- c.store.Clear(c.onEvict)
- // Only reset metrics if they're enabled.
- if c.Metrics != nil {
- c.Metrics.Clear()
- }
- // Restart processItems goroutine.
- go c.processItems()
-}
-
-// processItems is run by goroutines processing the Set buffer.
-func (c *Cache) processItems() {
- startTs := make(map[uint64]time.Time)
- numToKeep := 100000 // TODO: Make this configurable via options.
-
- trackAdmission := func(key uint64) {
- if c.Metrics == nil {
- return
- }
- startTs[key] = time.Now()
- if len(startTs) > numToKeep {
- for k := range startTs {
- if len(startTs) <= numToKeep {
- break
- }
- delete(startTs, k)
- }
- }
- }
- onEvict := func(i *Item) {
- if ts, has := startTs[i.Key]; has {
- c.Metrics.trackEviction(int64(time.Since(ts) / time.Second))
- delete(startTs, i.Key)
- }
- if c.onEvict != nil {
- c.onEvict(i)
- }
- }
-
- for {
- select {
- case i := <-c.setBuf:
- if i.wg != nil {
- i.wg.Done()
- continue
- }
- // Calculate item cost value if new or update.
- if i.Cost == 0 && c.cost != nil && i.flag != itemDelete {
- i.Cost = c.cost(i.Value)
- }
- switch i.flag {
- case itemNew:
- victims, added := c.policy.Add(i.Key, i.Cost)
- if added {
- c.store.Set(i)
- c.Metrics.add(keyAdd, i.Key, 1)
- trackAdmission(i.Key)
- } else {
- c.onReject(i)
- }
- for _, victim := range victims {
- victim.Conflict, victim.Value = c.store.Del(victim.Key, 0)
- onEvict(victim)
- }
-
- case itemUpdate:
- c.policy.Update(i.Key, i.Cost)
-
- case itemDelete:
- c.policy.Del(i.Key) // Deals with metrics updates.
- _, val := c.store.Del(i.Key, i.Conflict)
- c.onExit(val)
- }
- case <-c.cleanupTicker.C:
- c.store.Cleanup(c.policy, onEvict)
- case <-c.stop:
- return
- }
- }
-}
-
-// collectMetrics just creates a new *Metrics instance and adds the pointers
-// to the cache and policy instances.
-func (c *Cache) collectMetrics() {
- c.Metrics = newMetrics()
- c.policy.CollectMetrics(c.Metrics)
-}
-
-type metricType int
-
-const (
- // The following 2 keep track of hits and misses.
- hit = iota
- miss
- // The following 3 keep track of number of keys added, updated and evicted.
- keyAdd
- keyUpdate
- keyEvict
- // The following 2 keep track of cost of keys added and evicted.
- costAdd
- costEvict
- // The following keep track of how many sets were dropped or rejected later.
- dropSets
- rejectSets
- // The following 2 keep track of how many gets were kept and dropped on the
- // floor.
- dropGets
- keepGets
- // This should be the final enum. Other enums should be set before this.
- doNotUse
-)
-
-func stringFor(t metricType) string {
- switch t {
- case hit:
- return "hit"
- case miss:
- return "miss"
- case keyAdd:
- return "keys-added"
- case keyUpdate:
- return "keys-updated"
- case keyEvict:
- return "keys-evicted"
- case costAdd:
- return "cost-added"
- case costEvict:
- return "cost-evicted"
- case dropSets:
- return "sets-dropped"
- case rejectSets:
- return "sets-rejected" // by policy.
- case dropGets:
- return "gets-dropped"
- case keepGets:
- return "gets-kept"
- default:
- return "unidentified"
- }
-}
-
-// Metrics is a snapshot of performance statistics for the lifetime of a cache instance.
-type Metrics struct {
- all [doNotUse][]*uint64
-
- mu sync.RWMutex
- life *z.HistogramData // Tracks the life expectancy of a key.
-}
-
-func newMetrics() *Metrics {
- s := &Metrics{
- life: z.NewHistogramData(z.HistogramBounds(1, 16)),
- }
- for i := 0; i < doNotUse; i++ {
- s.all[i] = make([]*uint64, 256)
- slice := s.all[i]
- for j := range slice {
- slice[j] = new(uint64)
- }
- }
- return s
-}
-
-func (p *Metrics) add(t metricType, hash, delta uint64) {
- if p == nil {
- return
- }
- valp := p.all[t]
- // Avoid false sharing by padding at least 64 bytes of space between two
- // atomic counters which would be incremented.
- idx := (hash % 25) * 10
- atomic.AddUint64(valp[idx], delta)
-}
-
-func (p *Metrics) get(t metricType) uint64 {
- if p == nil {
- return 0
- }
- valp := p.all[t]
- var total uint64
- for i := range valp {
- total += atomic.LoadUint64(valp[i])
- }
- return total
-}
-
-// Hits is the number of Get calls where a value was found for the corresponding key.
-func (p *Metrics) Hits() uint64 {
- return p.get(hit)
-}
-
-// Misses is the number of Get calls where a value was not found for the corresponding key.
-func (p *Metrics) Misses() uint64 {
- return p.get(miss)
-}
-
-// KeysAdded is the total number of Set calls where a new key-value item was added.
-func (p *Metrics) KeysAdded() uint64 {
- return p.get(keyAdd)
-}
-
-// KeysUpdated is the total number of Set calls where the value was updated.
-func (p *Metrics) KeysUpdated() uint64 {
- return p.get(keyUpdate)
-}
-
-// KeysEvicted is the total number of keys evicted.
-func (p *Metrics) KeysEvicted() uint64 {
- return p.get(keyEvict)
-}
-
-// CostAdded is the sum of costs that have been added (successful Set calls).
-func (p *Metrics) CostAdded() uint64 {
- return p.get(costAdd)
-}
-
-// CostEvicted is the sum of all costs that have been evicted.
-func (p *Metrics) CostEvicted() uint64 {
- return p.get(costEvict)
-}
-
-// SetsDropped is the number of Set calls that don't make it into internal
-// buffers (due to contention or some other reason).
-func (p *Metrics) SetsDropped() uint64 {
- return p.get(dropSets)
-}
-
-// SetsRejected is the number of Set calls rejected by the policy (TinyLFU).
-func (p *Metrics) SetsRejected() uint64 {
- return p.get(rejectSets)
-}
-
-// GetsDropped is the number of Get counter increments that are dropped
-// internally.
-func (p *Metrics) GetsDropped() uint64 {
- return p.get(dropGets)
-}
-
-// GetsKept is the number of Get counter increments that are kept.
-func (p *Metrics) GetsKept() uint64 {
- return p.get(keepGets)
-}
-
-// Ratio is the number of Hits over all accesses (Hits + Misses). This is the
-// fraction of successful Get calls.
-func (p *Metrics) Ratio() float64 {
- if p == nil {
- return 0.0
- }
- hits, misses := p.get(hit), p.get(miss)
- if hits == 0 && misses == 0 {
- return 0.0
- }
- return float64(hits) / float64(hits+misses)
-}
-
-func (p *Metrics) trackEviction(numSeconds int64) {
- if p == nil {
- return
- }
- p.mu.Lock()
- defer p.mu.Unlock()
- p.life.Update(numSeconds)
-}
-
-func (p *Metrics) LifeExpectancySeconds() *z.HistogramData {
- if p == nil {
- return nil
- }
- p.mu.RLock()
- defer p.mu.RUnlock()
- return p.life.Copy()
-}
-
-// Clear resets all the metrics.
-func (p *Metrics) Clear() {
- if p == nil {
- return
- }
- for i := 0; i < doNotUse; i++ {
- for j := range p.all[i] {
- atomic.StoreUint64(p.all[i][j], 0)
- }
- }
- p.mu.Lock()
- p.life = z.NewHistogramData(z.HistogramBounds(1, 16))
- p.mu.Unlock()
-}
-
-// String returns a string representation of the metrics.
-func (p *Metrics) String() string {
- if p == nil {
- return ""
- }
- var buf bytes.Buffer
- for i := 0; i < doNotUse; i++ {
- t := metricType(i)
- fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t))
- }
- fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss))
- fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio())
- return buf.String()
-}
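
A short usage sketch of the TTL and draining behavior defined above, assuming the vendored import path; the key, value, and TTL are illustrative. Wait() flushes the internal Set buffer, which is more deterministic than the time.Sleep approach in the README example:

```go
package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/ristretto"
)

func main() {
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 1e4,
		MaxCost:     100,
		BufferItems: 64,
		Metrics:     true, // enables the hit/miss counters read below.
	})
	if err != nil {
		panic(err)
	}
	defer cache.Close()

	// SetWithTTL schedules expiration; Wait blocks until the buffered Set
	// has been applied by the processItems goroutine.
	cache.SetWithTTL("session", "token", 1, time.Minute)
	cache.Wait()

	if _, found := cache.Get("session"); found {
		fmt.Println("hit ratio:", cache.Metrics.Ratio())
	}
}
```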
diff --git a/vendor/github.com/dgraph-io/ristretto/policy.go b/vendor/github.com/dgraph-io/ristretto/policy.go
deleted file mode 100644
index 065118d3..00000000
--- a/vendor/github.com/dgraph-io/ristretto/policy.go
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "math"
- "sync"
-
- "github.com/dgraph-io/ristretto/z"
-)
-
-const (
- // lfuSample is the number of items to sample when looking at eviction
- // candidates. 5 seems to be the optimal number [citation needed].
- lfuSample = 5
-)
-
-// policy is the interface encapsulating eviction/admission behavior.
-//
-// TODO: remove this interface and just rename defaultPolicy to policy, as we
-// are probably only going to use/implement/maintain one policy.
-type policy interface {
- ringConsumer
- // Add attempts to Add the key-cost pair to the Policy. It returns a slice
- // of evicted keys and a bool denoting whether or not the key-cost pair
- // was added. If it returns true, the key should be stored in cache.
- Add(uint64, int64) ([]*Item, bool)
- // Has returns true if the key exists in the Policy.
- Has(uint64) bool
- // Del deletes the key from the Policy.
- Del(uint64)
- // Cap returns the available capacity.
- Cap() int64
- // Close stops all goroutines and closes all channels.
- Close()
- // Update updates the cost value for the key.
- Update(uint64, int64)
- // Cost returns the cost value of a key or -1 if missing.
- Cost(uint64) int64
- // Optionally, set stats object to track how policy is performing.
- CollectMetrics(*Metrics)
- // Clear zeroes out all counters and clears hashmaps.
- Clear()
-}
-
-func newPolicy(numCounters, maxCost int64) policy {
- return newDefaultPolicy(numCounters, maxCost)
-}
-
-type defaultPolicy struct {
- sync.Mutex
- admit *tinyLFU
- evict *sampledLFU
- itemsCh chan []uint64
- stop chan struct{}
- metrics *Metrics
-}
-
-func newDefaultPolicy(numCounters, maxCost int64) *defaultPolicy {
- p := &defaultPolicy{
- admit: newTinyLFU(numCounters),
- evict: newSampledLFU(maxCost),
- itemsCh: make(chan []uint64, 3),
- stop: make(chan struct{}),
- }
- go p.processItems()
- return p
-}
-
-func (p *defaultPolicy) CollectMetrics(metrics *Metrics) {
- p.metrics = metrics
- p.evict.metrics = metrics
-}
-
-type policyPair struct {
- key uint64
- cost int64
-}
-
-func (p *defaultPolicy) processItems() {
- for {
- select {
- case items := <-p.itemsCh:
- p.Lock()
- p.admit.Push(items)
- p.Unlock()
- case <-p.stop:
- return
- }
- }
-}
-
-func (p *defaultPolicy) Push(keys []uint64) bool {
- if len(keys) == 0 {
- return true
- }
- select {
- case p.itemsCh <- keys:
- p.metrics.add(keepGets, keys[0], uint64(len(keys)))
- return true
- default:
- p.metrics.add(dropGets, keys[0], uint64(len(keys)))
- return false
- }
-}
-
-// Add decides whether the item with the given key and cost should be accepted by
-// the policy. It returns the list of victims that have been evicted and a boolean
-// indicating whether the incoming item should be accepted.
-func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) {
- p.Lock()
- defer p.Unlock()
-
- // Cannot add an item bigger than entire cache.
- if cost > p.evict.maxCost {
- return nil, false
- }
-
- // No need to go any further if the item is already in the cache.
- if has := p.evict.updateIfHas(key, cost); has {
- // An update does not count as an addition, so return false.
- return nil, false
- }
-
- // If the execution reaches this point, the key doesn't exist in the cache.
- // Calculate the remaining room in the cache (usually bytes).
- room := p.evict.roomLeft(cost)
- if room >= 0 {
- // There's enough room in the cache to store the new item without
- // overflowing. Do that now and stop here.
- p.evict.add(key, cost)
- p.metrics.add(costAdd, key, uint64(cost))
- return nil, true
- }
-
- // incHits is the hit count for the incoming item.
- incHits := p.admit.Estimate(key)
- // sample is the eviction candidate pool to be filled via random sampling.
- // TODO: perhaps we should use a min heap here. Right now our time
- // complexity is O(N) for finding the min. A min-heap would bring it down to
- // O(log N).
- sample := make([]*policyPair, 0, lfuSample)
- // As items are evicted they will be appended to victims.
- victims := make([]*Item, 0)
-
- // Delete victims until there's enough space or a minKey is found that has
- // more hits than incoming item.
- for ; room < 0; room = p.evict.roomLeft(cost) {
- // Fill up empty slots in sample.
- sample = p.evict.fillSample(sample)
-
- // Find minimally used item in sample.
- minKey, minHits, minId, minCost := uint64(0), int64(math.MaxInt64), 0, int64(0)
- for i, pair := range sample {
- // Look up hit count for sample key.
- if hits := p.admit.Estimate(pair.key); hits < minHits {
- minKey, minHits, minId, minCost = pair.key, hits, i, pair.cost
- }
- }
-
- // If the incoming item isn't worth keeping in the policy, reject.
- if incHits < minHits {
- p.metrics.add(rejectSets, key, 1)
- return victims, false
- }
-
- // Delete the victim from metadata.
- p.evict.del(minKey)
-
- // Delete the victim from sample.
- sample[minId] = sample[len(sample)-1]
- sample = sample[:len(sample)-1]
- // Store victim in evicted victims slice.
- victims = append(victims, &Item{
- Key: minKey,
- Conflict: 0,
- Cost: minCost,
- })
- }
-
- p.evict.add(key, cost)
- p.metrics.add(costAdd, key, uint64(cost))
- return victims, true
-}
-
-func (p *defaultPolicy) Has(key uint64) bool {
- p.Lock()
- _, exists := p.evict.keyCosts[key]
- p.Unlock()
- return exists
-}
-
-func (p *defaultPolicy) Del(key uint64) {
- p.Lock()
- p.evict.del(key)
- p.Unlock()
-}
-
-func (p *defaultPolicy) Cap() int64 {
- p.Lock()
- capacity := int64(p.evict.maxCost - p.evict.used)
- p.Unlock()
- return capacity
-}
-
-func (p *defaultPolicy) Update(key uint64, cost int64) {
- p.Lock()
- p.evict.updateIfHas(key, cost)
- p.Unlock()
-}
-
-func (p *defaultPolicy) Cost(key uint64) int64 {
- p.Lock()
- if cost, found := p.evict.keyCosts[key]; found {
- p.Unlock()
- return cost
- }
- p.Unlock()
- return -1
-}
-
-func (p *defaultPolicy) Clear() {
- p.Lock()
- p.admit.clear()
- p.evict.clear()
- p.Unlock()
-}
-
-func (p *defaultPolicy) Close() {
- // Block until the p.processItems goroutine returns.
- p.stop <- struct{}{}
- close(p.stop)
- close(p.itemsCh)
-}
-
-// sampledLFU is an eviction helper storing key-cost pairs.
-type sampledLFU struct {
- keyCosts map[uint64]int64
- maxCost int64
- used int64
- metrics *Metrics
-}
-
-func newSampledLFU(maxCost int64) *sampledLFU {
- return &sampledLFU{
- keyCosts: make(map[uint64]int64),
- maxCost: maxCost,
- }
-}
-
-func (p *sampledLFU) roomLeft(cost int64) int64 {
- return p.maxCost - (p.used + cost)
-}
-
-func (p *sampledLFU) fillSample(in []*policyPair) []*policyPair {
- if len(in) >= lfuSample {
- return in
- }
- for key, cost := range p.keyCosts {
- in = append(in, &policyPair{key, cost})
- if len(in) >= lfuSample {
- return in
- }
- }
- return in
-}
-
-func (p *sampledLFU) del(key uint64) {
- cost, ok := p.keyCosts[key]
- if !ok {
- return
- }
- p.used -= cost
- delete(p.keyCosts, key)
- p.metrics.add(costEvict, key, uint64(cost))
- p.metrics.add(keyEvict, key, 1)
-}
-
-func (p *sampledLFU) add(key uint64, cost int64) {
- p.keyCosts[key] = cost
- p.used += cost
-}
-
-func (p *sampledLFU) updateIfHas(key uint64, cost int64) bool {
- if prev, found := p.keyCosts[key]; found {
- // Update the cost of an existing key, but don't worry about evicting.
- // Evictions will be handled the next time a new item is added.
- p.metrics.add(keyUpdate, key, 1)
- if prev > cost {
- diff := prev - cost
- p.metrics.add(costAdd, key, ^uint64(uint64(diff)-1))
- } else if cost > prev {
- diff := cost - prev
- p.metrics.add(costAdd, key, uint64(diff))
- }
- p.used += cost - prev
- p.keyCosts[key] = cost
- return true
- }
- return false
-}
-
-func (p *sampledLFU) clear() {
- p.used = 0
- p.keyCosts = make(map[uint64]int64)
-}
-
-// tinyLFU is an admission helper that keeps track of access frequency using
-// tiny (4-bit) counters in the form of a count-min sketch.
-// tinyLFU is NOT thread safe.
-type tinyLFU struct {
- freq *cmSketch
- door *z.Bloom
- incrs int64
- resetAt int64
-}
-
-func newTinyLFU(numCounters int64) *tinyLFU {
- return &tinyLFU{
- freq: newCmSketch(numCounters),
- door: z.NewBloomFilter(float64(numCounters), 0.01),
- resetAt: numCounters,
- }
-}
-
-func (p *tinyLFU) Push(keys []uint64) {
- for _, key := range keys {
- p.Increment(key)
- }
-}
-
-func (p *tinyLFU) Estimate(key uint64) int64 {
- hits := p.freq.Estimate(key)
- if p.door.Has(key) {
- hits++
- }
- return hits
-}
-
-func (p *tinyLFU) Increment(key uint64) {
- // Flip doorkeeper bit if not already done.
- if added := p.door.AddIfNotHas(key); !added {
- // Increment count-min counter if doorkeeper bit is already set.
- p.freq.Increment(key)
- }
- p.incrs++
- if p.incrs >= p.resetAt {
- p.reset()
- }
-}
-
-func (p *tinyLFU) reset() {
- // Zero out incrs.
- p.incrs = 0
- // clears doorkeeper bits
- p.door.Clear()
- // halves count-min counters
- p.freq.Reset()
-}
-
-func (p *tinyLFU) clear() {
- p.incrs = 0
- p.door.Clear()
- p.freq.Clear()
-}
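
The doorkeeper interplay above is easy to miss: the first sighting of a key only sets a Bloom-filter bit, and the sketch counters only start once a key repeats. A self-contained toy model of that logic, with plain maps standing in for z.Bloom and cmSketch (toyLFU and its fields are illustrative names, not part of the package):

```go
package main

import "fmt"

type toyLFU struct {
	door map[uint64]bool  // stand-in for the Bloom-filter doorkeeper
	freq map[uint64]int64 // stand-in for the count-min sketch
}

func (t *toyLFU) Increment(key uint64) {
	if !t.door[key] {
		t.door[key] = true // first sighting only flips the doorkeeper bit
		return
	}
	t.freq[key]++ // repeat sightings reach the frequency counters
}

func (t *toyLFU) Estimate(key uint64) int64 {
	hits := t.freq[key]
	if t.door[key] {
		hits++ // the doorkeeper bit counts as one extra hit
	}
	return hits
}

func main() {
	t := &toyLFU{door: map[uint64]bool{}, freq: map[uint64]int64{}}
	for i := 0; i < 3; i++ {
		t.Increment(42)
	}
	fmt.Println(t.Estimate(42)) // 3: doorkeeper bit + two counter increments
}
```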
diff --git a/vendor/github.com/dgraph-io/ristretto/ring.go b/vendor/github.com/dgraph-io/ristretto/ring.go
deleted file mode 100644
index 5dbed4cc..00000000
--- a/vendor/github.com/dgraph-io/ristretto/ring.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "sync"
-)
-
-// ringConsumer is the user-defined object responsible for receiving and
-// processing items in batches when buffers are drained.
-type ringConsumer interface {
- Push([]uint64) bool
-}
-
-// ringStripe is a single ring buffer that is not safe for concurrent use.
-type ringStripe struct {
- cons ringConsumer
- data []uint64
- capa int
-}
-
-func newRingStripe(cons ringConsumer, capa int64) *ringStripe {
- return &ringStripe{
- cons: cons,
- data: make([]uint64, 0, capa),
- capa: int(capa),
- }
-}
-
-// Push appends an item to the ring buffer and drains it (copies items and
-// sends them to the consumer) when full.
-func (s *ringStripe) Push(item uint64) {
- s.data = append(s.data, item)
- // Decide if the ring buffer should be drained.
- if len(s.data) >= s.capa {
- // Send elements to consumer and create a new ring stripe.
- if s.cons.Push(s.data) {
- s.data = make([]uint64, 0, s.capa)
- } else {
- s.data = s.data[:0]
- }
- }
-}
-
-// ringBuffer stores multiple buffers (stripes) and distributes Pushed items
-// between them to lower contention.
-//
-// This implements the "batching" process described in the BP-Wrapper paper
-// (section III part A).
-type ringBuffer struct {
- pool *sync.Pool
-}
-
-// newRingBuffer returns a striped ring buffer. The Consumer in ringConfig will
-// be called when individual stripes are full and need to drain their elements.
-func newRingBuffer(cons ringConsumer, capa int64) *ringBuffer {
- // LOSSY buffers use a very simple sync.Pool for concurrently reusing
- // stripes. We do lose some stripes due to GC (unheld items in sync.Pool
- // are cleared), but the performance gains generally outweigh the small
- // percentage of elements lost. The performance primarily comes from
- // low-level runtime functions used in the standard library that aren't
- // available to us (such as runtime_procPin()).
- return &ringBuffer{
- pool: &sync.Pool{
- New: func() interface{} { return newRingStripe(cons, capa) },
- },
- }
-}
-
-// Push adds an element to one of the internal stripes and possibly drains if
-// the stripe becomes full.
-func (b *ringBuffer) Push(item uint64) {
- // Reuse or create a new stripe.
- stripe := b.pool.Get().(*ringStripe)
- stripe.Push(item)
- b.pool.Put(stripe)
-}
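
The same lossy sync.Pool striping can be sketched outside Ristretto; the stripe type, the capacity of 64, and the counting consumer below are illustrative stand-ins for ringStripe and ringConsumer above:

```go
package main

import (
	"fmt"
	"sync"
)

// stripe is an illustrative stand-in for ringStripe.
type stripe struct {
	data []uint64
	capa int
}

func main() {
	var mu sync.Mutex
	drained := 0
	consume := func(keys []uint64) { // stand-in for ringConsumer.Push
		mu.Lock()
		drained += len(keys)
		mu.Unlock()
	}

	pool := &sync.Pool{
		New: func() interface{} { return &stripe{data: make([]uint64, 0, 64), capa: 64} },
	}

	var wg sync.WaitGroup
	for g := 0; g < 8; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := uint64(0); i < 1024; i++ {
				s := pool.Get().(*stripe) // grab whichever stripe is free
				s.data = append(s.data, i)
				if len(s.data) >= s.capa {
					consume(s.data) // drain a full stripe to the consumer
					s.data = make([]uint64, 0, s.capa)
				}
				pool.Put(s)
			}
		}()
	}
	wg.Wait()
	// Close to 8*1024; items stuck in partially full stripes (or stripes
	// collected by the GC) are the "lossy" part of the design.
	fmt.Println("keys drained:", drained)
}
```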
diff --git a/vendor/github.com/dgraph-io/ristretto/sketch.go b/vendor/github.com/dgraph-io/ristretto/sketch.go
deleted file mode 100644
index 10f41468..00000000
--- a/vendor/github.com/dgraph-io/ristretto/sketch.go
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// This package includes multiple probabilistic data structures needed for
-// admission/eviction metadata. Most are Counting Bloom Filter variations, but
-// a caching-specific feature that is also required is a "freshness" mechanism,
-// which basically serves as a "lifetime" process. This freshness mechanism
-// was described in the original TinyLFU paper [1], but other mechanisms may
-// be better suited for certain data distributions.
-//
-// [1]: https://arxiv.org/abs/1512.00727
-package ristretto
-
-import (
- "fmt"
- "math/rand"
- "time"
-)
-
-// cmSketch is a Count-Min sketch implementation with 4-bit counters, heavily
-// based on Damian Gryski's CM4 [1].
-//
-// [1]: https://github.com/dgryski/go-tinylfu/blob/master/cm4.go
-type cmSketch struct {
- rows [cmDepth]cmRow
- seed [cmDepth]uint64
- mask uint64
-}
-
-const (
- // cmDepth is the number of counter copies to store (think of it as rows).
- cmDepth = 4
-)
-
-func newCmSketch(numCounters int64) *cmSketch {
- if numCounters == 0 {
- panic("cmSketch: bad numCounters")
- }
- // Get the next power of 2 for better cache performance.
- numCounters = next2Power(numCounters)
- sketch := &cmSketch{mask: uint64(numCounters - 1)}
- // Initialize rows of counters and seeds.
- source := rand.New(rand.NewSource(time.Now().UnixNano()))
- for i := 0; i < cmDepth; i++ {
- sketch.seed[i] = source.Uint64()
- sketch.rows[i] = newCmRow(numCounters)
- }
- return sketch
-}
-
-// Increment increments the count(ers) for the specified key.
-func (s *cmSketch) Increment(hashed uint64) {
- for i := range s.rows {
- s.rows[i].increment((hashed ^ s.seed[i]) & s.mask)
- }
-}
-
-// Estimate returns the estimated count for the specified key.
-func (s *cmSketch) Estimate(hashed uint64) int64 {
- min := byte(255)
- for i := range s.rows {
- val := s.rows[i].get((hashed ^ s.seed[i]) & s.mask)
- if val < min {
- min = val
- }
- }
- return int64(min)
-}
-
-// Reset halves all counter values.
-func (s *cmSketch) Reset() {
- for _, r := range s.rows {
- r.reset()
- }
-}
-
-// Clear zeroes all counters.
-func (s *cmSketch) Clear() {
- for _, r := range s.rows {
- r.clear()
- }
-}
-
-// cmRow is a row of bytes, with each byte holding two counters.
-type cmRow []byte
-
-func newCmRow(numCounters int64) cmRow {
- return make(cmRow, numCounters/2)
-}
-
-func (r cmRow) get(n uint64) byte {
- return byte(r[n/2]>>((n&1)*4)) & 0x0f
-}
-
-func (r cmRow) increment(n uint64) {
- // Index of the counter.
- i := n / 2
- // Shift distance (even 0, odd 4).
- s := (n & 1) * 4
- // Counter value.
- v := (r[i] >> s) & 0x0f
- // Only increment if not max value (overflow wrap is bad for LFU).
- if v < 15 {
- r[i] += 1 << s
- }
-}
-
-func (r cmRow) reset() {
- // Halve each counter.
- for i := range r {
- r[i] = (r[i] >> 1) & 0x77
- }
-}
-
-func (r cmRow) clear() {
- // Zero each counter.
- for i := range r {
- r[i] = 0
- }
-}
-
-func (r cmRow) string() string {
- s := ""
- for i := uint64(0); i < uint64(len(r)*2); i++ {
- s += fmt.Sprintf("%02d ", (r[(i/2)]>>((i&1)*4))&0x0f)
- }
- s = s[:len(s)-1]
- return s
-}
-
-// next2Power rounds x up to the next power of 2, if it's not already one.
-func next2Power(x int64) int64 {
- x--
- x |= x >> 1
- x |= x >> 2
- x |= x >> 4
- x |= x >> 8
- x |= x >> 16
- x |= x >> 32
- x++
- return x
-}
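
A quick self-contained demo of the nibble packing used by cmRow above: two 4-bit counters per byte, even indexes in the low nibble, odd indexes in the high nibble, saturating at 15. The row type and counter indexes are illustrative:

```go
package main

import "fmt"

type row []byte

// get extracts the 4-bit counter at index n (same layout as cmRow above).
func (r row) get(n uint64) byte { return (r[n/2] >> ((n & 1) * 4)) & 0x0f }

// increment bumps counter n, saturating at 15 so it never wraps to 0.
func (r row) increment(n uint64) {
	i, s := n/2, (n&1)*4
	if v := (r[i] >> s) & 0x0f; v < 15 {
		r[i] += 1 << s
	}
}

func main() {
	r := make(row, 4) // 4 bytes hold 8 counters
	for i := 0; i < 20; i++ {
		r.increment(3) // 20 increments, but the counter saturates
	}
	r.increment(2) // counters 2 and 3 share byte r[1]
	fmt.Println(r.get(3), r.get(2)) // prints: 15 1
}
```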
diff --git a/vendor/github.com/dgraph-io/ristretto/store.go b/vendor/github.com/dgraph-io/ristretto/store.go
deleted file mode 100644
index e42a98b7..00000000
--- a/vendor/github.com/dgraph-io/ristretto/store.go
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "sync"
- "time"
-)
-
-// TODO: Do we need this to be a separate struct from Item?
-type storeItem struct {
- key uint64
- conflict uint64
- value interface{}
- expiration time.Time
-}
-
-// store is the interface fulfilled by all hash map implementations in this
-// file. Some hash map implementations are better suited for certain data
-// distributions than others, so this allows us to abstract that out for use
-// in Ristretto.
-//
-// Every store is safe for concurrent usage.
-type store interface {
- // Get returns the value associated with the key parameter.
- Get(uint64, uint64) (interface{}, bool)
- // Expiration returns the expiration time for this key.
- Expiration(uint64) time.Time
- // Set adds the key-value pair to the Map or updates the value if it's
- // already present. The key-value pair is passed as a pointer to an
- // item object.
- Set(*Item)
- // Del deletes the key-value pair from the Map.
- Del(uint64, uint64) (uint64, interface{})
- // Update attempts to update the key with a new value and returns true if
- // successful.
- Update(*Item) (interface{}, bool)
- // Cleanup removes items that have an expired TTL.
- Cleanup(policy policy, onEvict itemCallback)
- // Clear clears all contents of the store.
- Clear(onEvict itemCallback)
-}
-
-// newStore returns the default store implementation.
-func newStore() store {
- return newShardedMap()
-}
-
-const numShards uint64 = 256
-
-type shardedMap struct {
- shards []*lockedMap
- expiryMap *expirationMap
-}
-
-func newShardedMap() *shardedMap {
- sm := &shardedMap{
- shards: make([]*lockedMap, int(numShards)),
- expiryMap: newExpirationMap(),
- }
- for i := range sm.shards {
- sm.shards[i] = newLockedMap(sm.expiryMap)
- }
- return sm
-}
-
-func (sm *shardedMap) Get(key, conflict uint64) (interface{}, bool) {
- return sm.shards[key%numShards].get(key, conflict)
-}
-
-func (sm *shardedMap) Expiration(key uint64) time.Time {
- return sm.shards[key%numShards].Expiration(key)
-}
-
-func (sm *shardedMap) Set(i *Item) {
- if i == nil {
- // If item is nil make this Set a no-op.
- return
- }
-
- sm.shards[i.Key%numShards].Set(i)
-}
-
-func (sm *shardedMap) Del(key, conflict uint64) (uint64, interface{}) {
- return sm.shards[key%numShards].Del(key, conflict)
-}
-
-func (sm *shardedMap) Update(newItem *Item) (interface{}, bool) {
- return sm.shards[newItem.Key%numShards].Update(newItem)
-}
-
-func (sm *shardedMap) Cleanup(policy policy, onEvict itemCallback) {
- sm.expiryMap.cleanup(sm, policy, onEvict)
-}
-
-func (sm *shardedMap) Clear(onEvict itemCallback) {
- for i := uint64(0); i < numShards; i++ {
- sm.shards[i].Clear(onEvict)
- }
-}
-
-type lockedMap struct {
- sync.RWMutex
- data map[uint64]storeItem
- em *expirationMap
-}
-
-func newLockedMap(em *expirationMap) *lockedMap {
- return &lockedMap{
- data: make(map[uint64]storeItem),
- em: em,
- }
-}
-
-func (m *lockedMap) get(key, conflict uint64) (interface{}, bool) {
- m.RLock()
- item, ok := m.data[key]
- m.RUnlock()
- if !ok {
- return nil, false
- }
- if conflict != 0 && (conflict != item.conflict) {
- return nil, false
- }
-
- // Handle expired items.
- if !item.expiration.IsZero() && time.Now().After(item.expiration) {
- return nil, false
- }
- return item.value, true
-}
-
-func (m *lockedMap) Expiration(key uint64) time.Time {
- m.RLock()
- defer m.RUnlock()
- return m.data[key].expiration
-}
-
-func (m *lockedMap) Set(i *Item) {
- if i == nil {
- // If the item is nil make this Set a no-op.
- return
- }
-
- m.Lock()
- defer m.Unlock()
- item, ok := m.data[i.Key]
-
- if ok {
- // The item existed already. We need to check the conflict key and reject the
- // update if it does not match. Only then is the expiration map updated.
- if i.Conflict != 0 && (i.Conflict != item.conflict) {
- return
- }
- m.em.update(i.Key, i.Conflict, item.expiration, i.Expiration)
- } else {
- // The value is not in the map already. There's no need to return anything.
- // Simply add it to the expiration map.
- m.em.add(i.Key, i.Conflict, i.Expiration)
- }
-
- m.data[i.Key] = storeItem{
- key: i.Key,
- conflict: i.Conflict,
- value: i.Value,
- expiration: i.Expiration,
- }
-}
-
-func (m *lockedMap) Del(key, conflict uint64) (uint64, interface{}) {
- m.Lock()
- item, ok := m.data[key]
- if !ok {
- m.Unlock()
- return 0, nil
- }
- if conflict != 0 && (conflict != item.conflict) {
- m.Unlock()
- return 0, nil
- }
-
- if !item.expiration.IsZero() {
- m.em.del(key, item.expiration)
- }
-
- delete(m.data, key)
- m.Unlock()
- return item.conflict, item.value
-}
-
-func (m *lockedMap) Update(newItem *Item) (interface{}, bool) {
- m.Lock()
- item, ok := m.data[newItem.Key]
- if !ok {
- m.Unlock()
- return nil, false
- }
- if newItem.Conflict != 0 && (newItem.Conflict != item.conflict) {
- m.Unlock()
- return nil, false
- }
-
- m.em.update(newItem.Key, newItem.Conflict, item.expiration, newItem.Expiration)
- m.data[newItem.Key] = storeItem{
- key: newItem.Key,
- conflict: newItem.Conflict,
- value: newItem.Value,
- expiration: newItem.Expiration,
- }
-
- m.Unlock()
- return item.value, true
-}
-
-func (m *lockedMap) Clear(onEvict itemCallback) {
- m.Lock()
- i := &Item{}
- if onEvict != nil {
- for _, si := range m.data {
- i.Key = si.key
- i.Conflict = si.conflict
- i.Value = si.value
- onEvict(i)
- }
- }
- m.data = make(map[uint64]storeItem)
- m.Unlock()
-}
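
The shard-selection rule above (key % numShards picking one of 256 independently locked maps) can be sketched in isolation; the string-valued map and the helper names are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

const numShards uint64 = 256

// shard pairs one mutex with one map, so contention spreads across 256 locks.
type shard struct {
	sync.RWMutex
	data map[uint64]string
}

type sharded []*shard

func newSharded() sharded {
	s := make(sharded, numShards)
	for i := range s {
		s[i] = &shard{data: make(map[uint64]string)}
	}
	return s
}

func (s sharded) set(key uint64, val string) {
	sh := s[key%numShards] // a key always maps to the same shard
	sh.Lock()
	sh.data[key] = val
	sh.Unlock()
}

func (s sharded) get(key uint64) (string, bool) {
	sh := s[key%numShards]
	sh.RLock()
	v, ok := sh.data[key]
	sh.RUnlock()
	return v, ok
}

func main() {
	m := newSharded()
	m.set(300, "x") // lands in shard 300 % 256 = 44
	fmt.Println(m.get(300))
}
```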
diff --git a/vendor/github.com/dgraph-io/ristretto/test.sh b/vendor/github.com/dgraph-io/ristretto/test.sh
deleted file mode 100644
index 2bdcc250..00000000
--- a/vendor/github.com/dgraph-io/ristretto/test.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#! /bin/sh
-
-starttest() {
- set -e
- GO111MODULE=on go test -race ./...
-}
-
-if [ -z "${TEAMCITY_VERSION}" ]; then
- # running locally, so start the test in a container;
- # TEAMCITY_VERSION=local avoids recursive calls when test.sh reruns inside the container
- docker run --rm --name ristretto-test -ti \
- -v `pwd`:/go/src/github.com/dgraph-io/ristretto \
- --workdir /go/src/github.com/dgraph-io/ristretto \
- --env TEAMCITY_VERSION=local \
- golang:1.13 \
- sh test.sh
-else
- # running in TeamCity; since TeamCity itself runs this in a container, simply run the tests
- starttest
-fi
diff --git a/vendor/github.com/dgraph-io/ristretto/ttl.go b/vendor/github.com/dgraph-io/ristretto/ttl.go
deleted file mode 100644
index 337976ad..00000000
--- a/vendor/github.com/dgraph-io/ristretto/ttl.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "sync"
- "time"
-)
-
-var (
- // TODO: find the optimal value or make it configurable.
- bucketDurationSecs = int64(5)
-)
-
-func storageBucket(t time.Time) int64 {
- return (t.Unix() / bucketDurationSecs) + 1
-}
-
-func cleanupBucket(t time.Time) int64 {
- // The bucket to clean up always trails the storage bucket by one, so that
- // elements in the current bucket (which might not have expired yet) are not deleted.
- return storageBucket(t) - 1
-}
-
-// bucket type is a map of key to conflict.
-type bucket map[uint64]uint64
-
-// expirationMap is a map of bucket number to the corresponding bucket.
-type expirationMap struct {
- sync.RWMutex
- buckets map[int64]bucket
-}
-
-func newExpirationMap() *expirationMap {
- return &expirationMap{
- buckets: make(map[int64]bucket),
- }
-}
-
-func (m *expirationMap) add(key, conflict uint64, expiration time.Time) {
- if m == nil {
- return
- }
-
- // Items that don't expire don't need to be in the expiration map.
- if expiration.IsZero() {
- return
- }
-
- bucketNum := storageBucket(expiration)
- m.Lock()
- defer m.Unlock()
-
- b, ok := m.buckets[bucketNum]
- if !ok {
- b = make(bucket)
- m.buckets[bucketNum] = b
- }
- b[key] = conflict
-}
-
-func (m *expirationMap) update(key, conflict uint64, oldExpTime, newExpTime time.Time) {
- if m == nil {
- return
- }
-
- m.Lock()
- defer m.Unlock()
-
- oldBucketNum := storageBucket(oldExpTime)
- oldBucket, ok := m.buckets[oldBucketNum]
- if ok {
- delete(oldBucket, key)
- }
-
- newBucketNum := storageBucket(newExpTime)
- newBucket, ok := m.buckets[newBucketNum]
- if !ok {
- newBucket = make(bucket)
- m.buckets[newBucketNum] = newBucket
- }
- newBucket[key] = conflict
-}
-
-func (m *expirationMap) del(key uint64, expiration time.Time) {
- if m == nil {
- return
- }
-
- bucketNum := storageBucket(expiration)
- m.Lock()
- defer m.Unlock()
- _, ok := m.buckets[bucketNum]
- if !ok {
- return
- }
- delete(m.buckets[bucketNum], key)
-}
-
-// cleanup removes all the items in the bucket that was just completed. It deletes
-// those items from the store, and calls the onEvict function on those items.
-// This function is meant to be called periodically.
-func (m *expirationMap) cleanup(store store, policy policy, onEvict itemCallback) {
- if m == nil {
- return
- }
-
- m.Lock()
- now := time.Now()
- bucketNum := cleanupBucket(now)
- keys := m.buckets[bucketNum]
- delete(m.buckets, bucketNum)
- m.Unlock()
-
- for key, conflict := range keys {
- // Sanity check. Verify that the store agrees that this key is expired.
- if store.Expiration(key).After(now) {
- continue
- }
-
- cost := policy.Cost(key)
- policy.Del(key)
- _, value := store.Del(key, conflict)
-
- if onEvict != nil {
- onEvict(&Item{Key: key,
- Conflict: conflict,
- Value: value,
- Cost: cost,
- })
- }
- }
-}
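
A worked example of the bucket arithmetic above with bucketDurationSecs = 5: an expiration time is binned into (unix / 5) + 1, and cleanup always walks the bucket one behind the current one, so a bucket is only drained after everything in it has had a full chance to expire. The Unix timestamps are illustrative:

```go
package main

import (
	"fmt"
	"time"
)

const bucketDurationSecs = int64(5)

// Same formulas as in the file above.
func storageBucket(t time.Time) int64 { return t.Unix()/bucketDurationSecs + 1 }
func cleanupBucket(t time.Time) int64 { return storageBucket(t) - 1 }

func main() {
	now := time.Unix(1000, 0)
	exp := now.Add(7 * time.Second) // unix 1007, inside bucket 202

	fmt.Println(storageBucket(exp)) // (1007/5)+1 = 202
	fmt.Println(cleanupBucket(now)) // (1000/5)+1-1 = 200, safely behind
}
```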
diff --git a/vendor/github.com/dgraph-io/ristretto/z/LICENSE b/vendor/github.com/dgraph-io/ristretto/z/LICENSE
deleted file mode 100644
index 0860cbfe..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/LICENSE
+++ /dev/null
@@ -1,64 +0,0 @@
-bbloom.go
-
-// The MIT License (MIT)
-// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-rtutil.go
-
-// MIT License
-
-// Copyright (c) 2019 Ewan Chou
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-Modifications:
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
diff --git a/vendor/github.com/dgraph-io/ristretto/z/README.md b/vendor/github.com/dgraph-io/ristretto/z/README.md
deleted file mode 100644
index 6d77e146..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/README.md
+++ /dev/null
@@ -1,129 +0,0 @@
-## bbloom: a bitset Bloom filter for go/golang
-
-This package implements a fast Bloom filter with a real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the filter.
-
-NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom
-
-===
-
-changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache.
-
-This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html".
-Nonetheless bbloom should work with any other form of entries.
-
-~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~
-
-Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein) to be about as fast. sipHash was ported to Go by Dmitry Chestnykh (github.com/dchest/siphash).
-
-Minimum hashset size is: 512 ([4]uint64; will be set automatically).
-
-### install
-
-```sh
-go get github.com/AndreasBriese/bbloom
-```
-
-### test
-+ change to folder ../bbloom
-+ create wordlist in file "words.txt" (you might use `python permut.py`)
-+ run 'go test -bench=.' within the folder
-
-```sh
-go test -bench=.
-```
-
-~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~
-
-The tests now use Go's testing framework (keep in mind that the op timing covers 65536 operations each of Add, Has, and AddIfNotHas).
-
-### usage
-
-after installation add
-
-```go
-import (
- ...
- "github.com/AndreasBriese/bbloom"
- ...
- )
-```
-
-at your header. In the program use
-
-```go
-// create a bloom filter for 65536 items and a 1% false-positive rate
-bf := bbloom.New(float64(1<<16), float64(0.01))
-
-// or
-// create a bloom filter with a 650000-entry bitset and 7 locs per hash explicitly
-// bf = bbloom.New(float64(650000), float64(7))
-// or
-bf = bbloom.New(650000.0, 7.0)
-
-// add one item
-bf.Add([]byte("butter"))
-
-// Number of elements added is exposed now
-// Note: ElemNum will not be included in JSON export (for compatibility with older versions)
-nOfElementsInFilter := bf.ElemNum
-
-// check if item is in the filter
-isIn := bf.Has([]byte("butter")) // should be true
-isNotIn := bf.Has([]byte("Butter")) // should be false
-
-// 'add only if item is new' to the bloomfilter
-added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set
-added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new
-
-// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS
-// add one item
-bf.AddTS([]byte("peanutbutter"))
-// check if item is in the filter
-isIn = bf.HasTS([]byte("peanutbutter")) // should be true
-isNotIn = bf.HasTS([]byte("peanutButter")) // should be false
-// 'add only if item is new' to the bloomfilter
-added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'peanutbutter' is already in the set
-added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'peanutbuTTer' is new
-
-// convert to JSON ([]byte)
-Json := bf.JSONMarshal()
-
-// bloomfilters Mutex is exposed for external un-/locking
-// i.e. mutex lock while doing JSON conversion
-bf.Mtx.Lock()
-Json = bf.JSONMarshal()
-bf.Mtx.Unlock()
-
-// restore a bloom filter from storage
-bfNew := bbloom.JSONUnmarshal(Json)
-
-isInNew := bfNew.Has([]byte("butter")) // should be true
-isNotInNew := bfNew.Has([]byte("Butter")) // should be false
-
-```
-
-to work with the bloom filter.
-
-### why 'fast'?
-
-It's about 3 times faster than Will Fitzgerald's bitset bloom filter (https://github.com/willf/bloom), and about as fast as my []bool set variant for Bloom filters (see https://github.com/AndreasBriese/bloom) while having an 8-times smaller memory footprint:
-
-
- Bloom filter (filter size 524288, 7 hashlocs)
- github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op)
- github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op)
- github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op)
- github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op)
-
- github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op)
- github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op)
- github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op)
- github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op)
- github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op)
- github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op)
-
-(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz)
-
-
-With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one subtraction per byte. sdbm is about as fast as fnv64a but gives fewer collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions.
diff --git a/vendor/github.com/dgraph-io/ristretto/z/allocator.go b/vendor/github.com/dgraph-io/ristretto/z/allocator.go
deleted file mode 100644
index af486a8d..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/allocator.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package z
-
-import "fmt"
-
-// Allocator amortizes the cost of small allocations by allocating memory in bigger chunks.
-// Internally it uses z.Calloc to allocate memory. Once allocated, the memory is not moved,
-// so it is safe to unsafe-cast the allocated bytes to Go struct pointers.
-type Allocator struct {
- pageSize int
- curBuf int
- curIdx int
- buffers [][]byte
- size uint64
-}
-
-// NewAllocator creates an allocator starting with the given size.
-func NewAllocator(sz int) *Allocator {
- return &Allocator{pageSize: sz}
-}
-
-// Size returns the size of the allocations so far.
-func (a *Allocator) Size() uint64 {
- return a.size
-}
-
-// Release would release the memory back. Remember to make this call to avoid memory leaks.
-func (a *Allocator) Release() {
- for _, b := range a.buffers {
- Free(b)
- }
-}
-
-const maxAlloc = 1 << 30
-
-func (a *Allocator) MaxAlloc() int {
- return maxAlloc
-}
-
-// Allocate would allocate a byte slice of length sz. It is safe to use this memory to unsafe cast
-// to Go structs.
-func (a *Allocator) Allocate(sz int) []byte {
- if len(a.buffers) == 0 {
- buf := Calloc(a.pageSize)
- a.buffers = append(a.buffers, buf)
- }
-
- if sz >= maxAlloc {
- panic(fmt.Sprintf("Allocate call exceeds max allocation possible."+
- " Requested: %d. Max Allowed: %d\n", sz, maxAlloc))
- }
- cb := a.buffers[a.curBuf]
- if len(cb) < a.curIdx+sz {
- for {
- a.pageSize *= 2 // Do multiply by 2 here.
- if a.pageSize >= sz {
- break
- }
- }
- if a.pageSize > maxAlloc {
- a.pageSize = maxAlloc
- }
-
- buf := Calloc(a.pageSize)
- a.buffers = append(a.buffers, buf)
- a.curBuf++
- a.curIdx = 0
- cb = a.buffers[a.curBuf]
- }
-
- slice := cb[a.curIdx : a.curIdx+sz]
- a.curIdx += sz
- a.size += uint64(sz)
- return slice
-}
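For reference, a minimal usage sketch of the Allocator API deleted above (a standalone, hypothetical `main` package; not part of this change):

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/z"
)

func main() {
	a := z.NewAllocator(1024) // start with a 1 KiB page
	defer a.Release()         // required: returns the Calloc'd chunks

	buf := a.Allocate(64) // carve 64 bytes out of the current page
	copy(buf, "hello")
	fmt.Println(a.Size()) // 64: total bytes handed out so far
}
```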
diff --git a/vendor/github.com/dgraph-io/ristretto/z/bbloom.go b/vendor/github.com/dgraph-io/ristretto/z/bbloom.go
deleted file mode 100644
index c80559d2..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/bbloom.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// The MIT License (MIT)
-// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package z
-
-import (
- "bytes"
- "encoding/json"
- "log"
- "math"
- "unsafe"
-)
-
-// helper
-var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128}
-
-func getSize(ui64 uint64) (size uint64, exponent uint64) {
- if ui64 < uint64(512) {
- ui64 = uint64(512)
- }
- size = uint64(1)
- for size < ui64 {
- size <<= 1
- exponent++
- }
- return size, exponent
-}
-
-func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) {
- size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2)
- locs := math.Ceil(float64(0.69314718056) * size / numEntries)
- return uint64(size), uint64(locs)
-}
-
-// NewBloomFilter returns a new bloomfilter.
-func NewBloomFilter(params ...float64) (bloomfilter *Bloom) {
- var entries, locs uint64
- if len(params) == 2 {
- if params[1] < 1 {
- entries, locs = calcSizeByWrongPositives(params[0], params[1])
- } else {
- entries, locs = uint64(params[0]), uint64(params[1])
- }
- } else {
- log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations))" +
- " i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries)," +
- " float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))")
- }
- size, exponent := getSize(entries)
- bloomfilter = &Bloom{
- sizeExp: exponent,
- size: size - 1,
- setLocs: locs,
- shift: 64 - exponent,
- }
- bloomfilter.Size(size)
- return bloomfilter
-}
-
-// Bloom filter
-type Bloom struct {
- bitset []uint64
- ElemNum uint64
- sizeExp uint64
- size uint64
- setLocs uint64
- shift uint64
-}
-
-// <--- http://www.cse.yorku.ca/~oz/hash.html
-// modified Berkeley DB Hash (32bit)
-// hash is casted to l, h = 16bit fragments
-// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) {
-// hash := uint64(len(*b))
-// for _, c := range *b {
-// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash
-// }
-// h = hash >> bl.shift
-// l = hash << bl.shift >> bl.shift
-// return l, h
-// }
-
-// Add adds hash of a key to the bloomfilter.
-func (bl *Bloom) Add(hash uint64) {
- h := hash >> bl.shift
- l := hash << bl.shift >> bl.shift
- for i := uint64(0); i < bl.setLocs; i++ {
- bl.Set((h + i*l) & bl.size)
- bl.ElemNum++
- }
-}
-
-// Has checks if bit(s) for entry hash is/are set,
-// returns true if the hash was added to the Bloom Filter.
-func (bl Bloom) Has(hash uint64) bool {
- h := hash >> bl.shift
- l := hash << bl.shift >> bl.shift
- for i := uint64(0); i < bl.setLocs; i++ {
- if !bl.IsSet((h + i*l) & bl.size) {
- return false
- }
- }
- return true
-}
-
-// AddIfNotHas only adds the hash if it's not already present in the bloomfilter.
-// Returns true if hash was added.
-// Returns false if hash was already registered in the bloomfilter.
-func (bl *Bloom) AddIfNotHas(hash uint64) bool {
- if bl.Has(hash) {
- return false
- }
- bl.Add(hash)
- return true
-}
-
-// Size creates the Bloom filter's bitset with size sz.
-func (bl *Bloom) Size(sz uint64) {
- bl.bitset = make([]uint64, sz>>6)
-}
-
-// Clear resets the Bloom filter.
-func (bl *Bloom) Clear() {
- for i := range bl.bitset {
- bl.bitset[i] = 0
- }
-}
-
-// Set sets the bit[idx] of bitset.
-func (bl *Bloom) Set(idx uint64) {
- ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
- *(*uint8)(ptr) |= mask[idx%8]
-}
-
-// IsSet checks if bit[idx] of bitset is set, returns true/false.
-func (bl *Bloom) IsSet(idx uint64) bool {
- ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
- r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1
- return r == 1
-}
-
-// bloomJSONImExport
-// Im/Export structure used by JSONMarshal / JSONUnmarshal
-type bloomJSONImExport struct {
- FilterSet []byte
- SetLocs uint64
-}
-
-// newWithBoolset takes a []byte slice and the number of locs per entry and
-// returns the bloomfilter with a bitset populated according to the input []byte.
-func newWithBoolset(bs *[]byte, locs uint64) *Bloom {
- bloomfilter := NewBloomFilter(float64(len(*bs)<<3), float64(locs))
- for i, b := range *bs {
- *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + uintptr(i))) = b
- }
- return bloomfilter
-}
-
-// JSONUnmarshal takes a JSON object (type bloomJSONImExport) as []byte and
-// returns the restored Bloom filter.
-func JSONUnmarshal(dbData []byte) (*Bloom, error) {
- bloomImEx := bloomJSONImExport{}
- if err := json.Unmarshal(dbData, &bloomImEx); err != nil {
- return nil, err
- }
- buf := bytes.NewBuffer(bloomImEx.FilterSet)
- bs := buf.Bytes()
- bf := newWithBoolset(&bs, bloomImEx.SetLocs)
- return bf, nil
-}
-
-// JSONMarshal returns JSON-object (type bloomJSONImExport) as []byte.
-func (bl Bloom) JSONMarshal() []byte {
- bloomImEx := bloomJSONImExport{}
- bloomImEx.SetLocs = bl.setLocs
- bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3)
- for i := range bloomImEx.FilterSet {
- bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[0])) +
- uintptr(i)))
- }
- data, err := json.Marshal(bloomImEx)
- if err != nil {
- log.Fatal("json.Marshal failed: ", err)
- }
- return data
-}
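The constant 0.69314718056 in calcSizeByWrongPositives is ln 2; the function is the standard Bloom-filter sizing rule, which in the usual notation (n entries, false-positive rate p, m bits, k hash locations) reads:

```latex
m = -\frac{n \ln p}{(\ln 2)^2}, \qquad k = \left\lceil \ln 2 \cdot \frac{m}{n} \right\rceil
```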
diff --git a/vendor/github.com/dgraph-io/ristretto/z/buffer.go b/vendor/github.com/dgraph-io/ristretto/z/buffer.go
deleted file mode 100644
index c63b1ec1..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/buffer.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package z
-
-import (
- "encoding/binary"
-)
-
-// Buffer is the equivalent of bytes.Buffer without the ability to read. It uses z.Calloc to allocate
-// memory, which depending upon how the code is compiled could use jemalloc for allocations.
-type Buffer struct {
- buf []byte
- offset int
-}
-
-// NewBuffer would allocate a buffer of size sz upfront.
-func NewBuffer(sz int) *Buffer {
- return &Buffer{
- buf: Calloc(sz),
- offset: 0,
- }
-}
-
-// Len would return the number of bytes written to the buffer so far.
-func (b *Buffer) Len() int {
- return b.offset
-}
-
-// Bytes would return all the written bytes as a slice.
-func (b *Buffer) Bytes() []byte {
- return b.buf[0:b.offset]
-}
-
-// smallBufferSize is an initial allocation minimal capacity.
-const smallBufferSize = 64
-
-// Grow would grow the buffer to have at least n more bytes. In case the buffer is at capacity, it
-// would reallocate twice the size of current capacity + n, to ensure n bytes can be written to the
-// buffer without further allocation.
-func (b *Buffer) Grow(n int) {
- // In this case, len and cap are the same.
- if len(b.buf) == 0 && n <= smallBufferSize {
- b.buf = Calloc(smallBufferSize)
- return
- } else if b.buf == nil {
- b.buf = Calloc(n)
- return
- }
- if b.offset+n < len(b.buf) {
- return
- }
-
- sz := 2*len(b.buf) + n
- newBuf := Calloc(sz)
- copy(newBuf, b.buf[:b.offset])
- Free(b.buf)
- b.buf = newBuf
-}
-
-// Allocate is a way to get a slice of size n back from the buffer. This slice can be directly
-// written to. Warning: Allocate is not thread-safe. The byte slice returned MUST be used before
-// further calls to Buffer.
-func (b *Buffer) Allocate(n int) []byte {
- b.Grow(n)
- off := b.offset
- b.offset += n
- return b.buf[off:b.offset]
-}
-
-func (b *Buffer) writeLen(sz int) {
- buf := b.Allocate(4)
- binary.BigEndian.PutUint32(buf, uint32(sz))
-}
-
-// SliceAllocate would encode the size provided into the buffer, followed by a call to Allocate,
-// hence returning the slice of size sz. This can be used to allocate a lot of small buffers into
-// this big buffer.
-// Note that SliceAllocate should NOT be mixed with normal calls to Write. Otherwise, SliceOffsets
-// won't work.
-func (b *Buffer) SliceAllocate(sz int) []byte {
- b.Grow(4 + sz)
- b.writeLen(sz)
- return b.Allocate(sz)
-}
-
-// SliceOffsets would return the offsets of all slices written to the buffer.
-// TODO: Perhaps keep the offsets separate in another buffer, and allow access to slices via index.
-func (b *Buffer) SliceOffsets(offsets []int) []int {
- start := 0
- for start < b.offset {
- offsets = append(offsets, start)
- sz := binary.BigEndian.Uint32(b.buf[start:])
- start += 4 + int(sz)
- }
- return offsets
-}
-
-// Slice would return the slice written at offset.
-func (b *Buffer) Slice(offset int) []byte {
- sz := binary.BigEndian.Uint32(b.buf[offset:])
- start := offset + 4
- return b.buf[start : start+int(sz)]
-}
-
-// Write would write p bytes to the buffer.
-func (b *Buffer) Write(p []byte) (n int, err error) {
- b.Grow(len(p))
- n = copy(b.buf[b.offset:], p)
- b.offset += n
- return n, nil
-}
-
-// Reset would reset the buffer to be reused.
-func (b *Buffer) Reset() {
- b.offset = 0
-}
-
-// Release would free up the memory allocated by the buffer. Once the usage of buffer is done, it is
-// important to call Release, otherwise a memory leak can happen.
-func (b *Buffer) Release() {
- Free(b.buf)
-}
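A minimal sketch of the length-prefixed record pattern that SliceAllocate and SliceOffsets in the deleted Buffer were built for (standalone example, not from the upstream repo):

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/z"
)

func main() {
	b := z.NewBuffer(256)
	defer b.Release() // frees the Calloc'd backing memory

	// Each record is stored as a 4-byte big-endian length followed by the payload.
	copy(b.SliceAllocate(5), "hello")
	copy(b.SliceAllocate(5), "world")

	// Recover each record via its offset.
	for _, off := range b.SliceOffsets(nil) {
		fmt.Printf("%s\n", b.Slice(off)) // hello, then world
	}
}
```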
diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc.go b/vendor/github.com/dgraph-io/ristretto/z/calloc.go
deleted file mode 100644
index 71980f76..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/calloc.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package z
-
-import "sync/atomic"
-
-var numBytes int64
-
-// NumAllocBytes returns the number of bytes allocated using calls to z.Calloc. The allocations
-// could be happening via either Go or jemalloc, depending upon the build flags.
-func NumAllocBytes() int64 {
- return atomic.LoadInt64(&numBytes)
-}
diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go
deleted file mode 100644
index db36d985..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
-// of this source code is governed by a BSD-style license that can be found in
-// the LICENSE file.
-
-// +build 386 amd64p32 arm armbe mips mipsle mips64p32 mips64p32le ppc sparc
-
-package z
-
-const (
- // MaxArrayLen is a safe maximum length for slices on this architecture.
- MaxArrayLen = 1<<31 - 1
-)
diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go
deleted file mode 100644
index 7e2c5da7..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
-// of this source code is governed by a BSD-style license that can be found in
-// the LICENSE file.
-
-// +build amd64 arm64 arm64be ppc64 ppc64le mips64 mips64le s390x sparc64
-
-package z
-
-const (
- // MaxArrayLen is a safe maximum length for slices on this architecture.
- MaxArrayLen = 1<<50 - 1
-)
diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go
deleted file mode 100644
index 2a10683a..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
-// of this source code is governed by a BSD-style license that can be found in
-// the LICENSE file.
-
-// +build jemalloc
-
-package z
-
-/*
-#cgo LDFLAGS: -L/usr/local/lib -Wl,-rpath,/usr/local/lib -ljemalloc -lm -lstdc++ -pthread -ldl
-#include <stdlib.h>
-#include <jemalloc/jemalloc.h>
-*/
-import "C"
-import (
- "sync/atomic"
- "unsafe"
-)
-
-// The go:linkname directive provides backdoor access to private functions in
-// the runtime. Below we're accessing the throw function.
-
-//go:linkname throw runtime.throw
-func throw(s string)
-
-// New allocates a slice of size n. The returned slice is from manually managed
-// memory and MUST be released by calling Free. Failure to do so will result in
-// a memory leak.
-//
-// Compile jemalloc with ./configure --with-jemalloc-prefix="je_"
-// https://android.googlesource.com/platform/external/jemalloc_new/+/6840b22e8e11cb68b493297a5cd757d6eaa0b406/TUNING.md
-// These two config options seem useful for frequent allocations and deallocations in
-// multi-threaded programs (like we have).
-// JE_MALLOC_CONF="background_thread:true,metadata_thp:auto"
-//
-// Compile Go program with `go build -tags=jemalloc` to enable this.
-func Calloc(n int) []byte {
- if n == 0 {
- return make([]byte, 0)
- }
- // We need to be conscious of the Cgo pointer passing rules:
- //
- // https://golang.org/cmd/cgo/#hdr-Passing_pointers
- //
- // ...
- // Note: the current implementation has a bug. While Go code is permitted
- // to write nil or a C pointer (but not a Go pointer) to C memory, the
- // current implementation may sometimes cause a runtime error if the
- // contents of the C memory appear to be a Go pointer. Therefore, avoid
- // passing uninitialized C memory to Go code if the Go code is going to
- // store pointer values in it. Zero out the memory in C before passing it
- // to Go.
-
- ptr := C.je_calloc(C.size_t(n), 1)
- if ptr == nil {
- // NB: throw is like panic, except it guarantees the process will be
- // terminated. The call below is exactly what the Go runtime invokes when
- // it cannot allocate memory.
- throw("out of memory")
- }
- atomic.AddInt64(&numBytes, int64(n))
- // Interpret the C pointer as a pointer to a Go array, then slice.
- return (*[MaxArrayLen]byte)(unsafe.Pointer(ptr))[:n:n]
-}
-
-// CallocNoRef does the exact same thing as Calloc with jemalloc enabled.
-func CallocNoRef(n int) []byte {
- return Calloc(n)
-}
-
-// Free frees the specified slice.
-func Free(b []byte) {
- if sz := cap(b); sz != 0 {
- if len(b) == 0 {
- b = b[:cap(b)]
- }
- ptr := unsafe.Pointer(&b[0])
- C.je_free(ptr)
- atomic.AddInt64(&numBytes, -int64(sz))
- }
-}
-
-func StatsPrint() {
- opts := C.CString("mdablxe")
- C.je_malloc_stats_print(nil, nil, opts)
- C.free(unsafe.Pointer(opts))
-}
diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go
deleted file mode 100644
index 25bfaa1e..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
-// of this source code is governed by a BSD-style license that can be found in
-// the LICENSE file.
-
-// +build !jemalloc
-
-package z
-
-import (
- "fmt"
-)
-
-// Provides versions of Calloc and Free when cgo is not available (e.g. cross
-// compilation).
-
-// Calloc allocates a slice of size n.
-func Calloc(n int) []byte {
- return make([]byte, n)
-}
-
-// CallocNoRef will not give you memory back without jemalloc.
-func CallocNoRef(n int) []byte {
- // We do the add here just to stay compatible with a corresponding Free call.
- return nil
-}
-
-// Free does not do anything in this mode.
-func Free(b []byte) {}
-
-func StatsPrint() {
- fmt.Println("Using Go memory")
-}
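Both build variants share the contract sketched below: every Calloc should be paired with a Free, since under `-tags=jemalloc` the memory is invisible to the Go GC (without the tag, Free is a no-op and NumAllocBytes stays at zero). A minimal, hypothetical example:

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/z"
)

func main() {
	buf := z.Calloc(1 << 20) // 1 MiB of zeroed memory; manually managed under jemalloc
	defer z.Free(buf)        // mandatory pairing to avoid a leak

	buf[0] = 42
	fmt.Println(z.NumAllocBytes()) // bytes currently allocated via z.Calloc (jemalloc builds only)
	z.StatsPrint()
}
```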
diff --git a/vendor/github.com/dgraph-io/ristretto/z/histogram.go b/vendor/github.com/dgraph-io/ristretto/z/histogram.go
deleted file mode 100644
index 119aea5f..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/histogram.go
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package z
-
-import (
- "fmt"
- "math"
- "strings"
-
- "github.com/dustin/go-humanize"
-)
-
-// Creates bounds for a histogram. The bounds are powers of two of the form
-// [2^min_exponent, ..., 2^max_exponent].
-func HistogramBounds(minExponent, maxExponent uint32) []float64 {
- var bounds []float64
- for i := minExponent; i <= maxExponent; i++ {
- bounds = append(bounds, float64(int(1)<<i))
- }
- return bounds
-}
-
-// HistogramData stores the information needed to represent the sizes of the keys and values
-// as a histogram.
-type HistogramData struct {
- Bounds []float64
- Count int64
- CountPerBucket []int64
- Min int64
- Max int64
- Sum int64
-}
-
-// NewHistogramData returns a new instance of HistogramData with properly initialized fields.
-func NewHistogramData(bounds []float64) *HistogramData {
- return &HistogramData{
- Bounds: bounds,
- CountPerBucket: make([]int64, len(bounds)+1),
- Max: 0,
- Min: math.MaxInt64,
- }
-}
-
-// Update changes the Min and Max fields if value is less than or greater than the current values.
-func (histogram *HistogramData) Update(value int64) {
- if value > histogram.Max {
- histogram.Max = value
- }
- if value < histogram.Min {
- histogram.Min = value
- }
-
- histogram.Sum += value
- histogram.Count++
-
- for index := 0; index <= len(histogram.Bounds); index++ {
- // Allocate value in the last buckets if we reached the end of the Bounds array.
- if index == len(histogram.Bounds) {
- histogram.CountPerBucket[index]++
- break
- }
-
- if value < int64(histogram.Bounds[index]) {
- histogram.CountPerBucket[index]++
- break
- }
- }
-}
-
-// Mean returns the mean value for the histogram.
-func (histogram *HistogramData) Mean() float64 {
- if histogram.Count == 0 {
- return 0
- }
- return float64(histogram.Sum) / float64(histogram.Count)
-}
-
-// String converts the histogram data into human-readable string.
-func (histogram *HistogramData) String() string {
- if histogram == nil {
- return ""
- }
- var b strings.Builder
-
- b.WriteString("\n -- Histogram: \n")
- b.WriteString(fmt.Sprintf("Min value: %d \n", histogram.Min))
- b.WriteString(fmt.Sprintf("Max value: %d \n", histogram.Max))
- b.WriteString(fmt.Sprintf("Mean: %.2f \n", histogram.Mean()))
-
- numBounds := len(histogram.Bounds)
- for index, count := range histogram.CountPerBucket {
- if count == 0 {
- continue
- }
-
- // The last bucket represents the bucket that contains the range from
- // the last bound up to infinity so it's processed differently than the
- // other buckets.
- if index == len(histogram.CountPerBucket)-1 {
- lowerBound := uint64(histogram.Bounds[numBounds-1])
- page := float64(count*100) / float64(histogram.Count)
- b.WriteString(fmt.Sprintf("[%s, %s) %d %.2f%% \n",
- humanize.IBytes(lowerBound), "infinity", count, page))
- continue
- }
-
- upperBound := uint64(histogram.Bounds[index])
- lowerBound := uint64(0)
- if index > 0 {
- lowerBound = uint64(histogram.Bounds[index-1])
- }
-
- page := float64(count*100) / float64(histogram.Count)
- b.WriteString(fmt.Sprintf("[%s, %s) %d %.2f%% \n",
- humanize.IBytes(lowerBound), humanize.IBytes(upperBound), count, page))
- }
- b.WriteString(" --\n")
- return b.String()
-}
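A minimal usage sketch of the deleted histogram helpers (standalone example, assuming the standard upstream API):

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/z"
)

func main() {
	// Power-of-two buckets from 2^4 (16 B) up to 2^10 (1 KiB).
	h := z.NewHistogramData(z.HistogramBounds(4, 10))
	for _, size := range []int64{24, 100, 700, 5000} {
		h.Update(size) // 5000 overflows into the final catch-all bucket
	}
	fmt.Println(h) // prints min, max, mean, and per-bucket percentages
}
```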
diff --git a/vendor/github.com/dgraph-io/ristretto/z/rtutil.go b/vendor/github.com/dgraph-io/ristretto/z/rtutil.go
deleted file mode 100644
index 16aff0c9..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/rtutil.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// MIT License
-
-// Copyright (c) 2019 Ewan Chou
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package z
-
-import (
- "unsafe"
-)
-
-// NanoTime returns the current time in nanoseconds from a monotonic clock.
-//go:linkname NanoTime runtime.nanotime
-func NanoTime() int64
-
-// CPUTicks is a faster alternative to NanoTime to measure time duration.
-//go:linkname CPUTicks runtime.cputicks
-func CPUTicks() int64
-
-type stringStruct struct {
- str unsafe.Pointer
- len int
-}
-
-//go:noescape
-//go:linkname memhash runtime.memhash
-func memhash(p unsafe.Pointer, h, s uintptr) uintptr
-
-// MemHash is the hash function used by the Go map; it utilizes available hardware instructions
-// (behaves as aeshash if AES instructions are available).
-// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash.
-func MemHash(data []byte) uint64 {
- ss := (*stringStruct)(unsafe.Pointer(&data))
- return uint64(memhash(ss.str, 0, uintptr(ss.len)))
-}
-
-// MemHashString is the hash function used by the Go map; it utilizes available hardware
-// instructions (behaves as aeshash if AES instructions are available).
-// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash.
-func MemHashString(str string) uint64 {
- ss := (*stringStruct)(unsafe.Pointer(&str))
- return uint64(memhash(ss.str, 0, uintptr(ss.len)))
-}
-
-// FastRand is a fast thread local random function.
-//go:linkname FastRand runtime.fastrand
-func FastRand() uint32
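A small sketch of the runtime-linked helpers above; note the per-process seed caveat from the comments:

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/z"
)

func main() {
	// Same bytes hash equally within one process, but the seed changes
	// across processes, so never persist these values.
	fmt.Println(z.MemHashString("candy") == z.MemHash([]byte("candy"))) // true

	fmt.Println(z.FastRand() % 16) // cheap thread-local random bucket pick
}
```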
diff --git a/vendor/github.com/dgraph-io/ristretto/z/rtutil.s b/vendor/github.com/dgraph-io/ristretto/z/rtutil.s
deleted file mode 100644
index e69de29b..00000000
diff --git a/vendor/github.com/dgraph-io/ristretto/z/z.go b/vendor/github.com/dgraph-io/ristretto/z/z.go
deleted file mode 100644
index a25e10d1..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/z.go
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package z
-
-import (
- "context"
- "sync"
-
- "github.com/cespare/xxhash"
-)
-
-// TODO: Figure out a way to re-use memhash for the second uint64 hash, we
-// already know that appending bytes isn't reliable for generating a
-// second hash (see Ristretto PR #88).
-//
-// We also know that while the Go runtime has a runtime memhash128
-// function, it's not possible to use it to generate [2]uint64 or
-// anything resembling a 128bit hash, even though that's exactly what
-// we need in this situation.
-func KeyToHash(key interface{}) (uint64, uint64) {
- if key == nil {
- return 0, 0
- }
- switch k := key.(type) {
- case uint64:
- return k, 0
- case string:
- return MemHashString(k), xxhash.Sum64String(k)
- case []byte:
- return MemHash(k), xxhash.Sum64(k)
- case byte:
- return uint64(k), 0
- case int:
- return uint64(k), 0
- case int32:
- return uint64(k), 0
- case uint32:
- return uint64(k), 0
- case int64:
- return uint64(k), 0
- default:
- panic("Key type not supported")
- }
-}
-
-var (
- dummyCloserChan <-chan struct{}
-)
-
-// Closer holds the two things we need to close a goroutine and wait for it to
-// finish: a chan to tell the goroutine to shut down, and a WaitGroup with
-// which to wait for it to finish shutting down.
-type Closer struct {
- waiting sync.WaitGroup
-
- ctx context.Context
- cancel context.CancelFunc
-}
-
-// NewCloser constructs a new Closer, with an initial count on the WaitGroup.
-func NewCloser(initial int) *Closer {
- ret := &Closer{}
- ret.ctx, ret.cancel = context.WithCancel(context.Background())
- ret.waiting.Add(initial)
- return ret
-}
-
-// AddRunning Add()'s delta to the WaitGroup.
-func (lc *Closer) AddRunning(delta int) {
- lc.waiting.Add(delta)
-}
-
-// Ctx can be used to get a context, which would automatically get cancelled when Signal is called.
-func (lc *Closer) Ctx() context.Context {
- if lc == nil {
- return context.Background()
- }
- return lc.ctx
-}
-
-// Signal signals the HasBeenClosed signal.
-func (lc *Closer) Signal() {
- // Todo(ibrahim): Change Signal to return error on next badger breaking change.
- lc.cancel()
-}
-
-// HasBeenClosed gets signaled when Signal() is called.
-func (lc *Closer) HasBeenClosed() <-chan struct{} {
- if lc == nil {
- return dummyCloserChan
- }
- return lc.ctx.Done()
-}
-
-// Done calls Done() on the WaitGroup.
-func (lc *Closer) Done() {
- if lc == nil {
- return
- }
- lc.waiting.Done()
-}
-
-// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done
-// calls to balance out.)
-func (lc *Closer) Wait() {
- lc.waiting.Wait()
-}
-
-// SignalAndWait calls Signal(), then Wait().
-func (lc *Closer) SignalAndWait() {
- lc.Signal()
- lc.Wait()
-}
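A minimal sketch of the Closer shutdown pattern (and the KeyToHash hash pair) from the deleted file:

```go
package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/ristretto/z"
)

func main() {
	h1, h2 := z.KeyToHash("some-key") // 64-bit primary hash plus a conflict hash
	fmt.Println(h1, h2)

	c := z.NewCloser(1) // one goroutine to wait for
	go func() {
		defer c.Done()
		for {
			select {
			case <-c.HasBeenClosed():
				fmt.Println("worker: shutting down")
				return
			default:
				time.Sleep(10 * time.Millisecond) // simulated work
			}
		}
	}()
	c.SignalAndWait() // cancel the context, then wait for Done
}
```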
diff --git a/vendor/github.com/dgryski/go-farm/.gitignore b/vendor/github.com/dgryski/go-farm/.gitignore
deleted file mode 100644
index 36029ab5..00000000
--- a/vendor/github.com/dgryski/go-farm/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-*.exe
-*.test
-*.prof
-
-target
diff --git a/vendor/github.com/dgryski/go-farm/.travis.yml b/vendor/github.com/dgryski/go-farm/.travis.yml
deleted file mode 100644
index a6422d3f..00000000
--- a/vendor/github.com/dgryski/go-farm/.travis.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-language: go
-
-sudo: false
-
-branches:
- except:
- - release
-
-branches:
- only:
- - master
- - develop
- - travis
-
-go:
- - 1.12.x
- - 1.13.x
- - tip
-
-matrix:
- allow_failures:
- - go: tip
-
-before_install:
- - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi;
- - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi;
- - go get github.com/mattn/goveralls
-
-before_script:
- - make deps
-
-script:
- - make qa
-
-after_failure:
- - cat ./target/test/report.xml
-
-after_success:
- - if [ "$TRAVIS_GO_VERSION" = "1.9" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi;
diff --git a/vendor/github.com/dgryski/go-farm/LICENSE b/vendor/github.com/dgryski/go-farm/LICENSE
deleted file mode 100644
index 0f188485..00000000
--- a/vendor/github.com/dgryski/go-farm/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright (c) 2014-2017 Damian Gryski
-Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/vendor/github.com/dgryski/go-farm/Makefile b/vendor/github.com/dgryski/go-farm/Makefile
deleted file mode 100644
index 7592736e..00000000
--- a/vendor/github.com/dgryski/go-farm/Makefile
+++ /dev/null
@@ -1,187 +0,0 @@
-# MAKEFILE
-#
-# @author Nicola Asuni
-# @link https://github.com/dgryski/go-farm
-#
-# This file is intended to be executed in a Linux-compatible system.
-# It also assumes that the project has been cloned in the right path under GOPATH:
-# $GOPATH/src/github.com/dgryski/go-farm
-#
-# ------------------------------------------------------------------------------
-
-# List special make targets that are not associated with files
-.PHONY: help all test format fmtcheck vet lint coverage cyclo misspell errcheck staticcheck astscan qa deps clean nuke
-
-# Use bash as shell (Note: Ubuntu now uses dash which doesn't support PIPESTATUS).
-SHELL=/bin/bash
-
-# CVS path (path to the parent dir containing the project)
-CVSPATH=github.com/dgryski
-
-# Project owner
-OWNER=dgryski
-
-# Project vendor
-VENDOR=dgryski
-
-# Project name
-PROJECT=go-farm
-
-# Project version
-VERSION=$(shell cat VERSION)
-
-# Name of RPM or DEB package
-PKGNAME=${VENDOR}-${PROJECT}
-
-# Current directory
-CURRENTDIR=$(shell pwd)
-
-# GO lang path
-ifneq ($(GOPATH),)
- ifeq ($(findstring $(GOPATH),$(CURRENTDIR)),)
- # the defined GOPATH is not valid
- GOPATH=
- endif
-endif
-ifeq ($(GOPATH),)
- # extract the GOPATH
- GOPATH=$(firstword $(subst /src/, ,$(CURRENTDIR)))
-endif
-
-# --- MAKE TARGETS ---
-
-# Display general help about this command
-help:
- @echo ""
- @echo "$(PROJECT) Makefile."
- @echo "GOPATH=$(GOPATH)"
- @echo "The following commands are available:"
- @echo ""
- @echo " make qa : Run all the tests"
- @echo " make test : Run the unit tests"
- @echo ""
- @echo " make format : Format the source code"
- @echo " make fmtcheck : Check if the source code has been formatted"
- @echo " make vet : Check for suspicious constructs"
- @echo " make lint : Check for style errors"
- @echo " make coverage : Generate the coverage report"
- @echo " make cyclo : Generate the cyclomatic complexity report"
- @echo " make misspell : Detect commonly misspelled words in source files"
- @echo " make staticcheck : Run staticcheck
- @echo " make errcheck : Check that error return values are used"
- @echo " make astscan : GO AST scanner"
- @echo ""
- @echo " make docs : Generate source code documentation"
- @echo ""
- @echo " make deps : Get the dependencies"
- @echo " make clean : Remove any build artifact"
- @echo " make nuke : Deletes any intermediate file"
- @echo ""
-
-
-# Alias for help target
-all: help
-
-# Run the unit tests
-test:
- @mkdir -p target/test
- @mkdir -p target/report
- GOPATH=$(GOPATH) \
- go test \
- -covermode=atomic \
- -bench=. \
- -race \
- -cpuprofile=target/report/cpu.out \
- -memprofile=target/report/mem.out \
- -mutexprofile=target/report/mutex.out \
- -coverprofile=target/report/coverage.out \
- -v ./... | \
- tee >(PATH=$(GOPATH)/bin:$(PATH) go-junit-report > target/test/report.xml); \
- test $${PIPESTATUS[0]} -eq 0
-
-# Format the source code
-format:
- @find . -type f -name "*.go" -exec gofmt -s -w {} \;
-
-# Check if the source code has been formatted
-fmtcheck:
- @mkdir -p target
- @find . -type f -name "*.go" -exec gofmt -s -d {} \; | tee target/format.diff
- @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; }
-
-# Check for syntax errors
-vet:
- GOPATH=$(GOPATH) go vet .
-
-# Check for style errors
-lint:
- GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint .
-
-# Generate the coverage report
-coverage:
- @mkdir -p target/report
- GOPATH=$(GOPATH) \
- go tool cover -html=target/report/coverage.out -o target/report/coverage.html
-
-# Report cyclomatic complexity
-cyclo:
- @mkdir -p target/report
- GOPATH=$(GOPATH) gocyclo -avg ./ | tee target/report/cyclo.txt ; test $${PIPESTATUS[0]} -eq 0
-
-# Detect commonly misspelled words in source files
-misspell:
- @mkdir -p target/report
- GOPATH=$(GOPATH) misspell -error ./ | tee target/report/misspell.txt ; test $${PIPESTATUS[0]} -eq 0
-
-# Check that error return values are used
-errcheck:
- @mkdir -p target/report
- GOPATH=$(GOPATH) errcheck ./ | tee target/report/errcheck.txt
-
-
-# staticcheck
-staticcheck:
- @mkdir -p target/report
- GOPATH=$(GOPATH) staticcheck ./... | tee target/report/staticcheck.txt
-
-
-# AST scanner
-astscan:
- @mkdir -p target/report
- GOPATH=$(GOPATH) gas .//*.go | tee target/report/astscan.txt
-
-# Generate source docs
-docs:
- @mkdir -p target/docs
- nohup sh -c 'GOPATH=$(GOPATH) godoc -http=127.0.0.1:6060' > target/godoc_server.log 2>&1 &
- wget --directory-prefix=target/docs/ --execute robots=off --retry-connrefused --recursive --no-parent --adjust-extension --page-requisites --convert-links http://127.0.0.1:6060/pkg/github.com/${VENDOR}/${PROJECT}/ ; kill -9 `lsof -ti :6060`
- @echo '<html><head></head><body><a href="./127.0.0.1:6060/pkg/'${CVSPATH}'/'${PROJECT}'/index.html">'${PKGNAME}' Documentation ...</a></body></html>' > target/docs/index.html
-
-# Alias to run all quality-assurance checks
-qa: fmtcheck test vet lint coverage cyclo misspell errcheck astscan
-
-# --- INSTALL ---
-
-# Get the dependencies
-deps:
- GOPATH=$(GOPATH) go get ./...
- GOPATH=$(GOPATH) go get golang.org/x/lint/golint
- GOPATH=$(GOPATH) go get github.com/jstemmer/go-junit-report
- GOPATH=$(GOPATH) go get github.com/axw/gocov/gocov
- GOPATH=$(GOPATH) go get github.com/fzipp/gocyclo
- GOPATH=$(GOPATH) go get github.com/gordonklaus/ineffassign
- GOPATH=$(GOPATH) go get github.com/client9/misspell/cmd/misspell
- GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/structcheck
- GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/varcheck
- GOPATH=$(GOPATH) go get github.com/kisielk/errcheck
- GOPATH=$(GOPATH) go get honnef.co/go/tools/cmd/staticcheck
- GOPATH=$(GOPATH) go get github.com/GoASTScanner/gas
-
-# Remove any build artifact
-clean:
- GOPATH=$(GOPATH) go clean ./...
-
-# Deletes any intermediate file
-nuke:
- rm -rf ./target
- GOPATH=$(GOPATH) go clean -i ./...
diff --git a/vendor/github.com/dgryski/go-farm/README.md b/vendor/github.com/dgryski/go-farm/README.md
deleted file mode 100644
index 0784f90f..00000000
--- a/vendor/github.com/dgryski/go-farm/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# go-farm
-
-*Google's FarmHash hash functions implemented in Go*
-
-[![Master Branch](https://img.shields.io/badge/-master:-gray.svg)](https://github.com/dgryski/go-farm/tree/master)
-[![Master Build Status](https://secure.travis-ci.org/dgryski/go-farm.png?branch=master)](https://travis-ci.org/dgryski/go-farm?branch=master)
-[![Master Coverage Status](https://coveralls.io/repos/dgryski/go-farm/badge.svg?branch=master&service=github)](https://coveralls.io/github/dgryski/go-farm?branch=master)
-[![Go Report Card](https://goreportcard.com/badge/github.com/dgryski/go-farm)](https://goreportcard.com/report/github.com/dgryski/go-farm)
-[![GoDoc](https://godoc.org/github.com/dgryski/go-farm?status.svg)](http://godoc.org/github.com/dgryski/go-farm)
-
-## Description
-
-FarmHash, a family of hash functions.
-
-This is a (mechanical) translation of the non-SSE4/non-AESNI hash functions from Google's FarmHash (https://github.com/google/farmhash).
-
-
-FarmHash provides hash functions for strings and other data.
-The functions mix the input bits thoroughly but are not suitable for cryptography.
-
-All members of the FarmHash family were designed with heavy reliance on previous work by Jyrki Alakuijala, Austin Appleby, Bob Jenkins, and others.
-
-For more information please consult https://github.com/google/farmhash
-
-
-## Getting started
-
-This application is written in the Go language; please refer to the guides at https://golang.org to get started.
-
-This project includes a Makefile that allows you to test and build the project with simple commands.
-To see all available options:
-```bash
-make help
-```
-
-## Running all tests
-
-Before committing the code, please check if it passes all tests using
-```bash
-make qa
-```
-
-## License
-
-As this is a highly derivative work, I have placed it under the same license as the original implementation. See the
-LICENSE file for details.
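A minimal usage sketch of the exported hash functions this vendored copy provided (Hash32, Hash32WithSeed, and the 128-bit variants):

```go
package main

import (
	"fmt"

	"github.com/dgryski/go-farm"
)

func main() {
	data := []byte("hello, farmhash")

	fmt.Println(farm.Hash32(data))                     // 32-bit hash
	fmt.Println(farm.Hash32WithSeed(data, 0xdecafbad)) // seeded variant

	lo, hi := farm.Fingerprint128(data) // stable 128-bit fingerprint as two uint64s
	fmt.Println(lo, hi)
}
```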
diff --git a/vendor/github.com/dgryski/go-farm/VERSION b/vendor/github.com/dgryski/go-farm/VERSION
deleted file mode 100644
index 38f77a65..00000000
--- a/vendor/github.com/dgryski/go-farm/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-2.0.1
diff --git a/vendor/github.com/dgryski/go-farm/basics.go b/vendor/github.com/dgryski/go-farm/basics.go
deleted file mode 100644
index ec7076c0..00000000
--- a/vendor/github.com/dgryski/go-farm/basics.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package farm
-
-import "math/bits"
-
-// Some primes between 2^63 and 2^64 for various uses.
-const k0 uint64 = 0xc3a5c85c97cb3127
-const k1 uint64 = 0xb492b66fbe98f273
-const k2 uint64 = 0x9ae16a3b2f90404f
-
-// Magic numbers for 32-bit hashing. Copied from Murmur3.
-const c1 uint32 = 0xcc9e2d51
-const c2 uint32 = 0x1b873593
-
-// A 32-bit to 32-bit integer hash copied from Murmur3.
-func fmix(h uint32) uint32 {
- h ^= h >> 16
- h *= 0x85ebca6b
- h ^= h >> 13
- h *= 0xc2b2ae35
- h ^= h >> 16
- return h
-}
-
-func mur(a, h uint32) uint32 {
- // Helper from Murmur3 for combining two 32-bit values.
- a *= c1
- a = bits.RotateLeft32(a, -17)
- a *= c2
- h ^= a
- h = bits.RotateLeft32(h, -19)
- return h*5 + 0xe6546b64
-}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashcc.go b/vendor/github.com/dgryski/go-farm/farmhashcc.go
deleted file mode 100644
index 3e68ae3a..00000000
--- a/vendor/github.com/dgryski/go-farm/farmhashcc.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package farm
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-// This file provides a 32-bit hash equivalent to CityHash32 (v1.1.1)
-// and a 128-bit hash equivalent to CityHash128 (v1.1.1). It also provides
-// a seeded 32-bit hash function similar to CityHash32.
-
-func hash32Len13to24Seed(s []byte, seed uint32) uint32 {
- slen := len(s)
- a := binary.LittleEndian.Uint32(s[-4+(slen>>1) : -4+(slen>>1)+4])
- b := binary.LittleEndian.Uint32(s[4 : 4+4])
- c := binary.LittleEndian.Uint32(s[slen-8 : slen-8+4])
- d := binary.LittleEndian.Uint32(s[(slen >> 1) : (slen>>1)+4])
- e := binary.LittleEndian.Uint32(s[0 : 0+4])
- f := binary.LittleEndian.Uint32(s[slen-4 : slen-4+4])
- h := d*c1 + uint32(slen) + seed
- a = bits.RotateLeft32(a, -12) + f
- h = mur(c, h) + a
- a = bits.RotateLeft32(a, -3) + c
- h = mur(e, h) + a
- a = bits.RotateLeft32(a+f, -12) + d
- h = mur(b^seed, h) + a
- return fmix(h)
-}
-
-func hash32Len0to4(s []byte, seed uint32) uint32 {
- slen := len(s)
- b := seed
- c := uint32(9)
- for i := 0; i < slen; i++ {
- v := int8(s[i])
- b = (b * c1) + uint32(v)
- c ^= b
- }
- return fmix(mur(b, mur(uint32(slen), c)))
-}
-
-func hash128to64(x uint128) uint64 {
- // Murmur-inspired hashing.
- const mul uint64 = 0x9ddfea08eb382d69
- a := (x.lo ^ x.hi) * mul
- a ^= (a >> 47)
- b := (x.hi ^ a) * mul
- b ^= (b >> 47)
- b *= mul
- return b
-}
-
-type uint128 struct {
- lo uint64
- hi uint64
-}
-
-// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings
-// of any length representable in signed long. Based on City and Murmur.
-func cityMurmur(s []byte, seed uint128) uint128 {
- slen := len(s)
- a := seed.lo
- b := seed.hi
- var c uint64
- var d uint64
- l := slen - 16
- if l <= 0 { // len <= 16
- a = shiftMix(a*k1) * k1
- c = b*k1 + hashLen0to16(s)
- if slen >= 8 {
- d = shiftMix(a + binary.LittleEndian.Uint64(s[0:0+8]))
- } else {
- d = shiftMix(a + c)
- }
- } else { // len > 16
- c = hashLen16(binary.LittleEndian.Uint64(s[slen-8:slen-8+8])+k1, a)
- d = hashLen16(b+uint64(slen), c+binary.LittleEndian.Uint64(s[slen-16:slen-16+8]))
- a += d
- for {
- a ^= shiftMix(binary.LittleEndian.Uint64(s[0:0+8])*k1) * k1
- a *= k1
- b ^= a
- c ^= shiftMix(binary.LittleEndian.Uint64(s[8:8+8])*k1) * k1
- c *= k1
- d ^= c
- s = s[16:]
- l -= 16
- if l <= 0 {
- break
- }
- }
- }
- a = hashLen16(a, c)
- b = hashLen16(d, b)
- return uint128{a ^ b, hashLen16(b, a)}
-}
-
-func cityHash128WithSeed(s []byte, seed uint128) uint128 {
- slen := len(s)
- if slen < 128 {
- return cityMurmur(s, seed)
- }
-
- endIdx := ((slen - 1) / 128) * 128
- lastBlockIdx := endIdx + ((slen - 1) & 127) - 127
- last := s[lastBlockIdx:]
-
- // We expect len >= 128 to be the common case. Keep 56 bytes of state:
- // v, w, x, y, and z.
- var v1, v2 uint64
- var w1, w2 uint64
- x := seed.lo
- y := seed.hi
- z := uint64(slen) * k1
- v1 = bits.RotateLeft64(y^k1, -49)*k1 + binary.LittleEndian.Uint64(s[0:0+8])
- v2 = bits.RotateLeft64(v1, -42)*k1 + binary.LittleEndian.Uint64(s[8:8+8])
- w1 = bits.RotateLeft64(y+z, -35)*k1 + x
- w2 = bits.RotateLeft64(x+binary.LittleEndian.Uint64(s[88:88+8]), -53) * k1
-
- // This is the same inner loop as CityHash64(), manually unrolled.
- for {
- x = bits.RotateLeft64(x+y+v1+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1
- y = bits.RotateLeft64(y+v2+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1
- x ^= w2
- y += v1 + binary.LittleEndian.Uint64(s[40:40+8])
- z = bits.RotateLeft64(z+w1, -33) * k1
- v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1)
- w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+binary.LittleEndian.Uint64(s[16:16+8]))
- z, x = x, z
- s = s[64:]
- x = bits.RotateLeft64(x+y+v1+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1
- y = bits.RotateLeft64(y+v2+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1
- x ^= w2
- y += v1 + binary.LittleEndian.Uint64(s[40:40+8])
- z = bits.RotateLeft64(z+w1, -33) * k1
- v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1)
- w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+binary.LittleEndian.Uint64(s[16:16+8]))
- z, x = x, z
- s = s[64:]
- slen -= 128
- if slen < 128 {
- break
- }
- }
- x += bits.RotateLeft64(v1+z, -49) * k0
- y = y*k0 + bits.RotateLeft64(w2, -37)
- z = z*k0 + bits.RotateLeft64(w1, -27)
- w1 *= 9
- v1 *= k0
- // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
- for tailDone := 0; tailDone < slen; {
- tailDone += 32
- y = bits.RotateLeft64(x+y, -42)*k0 + v2
- w1 += binary.LittleEndian.Uint64(last[128-tailDone+16 : 128-tailDone+16+8])
- x = x*k0 + w1
- z += w2 + binary.LittleEndian.Uint64(last[128-tailDone:128-tailDone+8])
- w2 += v1
- v1, v2 = weakHashLen32WithSeeds(last[128-tailDone:], v1+z, v2)
- v1 *= k0
- }
-
- // At this point our 56 bytes of state should contain more than
- // enough information for a strong 128-bit hash. We use two
- // different 56-byte-to-8-byte hashes to get a 16-byte final result.
- x = hashLen16(x, v1)
- y = hashLen16(y+z, w1)
- return uint128{hashLen16(x+v2, w2) + y,
- hashLen16(x+w2, y+v2)}
-}
-
-func cityHash128(s []byte) uint128 {
- slen := len(s)
- if slen >= 16 {
- return cityHash128WithSeed(s[16:], uint128{binary.LittleEndian.Uint64(s[0 : 0+8]), binary.LittleEndian.Uint64(s[8:8+8]) + k0})
- }
- return cityHash128WithSeed(s, uint128{k0, k1})
-}
-
-// Fingerprint128 is a 128-bit fingerprint function for byte-slices
-func Fingerprint128(s []byte) (lo, hi uint64) {
- h := cityHash128(s)
- return h.lo, h.hi
-}
-
-// Hash128 is a 128-bit hash function for byte-slices
-func Hash128(s []byte) (lo, hi uint64) {
- return Fingerprint128(s)
-}
-
-// Hash128WithSeed is a 128-bit hash function for byte-slices and a 128-bit seed
-func Hash128WithSeed(s []byte, seed0, seed1 uint64) (lo, hi uint64) {
- h := cityHash128WithSeed(s, uint128{seed0, seed1})
- return h.lo, h.hi
-}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashmk.go b/vendor/github.com/dgryski/go-farm/farmhashmk.go
deleted file mode 100644
index 8e4c7428..00000000
--- a/vendor/github.com/dgryski/go-farm/farmhashmk.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package farm
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-func hash32Len5to12(s []byte, seed uint32) uint32 {
- slen := len(s)
- a := uint32(len(s))
- b := uint32(len(s) * 5)
- c := uint32(9)
- d := b + seed
- a += binary.LittleEndian.Uint32(s[0 : 0+4])
- b += binary.LittleEndian.Uint32(s[slen-4 : slen-4+4])
- c += binary.LittleEndian.Uint32(s[((slen >> 1) & 4) : ((slen>>1)&4)+4])
- return fmix(seed ^ mur(c, mur(b, mur(a, d))))
-}
-
-// Hash32 hashes a byte slice and returns a uint32 hash value
-func Hash32(s []byte) uint32 {
-
- slen := len(s)
-
- if slen <= 24 {
- if slen <= 12 {
- if slen <= 4 {
- return hash32Len0to4(s, 0)
- }
- return hash32Len5to12(s, 0)
- }
- return hash32Len13to24Seed(s, 0)
- }
-
- // len > 24
- h := uint32(slen)
- g := c1 * uint32(slen)
- f := g
- a0 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-4:slen-4+4])*c1, -17) * c2
- a1 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-8:slen-8+4])*c1, -17) * c2
- a2 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-16:slen-16+4])*c1, -17) * c2
- a3 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-12:slen-12+4])*c1, -17) * c2
- a4 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-20:slen-20+4])*c1, -17) * c2
- h ^= a0
- h = bits.RotateLeft32(h, -19)
- h = h*5 + 0xe6546b64
- h ^= a2
- h = bits.RotateLeft32(h, -19)
- h = h*5 + 0xe6546b64
- g ^= a1
- g = bits.RotateLeft32(g, -19)
- g = g*5 + 0xe6546b64
- g ^= a3
- g = bits.RotateLeft32(g, -19)
- g = g*5 + 0xe6546b64
- f += a4
- f = bits.RotateLeft32(f, -19) + 113
- for len(s) > 20 {
- a := binary.LittleEndian.Uint32(s[0 : 0+4])
- b := binary.LittleEndian.Uint32(s[4 : 4+4])
- c := binary.LittleEndian.Uint32(s[8 : 8+4])
- d := binary.LittleEndian.Uint32(s[12 : 12+4])
- e := binary.LittleEndian.Uint32(s[16 : 16+4])
- h += a
- g += b
- f += c
- h = mur(d, h) + e
- g = mur(c, g) + a
- f = mur(b+e*c1, f) + d
- f += g
- g += f
- s = s[20:]
- }
- g = bits.RotateLeft32(g, -11) * c1
- g = bits.RotateLeft32(g, -17) * c1
- f = bits.RotateLeft32(f, -11) * c1
- f = bits.RotateLeft32(f, -17) * c1
- h = bits.RotateLeft32(h+g, -19)
- h = h*5 + 0xe6546b64
- h = bits.RotateLeft32(h, -17) * c1
- h = bits.RotateLeft32(h+f, -19)
- h = h*5 + 0xe6546b64
- h = bits.RotateLeft32(h, -17) * c1
- return h
-}
-
-// Hash32WithSeed hashes a byte slice and a uint32 seed and returns a uint32 hash value
-func Hash32WithSeed(s []byte, seed uint32) uint32 {
- slen := len(s)
-
- if slen <= 24 {
- if slen >= 13 {
- return hash32Len13to24Seed(s, seed*c1)
- }
- if slen >= 5 {
- return hash32Len5to12(s, seed)
- }
- return hash32Len0to4(s, seed)
- }
- h := hash32Len13to24Seed(s[:24], seed^uint32(slen))
- return mur(Hash32(s[24:])+seed, h)
-}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashna.go b/vendor/github.com/dgryski/go-farm/farmhashna.go
deleted file mode 100644
index ac62edd3..00000000
--- a/vendor/github.com/dgryski/go-farm/farmhashna.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package farm
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-func shiftMix(val uint64) uint64 {
- return val ^ (val >> 47)
-}
-
-func hashLen16(u, v uint64) uint64 {
- return hash128to64(uint128{u, v})
-}
-
-func hashLen16Mul(u, v, mul uint64) uint64 {
- // Murmur-inspired hashing.
- a := (u ^ v) * mul
- a ^= (a >> 47)
- b := (v ^ a) * mul
- b ^= (b >> 47)
- b *= mul
- return b
-}
-
-func hashLen0to16(s []byte) uint64 {
- slen := uint64(len(s))
- if slen >= 8 {
- mul := k2 + slen*2
- a := binary.LittleEndian.Uint64(s[0:0+8]) + k2
- b := binary.LittleEndian.Uint64(s[int(slen-8) : int(slen-8)+8])
- c := bits.RotateLeft64(b, -37)*mul + a
- d := (bits.RotateLeft64(a, -25) + b) * mul
- return hashLen16Mul(c, d, mul)
- }
-
- if slen >= 4 {
- mul := k2 + slen*2
- a := binary.LittleEndian.Uint32(s[0 : 0+4])
- return hashLen16Mul(slen+(uint64(a)<<3), uint64(binary.LittleEndian.Uint32(s[int(slen-4):int(slen-4)+4])), mul)
- }
- if slen > 0 {
- a := s[0]
- b := s[slen>>1]
- c := s[slen-1]
- y := uint32(a) + (uint32(b) << 8)
- z := uint32(slen) + (uint32(c) << 2)
- return shiftMix(uint64(y)*k2^uint64(z)*k0) * k2
- }
- return k2
-}
-
-// This probably works well for 16-byte strings as well, but it may be overkill
-// in that case.
-func hashLen17to32(s []byte) uint64 {
- slen := len(s)
- mul := k2 + uint64(slen*2)
- a := binary.LittleEndian.Uint64(s[0:0+8]) * k1
- b := binary.LittleEndian.Uint64(s[8 : 8+8])
- c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul
- d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2
- return hashLen16Mul(bits.RotateLeft64(a+b, -43)+bits.RotateLeft64(c, -30)+d, a+bits.RotateLeft64(b+k2, -18)+c, mul)
-}
-
-// Return a 16-byte hash for 48 bytes. Quick and dirty.
-// Callers do best to use "random-looking" values for a and b.
-func weakHashLen32WithSeedsWords(w, x, y, z, a, b uint64) (uint64, uint64) {
- a += w
- b = bits.RotateLeft64(b+a+z, -21)
- c := a
- a += x
- a += y
- b += bits.RotateLeft64(a, -44)
- return a + z, b + c
-}
-
-// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
-func weakHashLen32WithSeeds(s []byte, a, b uint64) (uint64, uint64) {
- return weakHashLen32WithSeedsWords(binary.LittleEndian.Uint64(s[0:0+8]),
- binary.LittleEndian.Uint64(s[8:8+8]),
- binary.LittleEndian.Uint64(s[16:16+8]),
- binary.LittleEndian.Uint64(s[24:24+8]),
- a,
- b)
-}
-
-// Return an 8-byte hash for 33 to 64 bytes.
-func hashLen33to64(s []byte) uint64 {
- slen := len(s)
- mul := k2 + uint64(slen)*2
- a := binary.LittleEndian.Uint64(s[0:0+8]) * k2
- b := binary.LittleEndian.Uint64(s[8 : 8+8])
- c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul
- d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2
- y := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d
- z := hashLen16Mul(y, a+bits.RotateLeft64(b+k2, -18)+c, mul)
- e := binary.LittleEndian.Uint64(s[16:16+8]) * mul
- f := binary.LittleEndian.Uint64(s[24 : 24+8])
- g := (y + binary.LittleEndian.Uint64(s[slen-32:slen-32+8])) * mul
- h := (z + binary.LittleEndian.Uint64(s[slen-24:slen-24+8])) * mul
- return hashLen16Mul(bits.RotateLeft64(e+f, -43)+bits.RotateLeft64(g, -30)+h, e+bits.RotateLeft64(f+a, -18)+g, mul)
-}
-
-func naHash64(s []byte) uint64 {
- slen := len(s)
- var seed uint64 = 81
- if slen <= 32 {
- if slen <= 16 {
- return hashLen0to16(s)
- }
- return hashLen17to32(s)
- }
- if slen <= 64 {
- return hashLen33to64(s)
- }
- // For strings over 64 bytes we loop.
- // Internal state consists of 56 bytes: v, w, x, y, and z.
- v := uint128{0, 0}
- w := uint128{0, 0}
- x := seed*k2 + binary.LittleEndian.Uint64(s[0:0+8])
- y := seed*k1 + 113
- z := shiftMix(y*k2+113) * k2
- // Set end so that after the loop we have 1 to 64 bytes left to process.
- endIdx := ((slen - 1) / 64) * 64
- last64Idx := endIdx + ((slen - 1) & 63) - 63
- last64 := s[last64Idx:]
- for len(s) > 64 {
- x = bits.RotateLeft64(x+y+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1
- y = bits.RotateLeft64(y+v.hi+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1
- x ^= w.hi
- y += v.lo + binary.LittleEndian.Uint64(s[40:40+8])
- z = bits.RotateLeft64(z+w.lo, -33) * k1
- v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*k1, x+w.lo)
- w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8]))
- x, z = z, x
- s = s[64:]
- }
- mul := k1 + ((z & 0xff) << 1)
- // Make s point to the last 64 bytes of input.
- s = last64
- w.lo += (uint64(slen-1) & 63)
- v.lo += w.lo
- w.lo += v.lo
- x = bits.RotateLeft64(x+y+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * mul
- y = bits.RotateLeft64(y+v.hi+binary.LittleEndian.Uint64(s[48:48+8]), -42) * mul
- x ^= w.hi * 9
- y += v.lo*9 + binary.LittleEndian.Uint64(s[40:40+8])
- z = bits.RotateLeft64(z+w.lo, -33) * mul
- v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*mul, x+w.lo)
- w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8]))
- x, z = z, x
- return hashLen16Mul(hashLen16Mul(v.lo, w.lo, mul)+shiftMix(y)*k0+z, hashLen16Mul(v.hi, w.hi, mul)+x, mul)
-}
-
-func naHash64WithSeed(s []byte, seed uint64) uint64 {
- return naHash64WithSeeds(s, k2, seed)
-}
-
-func naHash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 {
- return hashLen16(naHash64(s)-seed0, seed1)
-}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashuo.go b/vendor/github.com/dgryski/go-farm/farmhashuo.go
deleted file mode 100644
index 7328fc70..00000000
--- a/vendor/github.com/dgryski/go-farm/farmhashuo.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package farm
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-func uoH(x, y, mul uint64, r uint) uint64 {
- a := (x ^ y) * mul
- a ^= (a >> 47)
- b := (y ^ a) * mul
- return bits.RotateLeft64(b, -int(r)) * mul
-}
-
-// Hash64WithSeeds hashes a byte slice and two uint64 seeds and returns a uint64 hash value
-func Hash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 {
- slen := len(s)
- if slen <= 64 {
- return naHash64WithSeeds(s, seed0, seed1)
- }
-
- // For strings over 64 bytes we loop.
- // Internal state consists of 64 bytes: u, v, w, x, y, and z.
- x := seed0
- y := seed1*k2 + 113
- z := shiftMix(y*k2) * k2
- v := uint128{seed0, seed1}
- var w uint128
- u := x - z
- x *= k2
- mul := k2 + (u & 0x82)
-
- // Set end so that after the loop we have 1 to 64 bytes left to process.
- endIdx := ((slen - 1) / 64) * 64
- last64Idx := endIdx + ((slen - 1) & 63) - 63
- last64 := s[last64Idx:]
-
- for len(s) > 64 {
- a0 := binary.LittleEndian.Uint64(s[0 : 0+8])
- a1 := binary.LittleEndian.Uint64(s[8 : 8+8])
- a2 := binary.LittleEndian.Uint64(s[16 : 16+8])
- a3 := binary.LittleEndian.Uint64(s[24 : 24+8])
- a4 := binary.LittleEndian.Uint64(s[32 : 32+8])
- a5 := binary.LittleEndian.Uint64(s[40 : 40+8])
- a6 := binary.LittleEndian.Uint64(s[48 : 48+8])
- a7 := binary.LittleEndian.Uint64(s[56 : 56+8])
- x += a0 + a1
- y += a2
- z += a3
- v.lo += a4
- v.hi += a5 + a1
- w.lo += a6
- w.hi += a7
-
- x = bits.RotateLeft64(x, -26)
- x *= 9
- y = bits.RotateLeft64(y, -29)
- z *= mul
- v.lo = bits.RotateLeft64(v.lo, -33)
- v.hi = bits.RotateLeft64(v.hi, -30)
- w.lo ^= x
- w.lo *= 9
- z = bits.RotateLeft64(z, -32)
- z += w.hi
- w.hi += z
- z *= 9
- u, y = y, u
-
- z += a0 + a6
- v.lo += a2
- v.hi += a3
- w.lo += a4
- w.hi += a5 + a6
- x += a1
- y += a7
-
- y += v.lo
- v.lo += x - y
- v.hi += w.lo
- w.lo += v.hi
- w.hi += x - y
- x += w.hi
- w.hi = bits.RotateLeft64(w.hi, -34)
- u, z = z, u
- s = s[64:]
- }
- // Make s point to the last 64 bytes of input.
- s = last64
- u *= 9
- v.hi = bits.RotateLeft64(v.hi, -28)
- v.lo = bits.RotateLeft64(v.lo, -20)
- w.lo += (uint64(slen-1) & 63)
- u += y
- y += u
- x = bits.RotateLeft64(y-x+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * mul
- y = bits.RotateLeft64(y^v.hi^binary.LittleEndian.Uint64(s[48:48+8]), -42) * mul
- x ^= w.hi * 9
- y += v.lo + binary.LittleEndian.Uint64(s[40:40+8])
- z = bits.RotateLeft64(z+w.lo, -33) * mul
- v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*mul, x+w.lo)
- w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8]))
- return uoH(hashLen16Mul(v.lo+x, w.lo^y, mul)+z-u,
- uoH(v.hi+y, w.hi+z, k2, 30)^x,
- k2,
- 31)
-}
-
-// Hash64WithSeed hashes a byte slice and a uint64 seed and returns a uint64 hash value
-func Hash64WithSeed(s []byte, seed uint64) uint64 {
- if len(s) <= 64 {
- return naHash64WithSeed(s, seed)
- }
- return Hash64WithSeeds(s, 0, seed)
-}
-
-// Hash64 hashes a byte slice and returns a uint64 hash value
-func uoHash64(s []byte) uint64 {
- if len(s) <= 64 {
- return naHash64(s)
- }
- return Hash64WithSeeds(s, 81, 0)
-}
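Worth noting from the deleted `farmhashuo.go`: for inputs over 64 bytes, `Hash64WithSeed(s, seed)` is defined as `Hash64WithSeeds(s, 0, seed)`, so the two agree in that regime. A small sketch exercising that invariant:

```go
package main

import (
	"bytes"
	"fmt"

	farm "github.com/dgryski/go-farm"
)

func main() {
	// Over 64 bytes, so the seeded "uo" loop (not the short paths) runs.
	data := bytes.Repeat([]byte("farmhash "), 10) // 90 bytes

	h1 := farm.Hash64WithSeed(data, 42)
	h2 := farm.Hash64WithSeeds(data, 0, 42)
	fmt.Println(h1 == h2) // true: WithSeed delegates to WithSeeds(s, 0, seed)
}
```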
diff --git a/vendor/github.com/dgryski/go-farm/farmhashxo.go b/vendor/github.com/dgryski/go-farm/farmhashxo.go
deleted file mode 100644
index 9234212a..00000000
--- a/vendor/github.com/dgryski/go-farm/farmhashxo.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package farm
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-func h32(s []byte, mul uint64) uint64 {
- slen := len(s)
- a := binary.LittleEndian.Uint64(s[0:0+8]) * k1
- b := binary.LittleEndian.Uint64(s[8 : 8+8])
- c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul
- d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2
- u := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d
- v := a + bits.RotateLeft64(b+k2, -18) + c
- a = shiftMix((u ^ v) * mul)
- b = shiftMix((v ^ a) * mul)
- return b
-}
-
-func h32Seeds(s []byte, mul, seed0, seed1 uint64) uint64 {
- slen := len(s)
- a := binary.LittleEndian.Uint64(s[0:0+8]) * k1
- b := binary.LittleEndian.Uint64(s[8 : 8+8])
- c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul
- d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2
- u := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d + seed0
- v := a + bits.RotateLeft64(b+k2, -18) + c + seed1
- a = shiftMix((u ^ v) * mul)
- b = shiftMix((v ^ a) * mul)
- return b
-}
-
-func xohashLen33to64(s []byte) uint64 {
- slen := len(s)
- mul0 := k2 - 30
- mul1 := k2 - 30 + 2*uint64(slen)
-
- var h0 uint64
- {
- s := s[0:32]
- mul := mul0
- slen := len(s)
- a := binary.LittleEndian.Uint64(s[0:0+8]) * k1
- b := binary.LittleEndian.Uint64(s[8 : 8+8])
- c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul
- d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2
- u := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d
- v := a + bits.RotateLeft64(b+k2, -18) + c
- a = shiftMix((u ^ v) * mul)
- b = shiftMix((v ^ a) * mul)
- h0 = b
- }
-
- var h1 uint64
- {
- s := s[slen-32:]
- mul := mul1
- slen := len(s)
- a := binary.LittleEndian.Uint64(s[0:0+8]) * k1
- b := binary.LittleEndian.Uint64(s[8 : 8+8])
- c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul
- d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2
- u := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d
- v := a + bits.RotateLeft64(b+k2, -18) + c
- a = shiftMix((u ^ v) * mul)
- b = shiftMix((v ^ a) * mul)
- h1 = b
- }
-
- r := ((h1 * mul1) + h0) * mul1
- return r
-}
-
-func xohashLen65to96(s []byte) uint64 {
- slen := len(s)
-
- mul0 := k2 - 114
- mul1 := k2 - 114 + 2*uint64(slen)
- h0 := h32(s[:32], mul0)
- h1 := h32(s[32:64], mul1)
- h2 := h32Seeds(s[slen-32:], mul1, h0, h1)
- return (h2*9 + (h0 >> 17) + (h1 >> 21)) * mul1
-}
-
-func Hash64(s []byte) uint64 {
- slen := len(s)
-
- if slen <= 32 {
- if slen <= 16 {
- return hashLen0to16(s)
- } else {
- return hashLen17to32(s)
- }
- } else if slen <= 64 {
- return xohashLen33to64(s)
- } else if slen <= 96 {
- return xohashLen65to96(s)
- } else if slen <= 256 {
- return naHash64(s)
- } else {
- return uoHash64(s)
- }
-}
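`farmhashxo.go` held the top-level `Hash64` dispatcher, which routes purely on input length: ≤16, ≤32, ≤64, ≤96, ≤256 bytes, then the unrolled "uo" loop. A quick sketch hitting each regime:

```go
package main

import (
	"bytes"
	"fmt"

	farm "github.com/dgryski/go-farm"
)

func main() {
	// One length per branch of the dispatcher: hashLen0to16,
	// hashLen17to32, xohashLen33to64, xohashLen65to96, naHash64, uoHash64.
	for _, n := range []int{8, 24, 48, 80, 200, 1 << 10} {
		data := bytes.Repeat([]byte{'x'}, n)
		fmt.Printf("len=%4d hash=%#016x\n", n, farm.Hash64(data))
	}
}
```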
diff --git a/vendor/github.com/dgryski/go-farm/fp_amd64.s b/vendor/github.com/dgryski/go-farm/fp_amd64.s
deleted file mode 100644
index 2b8fa324..00000000
--- a/vendor/github.com/dgryski/go-farm/fp_amd64.s
+++ /dev/null
@@ -1,951 +0,0 @@
-// Code generated by command: go run asm.go -out=fp_amd64.s -go111=false. DO NOT EDIT.
-
-// +build amd64,!purego
-
-#include "textflag.h"
-
-// func Fingerprint64(s []byte) uint64
-TEXT ·Fingerprint64(SB), NOSPLIT, $0-32
- MOVQ s_base+0(FP), CX
- MOVQ s_len+8(FP), AX
- CMPQ AX, $0x10
- JG check32
- CMPQ AX, $0x08
- JL check4
- MOVQ (CX), DX
- MOVQ AX, BX
- SUBQ $0x08, BX
- ADDQ CX, BX
- MOVQ (BX), BX
- MOVQ $0x9ae16a3b2f90404f, BP
- ADDQ BP, DX
- SHLQ $0x01, AX
- ADDQ BP, AX
- MOVQ BX, BP
- RORQ $0x25, BP
- IMULQ AX, BP
- ADDQ DX, BP
- RORQ $0x19, DX
- ADDQ BX, DX
- IMULQ AX, DX
- XORQ DX, BP
- IMULQ AX, BP
- MOVQ BP, BX
- SHRQ $0x2f, BX
- XORQ BP, BX
- XORQ BX, DX
- IMULQ AX, DX
- MOVQ DX, BX
- SHRQ $0x2f, BX
- XORQ DX, BX
- IMULQ AX, BX
- MOVQ BX, ret+24(FP)
- RET
-
-check4:
- CMPQ AX, $0x04
- JL check0
- MOVQ $0x9ae16a3b2f90404f, DX
- MOVQ AX, BX
- SHLQ $0x01, BX
- ADDQ DX, BX
- MOVL (CX), SI
- SHLQ $0x03, SI
- ADDQ AX, SI
- SUBQ $0x04, AX
- ADDQ AX, CX
- MOVL (CX), DI
- XORQ DI, SI
- IMULQ BX, SI
- MOVQ SI, DX
- SHRQ $0x2f, DX
- XORQ SI, DX
- XORQ DX, DI
- IMULQ BX, DI
- MOVQ DI, DX
- SHRQ $0x2f, DX
- XORQ DI, DX
- IMULQ BX, DX
- MOVQ DX, ret+24(FP)
- RET
-
-check0:
- TESTQ AX, AX
- JZ empty
- MOVBQZX (CX), DX
- MOVQ AX, BX
- SHRQ $0x01, BX
- ADDQ CX, BX
- MOVBQZX (BX), BP
- MOVQ AX, BX
- SUBQ $0x01, BX
- ADDQ CX, BX
- MOVBQZX (BX), BX
- SHLQ $0x08, BP
- ADDQ BP, DX
- SHLQ $0x02, BX
- ADDQ BX, AX
- MOVQ $0xc3a5c85c97cb3127, BX
- IMULQ BX, AX
- MOVQ $0x9ae16a3b2f90404f, BX
- IMULQ BX, DX
- XORQ DX, AX
- MOVQ AX, DX
- SHRQ $0x2f, DX
- XORQ AX, DX
- IMULQ BX, DX
- MOVQ DX, ret+24(FP)
- RET
-
-empty:
- MOVQ $0x9ae16a3b2f90404f, DX
- MOVQ DX, ret+24(FP)
- RET
-
-check32:
- CMPQ AX, $0x20
- JG check64
- MOVQ AX, DX
- SHLQ $0x01, DX
- MOVQ $0x9ae16a3b2f90404f, BX
- ADDQ BX, DX
- MOVQ (CX), BP
- MOVQ $0xb492b66fbe98f273, SI
- IMULQ SI, BP
- MOVQ 8(CX), SI
- MOVQ AX, DI
- SUBQ $0x10, DI
- ADDQ CX, DI
- MOVQ 8(DI), R12
- IMULQ DX, R12
- MOVQ (DI), DI
- IMULQ BX, DI
- MOVQ BP, R13
- ADDQ SI, R13
- RORQ $0x2b, R13
- ADDQ DI, R13
- MOVQ R12, DI
- RORQ $0x1e, DI
- ADDQ DI, R13
- ADDQ R12, BP
- ADDQ BX, SI
- RORQ $0x12, SI
- ADDQ SI, BP
- XORQ BP, R13
- IMULQ DX, R13
- MOVQ R13, BX
- SHRQ $0x2f, BX
- XORQ R13, BX
- XORQ BX, BP
- IMULQ DX, BP
- MOVQ BP, BX
- SHRQ $0x2f, BX
- XORQ BP, BX
- IMULQ DX, BX
- MOVQ BX, ret+24(FP)
- RET
-
-check64:
- CMPQ AX, $0x40
- JG long
- MOVQ AX, DX
- SHLQ $0x01, DX
- MOVQ $0x9ae16a3b2f90404f, BX
- ADDQ BX, DX
- MOVQ (CX), BP
- IMULQ BX, BP
- MOVQ 8(CX), SI
- MOVQ AX, DI
- SUBQ $0x10, DI
- ADDQ CX, DI
- MOVQ 8(DI), R12
- IMULQ DX, R12
- MOVQ (DI), DI
- IMULQ BX, DI
- MOVQ BP, R13
- ADDQ SI, R13
- RORQ $0x2b, R13
- ADDQ DI, R13
- MOVQ R12, DI
- RORQ $0x1e, DI
- ADDQ DI, R13
- ADDQ BP, R12
- ADDQ BX, SI
- RORQ $0x12, SI
- ADDQ SI, R12
- MOVQ R13, BX
- XORQ R12, BX
- IMULQ DX, BX
- MOVQ BX, SI
- SHRQ $0x2f, SI
- XORQ BX, SI
- XORQ SI, R12
- IMULQ DX, R12
- MOVQ R12, BX
- SHRQ $0x2f, BX
- XORQ R12, BX
- IMULQ DX, BX
- MOVQ 16(CX), SI
- IMULQ DX, SI
- MOVQ 24(CX), DI
- MOVQ AX, R12
- SUBQ $0x20, R12
- ADDQ CX, R12
- MOVQ (R12), R14
- ADDQ R13, R14
- IMULQ DX, R14
- MOVQ 8(R12), R12
- ADDQ BX, R12
- IMULQ DX, R12
- MOVQ SI, BX
- ADDQ DI, BX
- RORQ $0x2b, BX
- ADDQ R12, BX
- MOVQ R14, R12
- RORQ $0x1e, R12
- ADDQ R12, BX
- ADDQ R14, SI
- ADDQ BP, DI
- RORQ $0x12, DI
- ADDQ DI, SI
- XORQ SI, BX
- IMULQ DX, BX
- MOVQ BX, BP
- SHRQ $0x2f, BP
- XORQ BX, BP
- XORQ BP, SI
- IMULQ DX, SI
- MOVQ SI, BX
- SHRQ $0x2f, BX
- XORQ SI, BX
- IMULQ DX, BX
- MOVQ BX, ret+24(FP)
- RET
-
-long:
- XORQ R8, R8
- XORQ R9, R9
- XORQ R10, R10
- XORQ R11, R11
- MOVQ $0x01529cba0ca458ff, DX
- ADDQ (CX), DX
- MOVQ $0x226bb95b4e64b6d4, BX
- MOVQ $0x134a747f856d0526, BP
- MOVQ AX, SI
- SUBQ $0x01, SI
- MOVQ $0xffffffffffffffc0, DI
- ANDQ DI, SI
- MOVQ AX, DI
- SUBQ $0x01, DI
- ANDQ $0x3f, DI
- SUBQ $0x3f, DI
- ADDQ SI, DI
- MOVQ DI, SI
- ADDQ CX, SI
- MOVQ AX, DI
-
-loop:
- MOVQ $0xb492b66fbe98f273, R12
- ADDQ BX, DX
- ADDQ R8, DX
- ADDQ 8(CX), DX
- RORQ $0x25, DX
- IMULQ R12, DX
- ADDQ R9, BX
- ADDQ 48(CX), BX
- RORQ $0x2a, BX
- IMULQ R12, BX
- XORQ R11, DX
- ADDQ R8, BX
- ADDQ 40(CX), BX
- ADDQ R10, BP
- RORQ $0x21, BP
- IMULQ R12, BP
- IMULQ R12, R9
- MOVQ DX, R8
- ADDQ R10, R8
- ADDQ (CX), R9
- ADDQ R9, R8
- ADDQ 24(CX), R8
- RORQ $0x15, R8
- MOVQ R9, R10
- ADDQ 8(CX), R9
- ADDQ 16(CX), R9
- MOVQ R9, R13
- RORQ $0x2c, R13
- ADDQ R13, R8
- ADDQ 24(CX), R9
- ADDQ R10, R8
- XCHGQ R9, R8
- ADDQ BP, R11
- MOVQ BX, R10
- ADDQ 16(CX), R10
- ADDQ 32(CX), R11
- ADDQ R11, R10
- ADDQ 56(CX), R10
- RORQ $0x15, R10
- MOVQ R11, R13
- ADDQ 40(CX), R11
- ADDQ 48(CX), R11
- MOVQ R11, R14
- RORQ $0x2c, R14
- ADDQ R14, R10
- ADDQ 56(CX), R11
- ADDQ R13, R10
- XCHGQ R11, R10
- XCHGQ BP, DX
- ADDQ $0x40, CX
- SUBQ $0x40, DI
- CMPQ DI, $0x40
- JG loop
- MOVQ SI, CX
- MOVQ BP, DI
- ANDQ $0xff, DI
- SHLQ $0x01, DI
- ADDQ R12, DI
- MOVQ SI, CX
- SUBQ $0x01, AX
- ANDQ $0x3f, AX
- ADDQ AX, R10
- ADDQ R10, R8
- ADDQ R8, R10
- ADDQ BX, DX
- ADDQ R8, DX
- ADDQ 8(CX), DX
- RORQ $0x25, DX
- IMULQ DI, DX
- ADDQ R9, BX
- ADDQ 48(CX), BX
- RORQ $0x2a, BX
- IMULQ DI, BX
- MOVQ $0x00000009, AX
- IMULQ R11, AX
- XORQ AX, DX
- MOVQ $0x00000009, AX
- IMULQ R8, AX
- ADDQ AX, BX
- ADDQ 40(CX), BX
- ADDQ R10, BP
- RORQ $0x21, BP
- IMULQ DI, BP
- IMULQ DI, R9
- MOVQ DX, R8
- ADDQ R10, R8
- ADDQ (CX), R9
- ADDQ R9, R8
- ADDQ 24(CX), R8
- RORQ $0x15, R8
- MOVQ R9, AX
- ADDQ 8(CX), R9
- ADDQ 16(CX), R9
- MOVQ R9, SI
- RORQ $0x2c, SI
- ADDQ SI, R8
- ADDQ 24(CX), R9
- ADDQ AX, R8
- XCHGQ R9, R8
- ADDQ BP, R11
- MOVQ BX, R10
- ADDQ 16(CX), R10
- ADDQ 32(CX), R11
- ADDQ R11, R10
- ADDQ 56(CX), R10
- RORQ $0x15, R10
- MOVQ R11, AX
- ADDQ 40(CX), R11
- ADDQ 48(CX), R11
- MOVQ R11, SI
- RORQ $0x2c, SI
- ADDQ SI, R10
- ADDQ 56(CX), R11
- ADDQ AX, R10
- XCHGQ R11, R10
- XCHGQ BP, DX
- XORQ R10, R8
- IMULQ DI, R8
- MOVQ R8, AX
- SHRQ $0x2f, AX
- XORQ R8, AX
- XORQ AX, R10
- IMULQ DI, R10
- MOVQ R10, AX
- SHRQ $0x2f, AX
- XORQ R10, AX
- IMULQ DI, AX
- ADDQ BP, AX
- MOVQ BX, CX
- SHRQ $0x2f, CX
- XORQ BX, CX
- MOVQ $0xc3a5c85c97cb3127, BX
- IMULQ BX, CX
- ADDQ CX, AX
- XORQ R11, R9
- IMULQ DI, R9
- MOVQ R9, CX
- SHRQ $0x2f, CX
- XORQ R9, CX
- XORQ CX, R11
- IMULQ DI, R11
- MOVQ R11, CX
- SHRQ $0x2f, CX
- XORQ R11, CX
- IMULQ DI, CX
- ADDQ DX, CX
- XORQ CX, AX
- IMULQ DI, AX
- MOVQ AX, DX
- SHRQ $0x2f, DX
- XORQ AX, DX
- XORQ DX, CX
- IMULQ DI, CX
- MOVQ CX, AX
- SHRQ $0x2f, AX
- XORQ CX, AX
- IMULQ DI, AX
- MOVQ AX, ret+24(FP)
- RET
-
-// func Fingerprint32(s []byte) uint32
-TEXT ·Fingerprint32(SB), NOSPLIT, $0-28
- MOVQ s_base+0(FP), AX
- MOVQ s_len+8(FP), CX
- CMPQ CX, $0x18
- JG long
- CMPQ CX, $0x0c
- JG hash_13_24
- CMPQ CX, $0x04
- JG hash_5_12
- XORL DX, DX
- MOVL $0x00000009, BX
- TESTQ CX, CX
- JZ done
- MOVQ CX, BP
- MOVL $0xcc9e2d51, DI
- IMULL DI, DX
- MOVBLSX (AX), SI
- ADDL SI, DX
- XORL DX, BX
- SUBQ $0x01, BP
- TESTQ BP, BP
- JZ done
- IMULL DI, DX
- MOVBLSX 1(AX), SI
- ADDL SI, DX
- XORL DX, BX
- SUBQ $0x01, BP
- TESTQ BP, BP
- JZ done
- IMULL DI, DX
- MOVBLSX 2(AX), SI
- ADDL SI, DX
- XORL DX, BX
- SUBQ $0x01, BP
- TESTQ BP, BP
- JZ done
- IMULL DI, DX
- MOVBLSX 3(AX), SI
- ADDL SI, DX
- XORL DX, BX
- SUBQ $0x01, BP
- TESTQ BP, BP
- JZ done
-
-done:
- MOVL CX, BP
- MOVL $0xcc9e2d51, SI
- IMULL SI, BP
- RORL $0x11, BP
- MOVL $0x1b873593, SI
- IMULL SI, BP
- XORL BP, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), BP
- LEAL 3864292196(BP), BX
- MOVL $0xcc9e2d51, BP
- IMULL BP, DX
- RORL $0x11, DX
- MOVL $0x1b873593, BP
- IMULL BP, DX
- XORL DX, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), DX
- LEAL 3864292196(DX), BX
- MOVL BX, DX
- SHRL $0x10, DX
- XORL DX, BX
- MOVL $0x85ebca6b, DX
- IMULL DX, BX
- MOVL BX, DX
- SHRL $0x0d, DX
- XORL DX, BX
- MOVL $0xc2b2ae35, DX
- IMULL DX, BX
- MOVL BX, DX
- SHRL $0x10, DX
- XORL DX, BX
- MOVL BX, ret+24(FP)
- RET
-
-hash_5_12:
- MOVL CX, DX
- MOVL DX, BX
- SHLL $0x02, BX
- ADDL DX, BX
- MOVL $0x00000009, BP
- MOVL BX, SI
- ADDL (AX), DX
- MOVQ CX, DI
- SUBQ $0x04, DI
- ADDQ AX, DI
- ADDL (DI), BX
- MOVQ CX, DI
- SHRQ $0x01, DI
- ANDQ $0x04, DI
- ADDQ AX, DI
- ADDL (DI), BP
- MOVL $0xcc9e2d51, DI
- IMULL DI, DX
- RORL $0x11, DX
- MOVL $0x1b873593, DI
- IMULL DI, DX
- XORL DX, SI
- RORL $0x13, SI
- LEAL (SI)(SI*4), DX
- LEAL 3864292196(DX), SI
- MOVL $0xcc9e2d51, DX
- IMULL DX, BX
- RORL $0x11, BX
- MOVL $0x1b873593, DX
- IMULL DX, BX
- XORL BX, SI
- RORL $0x13, SI
- LEAL (SI)(SI*4), BX
- LEAL 3864292196(BX), SI
- MOVL $0xcc9e2d51, DX
- IMULL DX, BP
- RORL $0x11, BP
- MOVL $0x1b873593, DX
- IMULL DX, BP
- XORL BP, SI
- RORL $0x13, SI
- LEAL (SI)(SI*4), BP
- LEAL 3864292196(BP), SI
- MOVL SI, DX
- SHRL $0x10, DX
- XORL DX, SI
- MOVL $0x85ebca6b, DX
- IMULL DX, SI
- MOVL SI, DX
- SHRL $0x0d, DX
- XORL DX, SI
- MOVL $0xc2b2ae35, DX
- IMULL DX, SI
- MOVL SI, DX
- SHRL $0x10, DX
- XORL DX, SI
- MOVL SI, ret+24(FP)
- RET
-
-hash_13_24:
- MOVQ CX, DX
- SHRQ $0x01, DX
- ADDQ AX, DX
- MOVL -4(DX), BX
- MOVL 4(AX), BP
- MOVQ CX, SI
- ADDQ AX, SI
- MOVL -8(SI), DI
- MOVL (DX), DX
- MOVL (AX), R8
- MOVL -4(SI), SI
- MOVL $0xcc9e2d51, R9
- IMULL DX, R9
- ADDL CX, R9
- RORL $0x0c, BX
- ADDL SI, BX
- MOVL DI, R10
- MOVL $0xcc9e2d51, R11
- IMULL R11, R10
- RORL $0x11, R10
- MOVL $0x1b873593, R11
- IMULL R11, R10
- XORL R10, R9
- RORL $0x13, R9
- LEAL (R9)(R9*4), R10
- LEAL 3864292196(R10), R9
- ADDL BX, R9
- RORL $0x03, BX
- ADDL DI, BX
- MOVL $0xcc9e2d51, DI
- IMULL DI, R8
- RORL $0x11, R8
- MOVL $0x1b873593, DI
- IMULL DI, R8
- XORL R8, R9
- RORL $0x13, R9
- LEAL (R9)(R9*4), R8
- LEAL 3864292196(R8), R9
- ADDL BX, R9
- ADDL SI, BX
- RORL $0x0c, BX
- ADDL DX, BX
- MOVL $0xcc9e2d51, DX
- IMULL DX, BP
- RORL $0x11, BP
- MOVL $0x1b873593, DX
- IMULL DX, BP
- XORL BP, R9
- RORL $0x13, R9
- LEAL (R9)(R9*4), BP
- LEAL 3864292196(BP), R9
- ADDL BX, R9
- MOVL R9, DX
- SHRL $0x10, DX
- XORL DX, R9
- MOVL $0x85ebca6b, DX
- IMULL DX, R9
- MOVL R9, DX
- SHRL $0x0d, DX
- XORL DX, R9
- MOVL $0xc2b2ae35, DX
- IMULL DX, R9
- MOVL R9, DX
- SHRL $0x10, DX
- XORL DX, R9
- MOVL R9, ret+24(FP)
- RET
-
-long:
- MOVL CX, DX
- MOVL $0xcc9e2d51, BX
- IMULL DX, BX
- MOVL BX, BP
- MOVQ CX, SI
- ADDQ AX, SI
- MOVL $0xcc9e2d51, DI
- MOVL $0x1b873593, R8
- MOVL -4(SI), R9
- IMULL DI, R9
- RORL $0x11, R9
- IMULL R8, R9
- XORL R9, DX
- RORL $0x13, DX
- MOVL DX, R9
- SHLL $0x02, R9
- ADDL R9, DX
- ADDL $0xe6546b64, DX
- MOVL -8(SI), R9
- IMULL DI, R9
- RORL $0x11, R9
- IMULL R8, R9
- XORL R9, BX
- RORL $0x13, BX
- MOVL BX, R9
- SHLL $0x02, R9
- ADDL R9, BX
- ADDL $0xe6546b64, BX
- MOVL -16(SI), R9
- IMULL DI, R9
- RORL $0x11, R9
- IMULL R8, R9
- XORL R9, DX
- RORL $0x13, DX
- MOVL DX, R9
- SHLL $0x02, R9
- ADDL R9, DX
- ADDL $0xe6546b64, DX
- MOVL -12(SI), R9
- IMULL DI, R9
- RORL $0x11, R9
- IMULL R8, R9
- XORL R9, BX
- RORL $0x13, BX
- MOVL BX, R9
- SHLL $0x02, R9
- ADDL R9, BX
- ADDL $0xe6546b64, BX
- PREFETCHT0 (AX)
- MOVL -20(SI), SI
- IMULL DI, SI
- RORL $0x11, SI
- IMULL R8, SI
- ADDL SI, BP
- RORL $0x13, BP
- ADDL $0x71, BP
-
-loop80:
- CMPQ CX, $0x64
- JL loop20
- PREFETCHT0 20(AX)
- MOVL (AX), SI
- ADDL SI, DX
- MOVL 4(AX), DI
- ADDL DI, BX
- MOVL 8(AX), R8
- ADDL R8, BP
- MOVL 12(AX), R9
- MOVL R9, R11
- MOVL $0xcc9e2d51, R10
- IMULL R10, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R10
- IMULL R10, R11
- XORL R11, DX
- RORL $0x13, DX
- LEAL (DX)(DX*4), R11
- LEAL 3864292196(R11), DX
- MOVL 16(AX), R10
- ADDL R10, DX
- MOVL R8, R11
- MOVL $0xcc9e2d51, R8
- IMULL R8, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R8
- IMULL R8, R11
- XORL R11, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), R11
- LEAL 3864292196(R11), BX
- ADDL SI, BX
- MOVL $0xcc9e2d51, SI
- IMULL SI, R10
- MOVL R10, R11
- ADDL DI, R11
- MOVL $0xcc9e2d51, SI
- IMULL SI, R11
- RORL $0x11, R11
- MOVL $0x1b873593, SI
- IMULL SI, R11
- XORL R11, BP
- RORL $0x13, BP
- LEAL (BP)(BP*4), R11
- LEAL 3864292196(R11), BP
- ADDL R9, BP
- ADDL BX, BP
- ADDL BP, BX
- PREFETCHT0 40(AX)
- MOVL 20(AX), SI
- ADDL SI, DX
- MOVL 24(AX), DI
- ADDL DI, BX
- MOVL 28(AX), R8
- ADDL R8, BP
- MOVL 32(AX), R9
- MOVL R9, R11
- MOVL $0xcc9e2d51, R10
- IMULL R10, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R10
- IMULL R10, R11
- XORL R11, DX
- RORL $0x13, DX
- LEAL (DX)(DX*4), R11
- LEAL 3864292196(R11), DX
- MOVL 36(AX), R10
- ADDL R10, DX
- MOVL R8, R11
- MOVL $0xcc9e2d51, R8
- IMULL R8, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R8
- IMULL R8, R11
- XORL R11, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), R11
- LEAL 3864292196(R11), BX
- ADDL SI, BX
- MOVL $0xcc9e2d51, SI
- IMULL SI, R10
- MOVL R10, R11
- ADDL DI, R11
- MOVL $0xcc9e2d51, SI
- IMULL SI, R11
- RORL $0x11, R11
- MOVL $0x1b873593, SI
- IMULL SI, R11
- XORL R11, BP
- RORL $0x13, BP
- LEAL (BP)(BP*4), R11
- LEAL 3864292196(R11), BP
- ADDL R9, BP
- ADDL BX, BP
- ADDL BP, BX
- PREFETCHT0 60(AX)
- MOVL 40(AX), SI
- ADDL SI, DX
- MOVL 44(AX), DI
- ADDL DI, BX
- MOVL 48(AX), R8
- ADDL R8, BP
- MOVL 52(AX), R9
- MOVL R9, R11
- MOVL $0xcc9e2d51, R10
- IMULL R10, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R10
- IMULL R10, R11
- XORL R11, DX
- RORL $0x13, DX
- LEAL (DX)(DX*4), R11
- LEAL 3864292196(R11), DX
- MOVL 56(AX), R10
- ADDL R10, DX
- MOVL R8, R11
- MOVL $0xcc9e2d51, R8
- IMULL R8, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R8
- IMULL R8, R11
- XORL R11, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), R11
- LEAL 3864292196(R11), BX
- ADDL SI, BX
- MOVL $0xcc9e2d51, SI
- IMULL SI, R10
- MOVL R10, R11
- ADDL DI, R11
- MOVL $0xcc9e2d51, SI
- IMULL SI, R11
- RORL $0x11, R11
- MOVL $0x1b873593, SI
- IMULL SI, R11
- XORL R11, BP
- RORL $0x13, BP
- LEAL (BP)(BP*4), R11
- LEAL 3864292196(R11), BP
- ADDL R9, BP
- ADDL BX, BP
- ADDL BP, BX
- PREFETCHT0 80(AX)
- MOVL 60(AX), SI
- ADDL SI, DX
- MOVL 64(AX), DI
- ADDL DI, BX
- MOVL 68(AX), R8
- ADDL R8, BP
- MOVL 72(AX), R9
- MOVL R9, R11
- MOVL $0xcc9e2d51, R10
- IMULL R10, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R10
- IMULL R10, R11
- XORL R11, DX
- RORL $0x13, DX
- LEAL (DX)(DX*4), R11
- LEAL 3864292196(R11), DX
- MOVL 76(AX), R10
- ADDL R10, DX
- MOVL R8, R11
- MOVL $0xcc9e2d51, R8
- IMULL R8, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R8
- IMULL R8, R11
- XORL R11, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), R11
- LEAL 3864292196(R11), BX
- ADDL SI, BX
- MOVL $0xcc9e2d51, SI
- IMULL SI, R10
- MOVL R10, R11
- ADDL DI, R11
- MOVL $0xcc9e2d51, SI
- IMULL SI, R11
- RORL $0x11, R11
- MOVL $0x1b873593, SI
- IMULL SI, R11
- XORL R11, BP
- RORL $0x13, BP
- LEAL (BP)(BP*4), R11
- LEAL 3864292196(R11), BP
- ADDL R9, BP
- ADDL BX, BP
- ADDL BP, BX
- ADDQ $0x50, AX
- SUBQ $0x50, CX
- JMP loop80
-
-loop20:
- CMPQ CX, $0x14
- JLE after
- MOVL (AX), SI
- ADDL SI, DX
- MOVL 4(AX), DI
- ADDL DI, BX
- MOVL 8(AX), R8
- ADDL R8, BP
- MOVL 12(AX), R9
- MOVL R9, R11
- MOVL $0xcc9e2d51, R10
- IMULL R10, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R10
- IMULL R10, R11
- XORL R11, DX
- RORL $0x13, DX
- LEAL (DX)(DX*4), R11
- LEAL 3864292196(R11), DX
- MOVL 16(AX), R10
- ADDL R10, DX
- MOVL R8, R11
- MOVL $0xcc9e2d51, R8
- IMULL R8, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R8
- IMULL R8, R11
- XORL R11, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), R11
- LEAL 3864292196(R11), BX
- ADDL SI, BX
- MOVL $0xcc9e2d51, SI
- IMULL SI, R10
- MOVL R10, R11
- ADDL DI, R11
- MOVL $0xcc9e2d51, SI
- IMULL SI, R11
- RORL $0x11, R11
- MOVL $0x1b873593, SI
- IMULL SI, R11
- XORL R11, BP
- RORL $0x13, BP
- LEAL (BP)(BP*4), R11
- LEAL 3864292196(R11), BP
- ADDL R9, BP
- ADDL BX, BP
- ADDL BP, BX
- ADDQ $0x14, AX
- SUBQ $0x14, CX
- JMP loop20
-
-after:
- MOVL $0xcc9e2d51, AX
- RORL $0x0b, BX
- IMULL AX, BX
- RORL $0x11, BX
- IMULL AX, BX
- RORL $0x0b, BP
- IMULL AX, BP
- RORL $0x11, BP
- IMULL AX, BP
- ADDL BX, DX
- RORL $0x13, DX
- MOVL DX, CX
- SHLL $0x02, CX
- ADDL CX, DX
- ADDL $0xe6546b64, DX
- RORL $0x11, DX
- IMULL AX, DX
- ADDL BP, DX
- RORL $0x13, DX
- MOVL DX, CX
- SHLL $0x02, CX
- ADDL CX, DX
- ADDL $0xe6546b64, DX
- RORL $0x11, DX
- IMULL AX, DX
- MOVL DX, ret+24(FP)
- RET
diff --git a/vendor/github.com/dgryski/go-farm/fp_generic.go b/vendor/github.com/dgryski/go-farm/fp_generic.go
deleted file mode 100644
index 2cfa1b9d..00000000
--- a/vendor/github.com/dgryski/go-farm/fp_generic.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !amd64 purego
-
-package farm
-
-// Fingerprint64 is a 64-bit fingerprint function for byte-slices
-func Fingerprint64(s []byte) uint64 {
- return naHash64(s)
-}
-
-// Fingerprint32 is a 32-bit fingerprint function for byte-slices
-func Fingerprint32(s []byte) uint32 {
- return Hash32(s)
-}
diff --git a/vendor/github.com/dgryski/go-farm/fp_stub.go b/vendor/github.com/dgryski/go-farm/fp_stub.go
deleted file mode 100644
index 94fff8de..00000000
--- a/vendor/github.com/dgryski/go-farm/fp_stub.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by command: go run asm.go -out=fp_amd64.s -stubs=fp_stub.go. DO NOT EDIT.
-
-// +build amd64,!purego
-
-package farm
-
-func Fingerprint64(s []byte) uint64
-
-func Fingerprint32(s []byte) uint32
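`fp_amd64.s`, `fp_stub.go`, and `fp_generic.go` are build-tag-selected implementations of the same two exported symbols: the assembly pair builds under `amd64,!purego`, the generic Go fallback under `!amd64 purego`, and the two are meant to return identical fingerprints. A sketch of the caller-visible API (force the Go path with `go build -tags purego`):

```go
package main

import (
	"fmt"

	farm "github.com/dgryski/go-farm"
)

func main() {
	data := []byte("fingerprint me")

	// Fingerprints are stable across platforms and implementations, so
	// the asm and purego builds must agree on these values.
	fmt.Printf("Fingerprint64: %#016x\n", farm.Fingerprint64(data))
	fmt.Printf("Fingerprint32: %#08x\n", farm.Fingerprint32(data))
}
```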
diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml
deleted file mode 100644
index ba95cdd1..00000000
--- a/vendor/github.com/dustin/go-humanize/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-sudo: false
-language: go
-go:
- - 1.3.x
- - 1.5.x
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - master
-matrix:
- allow_failures:
- - go: master
- fast_finish: true
-install:
- - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
-script:
- - go get -t -v ./...
- - diff -u <(echo -n) <(gofmt -d -s .)
- - go tool vet .
- - go test -v -race ./...
diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE
deleted file mode 100644
index 8d9a94a9..00000000
--- a/vendor/github.com/dustin/go-humanize/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright (c) 2005-2008 Dustin Sallings
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown
deleted file mode 100644
index 7d0b16b3..00000000
--- a/vendor/github.com/dustin/go-humanize/README.markdown
+++ /dev/null
@@ -1,124 +0,0 @@
-# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
-
-Just a few functions for helping humanize times and sizes.
-
-`go get` it as `github.com/dustin/go-humanize`, import it as
-`"github.com/dustin/go-humanize"`, use it as `humanize`.
-
-See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
-complete documentation.
-
-## Sizes
-
-This lets you take numbers like `82854982` and convert them to useful
-strings like, `83 MB` or `79 MiB` (whichever you prefer).
-
-Example:
-
-```go
-fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
-```
-
-## Times
-
-This lets you take a `time.Time` and spit it out in relative terms.
-For example, `12 seconds ago` or `3 days from now`.
-
-Example:
-
-```go
-fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
-```
-
-Thanks to Kyle Lemons for the time implementation from an IRC
-conversation one day. It's pretty neat.
-
-## Ordinals
-
-From a [mailing list discussion][odisc] where a user wanted to be able
-to label ordinals.
-
- 0 -> 0th
- 1 -> 1st
- 2 -> 2nd
- 3 -> 3rd
- 4 -> 4th
- [...]
-
-Example:
-
-```go
-fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
-```
-
-## Commas
-
-Want to shove commas into numbers? Be my guest.
-
- 0 -> 0
- 100 -> 100
- 1000 -> 1,000
- 1000000000 -> 1,000,000,000
- -100000 -> -100,000
-
-Example:
-
-```go
-fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
-```
-
-## Ftoa
-
-Nicer float64 formatter that removes trailing zeros.
-
-```go
-fmt.Printf("%f", 2.24) // 2.240000
-fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
-fmt.Printf("%f", 2.0) // 2.000000
-fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
-```
-
-## SI notation
-
-Format numbers with [SI notation][sinotation].
-
-Example:
-
-```go
-humanize.SI(0.00000000223, "M") // 2.23 nM
-```
-
-## English-specific functions
-
-The following functions are in the `humanize/english` subpackage.
-
-### Plurals
-
-Simple English pluralization
-
-```go
-english.PluralWord(1, "object", "") // object
-english.PluralWord(42, "object", "") // objects
-english.PluralWord(2, "bus", "") // buses
-english.PluralWord(99, "locus", "loci") // loci
-
-english.Plural(1, "object", "") // 1 object
-english.Plural(42, "object", "") // 42 objects
-english.Plural(2, "bus", "") // 2 buses
-english.Plural(99, "locus", "loci") // 99 loci
-```
-
-### Word series
-
-Format comma-separated word lists with conjunctions:
-
-```go
-english.WordSeries([]string{"foo"}, "and") // foo
-english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
-english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
-
-english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
-```
-
-[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
-[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go
deleted file mode 100644
index f49dc337..00000000
--- a/vendor/github.com/dustin/go-humanize/big.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package humanize
-
-import (
- "math/big"
-)
-
-// order of magnitude (to a max order)
-func oomm(n, b *big.Int, maxmag int) (float64, int) {
- mag := 0
- m := &big.Int{}
- for n.Cmp(b) >= 0 {
- n.DivMod(n, b, m)
- mag++
- if mag == maxmag && maxmag >= 0 {
- break
- }
- }
- return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
-}
-
-// total order of magnitude
-// (same as above, but with no upper limit)
-func oom(n, b *big.Int) (float64, int) {
- mag := 0
- m := &big.Int{}
- for n.Cmp(b) >= 0 {
- n.DivMod(n, b, m)
- mag++
- }
- return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
-}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
deleted file mode 100644
index 1a2bf617..00000000
--- a/vendor/github.com/dustin/go-humanize/bigbytes.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package humanize
-
-import (
- "fmt"
- "math/big"
- "strings"
- "unicode"
-)
-
-var (
- bigIECExp = big.NewInt(1024)
-
- // BigByte is one byte in big.Ints
- BigByte = big.NewInt(1)
- // BigKiByte is 1,024 bytes in big.Ints
- BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
- // BigMiByte is 1,024 KiB in big.Ints
- BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
- // BigGiByte is 1,024 MiB in big.Ints
- BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
- // BigTiByte is 1,024 GiB in big.Ints
- BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
- // BigPiByte is 1,024 TiB in big.Ints
- BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
- // BigEiByte is 1,024 PiB in big.Ints
- BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
- // BigZiByte is 1,024 EiB in big.Ints
- BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
- // BigYiByte is 1,024 ZiB in big.Ints
- BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
-)
-
-var (
- bigSIExp = big.NewInt(1000)
-
- // BigSIByte is one SI byte in big.Ints
- BigSIByte = big.NewInt(1)
- // BigKByte is 1,000 SI bytes in big.Ints
- BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
- // BigMByte is 1,000 SI k bytes in big.Ints
- BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
- // BigGByte is 1,000 SI m bytes in big.Ints
- BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
- // BigTByte is 1,000 SI g bytes in big.Ints
- BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
- // BigPByte is 1,000 SI t bytes in big.Ints
- BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
- // BigEByte is 1,000 SI p bytes in big.Ints
- BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
- // BigZByte is 1,000 SI e bytes in big.Ints
- BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
- // BigYByte is 1,000 SI z bytes in big.Ints
- BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
-)
-
-var bigBytesSizeTable = map[string]*big.Int{
- "b": BigByte,
- "kib": BigKiByte,
- "kb": BigKByte,
- "mib": BigMiByte,
- "mb": BigMByte,
- "gib": BigGiByte,
- "gb": BigGByte,
- "tib": BigTiByte,
- "tb": BigTByte,
- "pib": BigPiByte,
- "pb": BigPByte,
- "eib": BigEiByte,
- "eb": BigEByte,
- "zib": BigZiByte,
- "zb": BigZByte,
- "yib": BigYiByte,
- "yb": BigYByte,
- // Without suffix
- "": BigByte,
- "ki": BigKiByte,
- "k": BigKByte,
- "mi": BigMiByte,
- "m": BigMByte,
- "gi": BigGiByte,
- "g": BigGByte,
- "ti": BigTiByte,
- "t": BigTByte,
- "pi": BigPiByte,
- "p": BigPByte,
- "ei": BigEiByte,
- "e": BigEByte,
- "z": BigZByte,
- "zi": BigZiByte,
- "y": BigYByte,
- "yi": BigYiByte,
-}
-
-var ten = big.NewInt(10)
-
-func humanateBigBytes(s, base *big.Int, sizes []string) string {
- if s.Cmp(ten) < 0 {
- return fmt.Sprintf("%d B", s)
- }
- c := (&big.Int{}).Set(s)
- val, mag := oomm(c, base, len(sizes)-1)
- suffix := sizes[mag]
- f := "%.0f %s"
- if val < 10 {
- f = "%.1f %s"
- }
-
- return fmt.Sprintf(f, val, suffix)
-
-}
-
-// BigBytes produces a human readable representation of an SI size.
-//
-// See also: ParseBigBytes.
-//
-// BigBytes(82854982) -> 83 MB
-func BigBytes(s *big.Int) string {
- sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
- return humanateBigBytes(s, bigSIExp, sizes)
-}
-
-// BigIBytes produces a human readable representation of an IEC size.
-//
-// See also: ParseBigBytes.
-//
-// BigIBytes(82854982) -> 79 MiB
-func BigIBytes(s *big.Int) string {
- sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
- return humanateBigBytes(s, bigIECExp, sizes)
-}
-
-// ParseBigBytes parses a string representation of bytes into the number
-// of bytes it represents.
-//
-// See also: BigBytes, BigIBytes.
-//
-// ParseBigBytes("42 MB") -> 42000000, nil
-// ParseBigBytes("42 mib") -> 44040192, nil
-func ParseBigBytes(s string) (*big.Int, error) {
- lastDigit := 0
- hasComma := false
- for _, r := range s {
- if !(unicode.IsDigit(r) || r == '.' || r == ',') {
- break
- }
- if r == ',' {
- hasComma = true
- }
- lastDigit++
- }
-
- num := s[:lastDigit]
- if hasComma {
- num = strings.Replace(num, ",", "", -1)
- }
-
- val := &big.Rat{}
- _, err := fmt.Sscanf(num, "%f", val)
- if err != nil {
- return nil, err
- }
-
- extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
- if m, ok := bigBytesSizeTable[extra]; ok {
- mv := (&big.Rat{}).SetInt(m)
- val.Mul(val, mv)
- rv := &big.Int{}
- rv.Div(val.Num(), val.Denom())
- return rv, nil
- }
-
- return nil, fmt.Errorf("unhandled size name: %v", extra)
-}
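Per the doc comments in the deleted `bigbytes.go`, `BigBytes`/`BigIBytes` format `*big.Int` sizes in SI and IEC units respectively, and `ParseBigBytes` inverts both. A short sketch using those documented examples:

```go
package main

import (
	"fmt"
	"math/big"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	n := big.NewInt(82854982)
	fmt.Println(humanize.BigBytes(n))  // 83 MB  (SI, base 1000)
	fmt.Println(humanize.BigIBytes(n)) // 79 MiB (IEC, base 1024)

	// Parsing accepts either suffix family, case-insensitively.
	v, err := humanize.ParseBigBytes("42 mib")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 44040192
}
```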
diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go
deleted file mode 100644
index 0b498f48..00000000
--- a/vendor/github.com/dustin/go-humanize/bytes.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package humanize
-
-import (
- "fmt"
- "math"
- "strconv"
- "strings"
- "unicode"
-)
-
-// IEC Sizes.
-// kibis of bits
-const (
- Byte = 1 << (iota * 10)
- KiByte
- MiByte
- GiByte
- TiByte
- PiByte
- EiByte
-)
-
-// SI Sizes.
-const (
- IByte = 1
- KByte = IByte * 1000
- MByte = KByte * 1000
- GByte = MByte * 1000
- TByte = GByte * 1000
- PByte = TByte * 1000
- EByte = PByte * 1000
-)
-
-var bytesSizeTable = map[string]uint64{
- "b": Byte,
- "kib": KiByte,
- "kb": KByte,
- "mib": MiByte,
- "mb": MByte,
- "gib": GiByte,
- "gb": GByte,
- "tib": TiByte,
- "tb": TByte,
- "pib": PiByte,
- "pb": PByte,
- "eib": EiByte,
- "eb": EByte,
- // Without suffix
- "": Byte,
- "ki": KiByte,
- "k": KByte,
- "mi": MiByte,
- "m": MByte,
- "gi": GiByte,
- "g": GByte,
- "ti": TiByte,
- "t": TByte,
- "pi": PiByte,
- "p": PByte,
- "ei": EiByte,
- "e": EByte,
-}
-
-func logn(n, b float64) float64 {
- return math.Log(n) / math.Log(b)
-}
-
-func humanateBytes(s uint64, base float64, sizes []string) string {
- if s < 10 {
- return fmt.Sprintf("%d B", s)
- }
- e := math.Floor(logn(float64(s), base))
- suffix := sizes[int(e)]
- val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
- f := "%.0f %s"
- if val < 10 {
- f = "%.1f %s"
- }
-
- return fmt.Sprintf(f, val, suffix)
-}
-
-// Bytes produces a human readable representation of an SI size.
-//
-// See also: ParseBytes.
-//
-// Bytes(82854982) -> 83 MB
-func Bytes(s uint64) string {
- sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
- return humanateBytes(s, 1000, sizes)
-}
-
-// IBytes produces a human readable representation of an IEC size.
-//
-// See also: ParseBytes.
-//
-// IBytes(82854982) -> 79 MiB
-func IBytes(s uint64) string {
- sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
- return humanateBytes(s, 1024, sizes)
-}
-
-// ParseBytes parses a string representation of bytes into the number
-// of bytes it represents.
-//
-// See Also: Bytes, IBytes.
-//
-// ParseBytes("42 MB") -> 42000000, nil
-// ParseBytes("42 mib") -> 44040192, nil
-func ParseBytes(s string) (uint64, error) {
- lastDigit := 0
- hasComma := false
- for _, r := range s {
- if !(unicode.IsDigit(r) || r == '.' || r == ',') {
- break
- }
- if r == ',' {
- hasComma = true
- }
- lastDigit++
- }
-
- num := s[:lastDigit]
- if hasComma {
- num = strings.Replace(num, ",", "", -1)
- }
-
- f, err := strconv.ParseFloat(num, 64)
- if err != nil {
- return 0, err
- }
-
- extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
- if m, ok := bytesSizeTable[extra]; ok {
- f *= float64(m)
- if f >= math.MaxUint64 {
- return 0, fmt.Errorf("too large: %v", s)
- }
- return uint64(f), nil
- }
-
- return 0, fmt.Errorf("unhandled size name: %v", extra)
-}
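`bytes.go` is the fixed-width (`uint64`) counterpart of `bigbytes.go`: the same SI/IEC split and the same parser shape. A sketch, with outputs taken from the doc comments above:

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.Bytes(82854982))  // 83 MB  (SI, base 1000)
	fmt.Println(humanize.IBytes(82854982)) // 79 MiB (IEC, base 1024)

	// ParseBytes tolerates commas in the number and mixed-case suffixes.
	n, err := humanize.ParseBytes("42 MB")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 42000000
}
```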
diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go
deleted file mode 100644
index 520ae3e5..00000000
--- a/vendor/github.com/dustin/go-humanize/comma.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package humanize
-
-import (
- "bytes"
- "math"
- "math/big"
- "strconv"
- "strings"
-)
-
-// Comma produces a string form of the given number in base 10 with
-// commas after every three orders of magnitude.
-//
-// e.g. Comma(834142) -> 834,142
-func Comma(v int64) string {
- sign := ""
-
- // Min int64 can't be negated to a usable value, so it has to be special cased.
- if v == math.MinInt64 {
- return "-9,223,372,036,854,775,808"
- }
-
- if v < 0 {
- sign = "-"
- v = 0 - v
- }
-
- parts := []string{"", "", "", "", "", "", ""}
- j := len(parts) - 1
-
- for v > 999 {
- parts[j] = strconv.FormatInt(v%1000, 10)
- switch len(parts[j]) {
- case 2:
- parts[j] = "0" + parts[j]
- case 1:
- parts[j] = "00" + parts[j]
- }
- v = v / 1000
- j--
- }
- parts[j] = strconv.Itoa(int(v))
- return sign + strings.Join(parts[j:], ",")
-}
-
-// Commaf produces a string form of the given number in base 10 with
-// commas after every three orders of magnitude.
-//
-// e.g. Commaf(834142.32) -> 834,142.32
-func Commaf(v float64) string {
- buf := &bytes.Buffer{}
- if v < 0 {
- buf.Write([]byte{'-'})
- v = 0 - v
- }
-
- comma := []byte{','}
-
- parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
- pos := 0
- if len(parts[0])%3 != 0 {
- pos += len(parts[0]) % 3
- buf.WriteString(parts[0][:pos])
- buf.Write(comma)
- }
- for ; pos < len(parts[0]); pos += 3 {
- buf.WriteString(parts[0][pos : pos+3])
- buf.Write(comma)
- }
- buf.Truncate(buf.Len() - 1)
-
- if len(parts) > 1 {
- buf.Write([]byte{'.'})
- buf.WriteString(parts[1])
- }
- return buf.String()
-}
-
-// CommafWithDigits works like the Commaf but limits the resulting
-// string to the given number of decimal places.
-//
-// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
-func CommafWithDigits(f float64, decimals int) string {
- return stripTrailingDigits(Commaf(f), decimals)
-}
-
-// BigComma produces a string form of the given big.Int in base 10
-// with commas after every three orders of magnitude.
-func BigComma(b *big.Int) string {
- sign := ""
- if b.Sign() < 0 {
- sign = "-"
- b.Abs(b)
- }
-
- athousand := big.NewInt(1000)
- c := (&big.Int{}).Set(b)
- _, m := oom(c, athousand)
- parts := make([]string, m+1)
- j := len(parts) - 1
-
- mod := &big.Int{}
- for b.Cmp(athousand) >= 0 {
- b.DivMod(b, athousand, mod)
- parts[j] = strconv.FormatInt(mod.Int64(), 10)
- switch len(parts[j]) {
- case 2:
- parts[j] = "0" + parts[j]
- case 1:
- parts[j] = "00" + parts[j]
- }
- j--
- }
- parts[j] = strconv.Itoa(int(b.Int64()))
- return sign + strings.Join(parts[j:], ",")
-}
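The `Comma` family groups digits by thousands; note the `math.MinInt64` special case above, which exists because that value cannot be negated in two's complement. Sketch:

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.Comma(834142))                  // 834,142
	fmt.Println(humanize.Commaf(834142.32))              // 834,142.32
	fmt.Println(humanize.CommafWithDigits(834142.32, 1)) // 834,142.3

	// The special case: -(-9223372036854775808) overflows int64.
	fmt.Println(humanize.Comma(-9223372036854775808)) // -9,223,372,036,854,775,808
}
```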
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
deleted file mode 100644
index 620690de..00000000
--- a/vendor/github.com/dustin/go-humanize/commaf.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// +build go1.6
-
-package humanize
-
-import (
- "bytes"
- "math/big"
- "strings"
-)
-
-// BigCommaf produces a string form of the given big.Float in base 10
-// with commas after every three orders of magnitude.
-func BigCommaf(v *big.Float) string {
- buf := &bytes.Buffer{}
- if v.Sign() < 0 {
- buf.Write([]byte{'-'})
- v.Abs(v)
- }
-
- comma := []byte{','}
-
- parts := strings.Split(v.Text('f', -1), ".")
- pos := 0
- if len(parts[0])%3 != 0 {
- pos += len(parts[0]) % 3
- buf.WriteString(parts[0][:pos])
- buf.Write(comma)
- }
- for ; pos < len(parts[0]); pos += 3 {
- buf.WriteString(parts[0][pos : pos+3])
- buf.Write(comma)
- }
- buf.Truncate(buf.Len() - 1)
-
- if len(parts) > 1 {
- buf.Write([]byte{'.'})
- buf.WriteString(parts[1])
- }
- return buf.String()
-}
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
deleted file mode 100644
index 1c62b640..00000000
--- a/vendor/github.com/dustin/go-humanize/ftoa.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package humanize
-
-import (
- "strconv"
- "strings"
-)
-
-func stripTrailingZeros(s string) string {
- offset := len(s) - 1
- for offset > 0 {
- if s[offset] == '.' {
- offset--
- break
- }
- if s[offset] != '0' {
- break
- }
- offset--
- }
- return s[:offset+1]
-}
-
-func stripTrailingDigits(s string, digits int) string {
- if i := strings.Index(s, "."); i >= 0 {
- if digits <= 0 {
- return s[:i]
- }
- i++
- if i+digits >= len(s) {
- return s
- }
- return s[:i+digits]
- }
- return s
-}
-
-// Ftoa converts a float to a string with no trailing zeros.
-func Ftoa(num float64) string {
- return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
-}
-
-// FtoaWithDigits converts a float to a string but limits the resulting string
-// to the given number of decimal places, and no trailing zeros.
-func FtoaWithDigits(num float64, digits int) string {
- return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
-}
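`Ftoa` formats with six decimals and then strips trailing zeros (and a bare trailing point); `FtoaWithDigits` truncates to the requested precision first. A sketch of the documented behavior:

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.Ftoa(2.24)) // 2.24
	fmt.Println(humanize.Ftoa(2.0))  // 2

	// Truncate (not round) to one decimal place, then strip zeros.
	fmt.Println(humanize.FtoaWithDigits(2.24, 1)) // 2.2
}
```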
diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go
deleted file mode 100644
index a2c2da31..00000000
--- a/vendor/github.com/dustin/go-humanize/humanize.go
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
-Package humanize converts boring ugly numbers to human-friendly strings and back.
-
-Durations can be turned into strings such as "3 days ago", numbers
-representing sizes like 82854982 into useful strings like, "83 MB" or
-"79 MiB" (whichever you prefer).
-*/
-package humanize
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
deleted file mode 100644
index dec61865..00000000
--- a/vendor/github.com/dustin/go-humanize/number.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package humanize
-
-/*
-Slightly adapted from the source to fit go-humanize.
-
-Author: https://github.com/gorhill
-Source: https://gist.github.com/gorhill/5285193
-
-*/
-
-import (
- "math"
- "strconv"
-)
-
-var (
- renderFloatPrecisionMultipliers = [...]float64{
- 1,
- 10,
- 100,
- 1000,
- 10000,
- 100000,
- 1000000,
- 10000000,
- 100000000,
- 1000000000,
- }
-
- renderFloatPrecisionRounders = [...]float64{
- 0.5,
- 0.05,
- 0.005,
- 0.0005,
- 0.00005,
- 0.000005,
- 0.0000005,
- 0.00000005,
- 0.000000005,
- 0.0000000005,
- }
-)
-
-// FormatFloat produces a formatted number as string based on the following user-specified criteria:
-// * thousands separator
-// * decimal separator
-// * decimal precision
-//
-// Usage: s := FormatFloat(format, n)
-// The format parameter tells how to render the number n.
-//
-// See examples: http://play.golang.org/p/LXc1Ddm1lJ
-//
-// Examples of format strings, given n = 12345.6789:
-// "#,###.##" => "12,345.67"
-// "#,###." => "12,345"
-// "#,###" => "12345,678"
-// "#\u202F###,##" => "12 345,68"
-// "#.###,###### => 12.345,678900
-// "" (aka default format) => 12,345.67
-//
-// The highest precision allowed is 9 digits after the decimal symbol.
-// There is also a version for integer numbers, FormatInteger(),
-// which is convenient for calls within templates.
-func FormatFloat(format string, n float64) string {
- // Special cases:
- // NaN = "NaN"
- // +Inf = "+Infinity"
- // -Inf = "-Infinity"
- if math.IsNaN(n) {
- return "NaN"
- }
- if n > math.MaxFloat64 {
- return "Infinity"
- }
- if n < -math.MaxFloat64 {
- return "-Infinity"
- }
-
- // default format
- precision := 2
- decimalStr := "."
- thousandStr := ","
- positiveStr := ""
- negativeStr := "-"
-
- if len(format) > 0 {
- format := []rune(format)
-
- // If there is an explicit format directive,
- // then default values are these:
- precision = 9
- thousandStr = ""
-
- // collect indices of meaningful formatting directives
- formatIndx := []int{}
- for i, char := range format {
- if char != '#' && char != '0' {
- formatIndx = append(formatIndx, i)
- }
- }
-
- if len(formatIndx) > 0 {
- // Directive at index 0:
- // Must be a '+'
- // Raise an error if not the case
- // index: 0123456789
- // +0.000,000
- // +000,000.0
- // +0000.00
- // +0000
- if formatIndx[0] == 0 {
- if format[formatIndx[0]] != '+' {
- panic("RenderFloat(): invalid positive sign directive")
- }
- positiveStr = "+"
- formatIndx = formatIndx[1:]
- }
-
- // Two directives:
- // First is thousands separator
- // Raise an error if not followed by 3-digit
- // 0123456789
- // 0.000,000
- // 000,000.00
- if len(formatIndx) == 2 {
- if (formatIndx[1] - formatIndx[0]) != 4 {
- panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
- }
- thousandStr = string(format[formatIndx[0]])
- formatIndx = formatIndx[1:]
- }
-
- // One directive:
- // Directive is decimal separator
- // The number of digit-specifier following the separator indicates wanted precision
- // 0123456789
- // 0.00
- // 000,0000
- if len(formatIndx) == 1 {
- decimalStr = string(format[formatIndx[0]])
- precision = len(format) - formatIndx[0] - 1
- }
- }
- }
-
- // generate sign part
- var signStr string
- if n >= 0.000000001 {
- signStr = positiveStr
- } else if n <= -0.000000001 {
- signStr = negativeStr
- n = -n
- } else {
- signStr = ""
- n = 0.0
- }
-
- // split number into integer and fractional parts
- intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
-
- // generate integer part string
- intStr := strconv.FormatInt(int64(intf), 10)
-
- // add thousand separator if required
- if len(thousandStr) > 0 {
- for i := len(intStr); i > 3; {
- i -= 3
- intStr = intStr[:i] + thousandStr + intStr[i:]
- }
- }
-
- // no fractional part, we can leave now
- if precision == 0 {
- return signStr + intStr
- }
-
- // generate fractional part
- fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
- // may need padding
- if len(fracStr) < precision {
- fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
- }
-
- return signStr + intStr + decimalStr + fracStr
-}
-
-// FormatInteger produces a formatted number as string.
-// See FormatFloat.
-func FormatInteger(format string, n int) string {
- return FormatFloat(format, float64(n))
-}
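`FormatFloat` parses its picture-style format string at render time: an optional leading `+`, an optional thousands separator (which must be followed by exactly three digit specifiers), and a decimal separator whose trailing digit count sets the precision (capped at 9). A sketch with hypothetical inputs:

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	n := 12345.6789

	// "," as thousands separator, "." as decimal separator, 2 digits.
	fmt.Println(humanize.FormatFloat("#,###.##", n))

	// French-style: narrow no-break space groups, comma decimals.
	fmt.Println(humanize.FormatFloat("#\u202F###,##", n)) // 12 345,68

	// FormatInteger is the integer convenience wrapper.
	fmt.Println(humanize.FormatInteger("#,###.", 1234567)) // 1,234,567
}
```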
diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go
deleted file mode 100644
index 43d88a86..00000000
--- a/vendor/github.com/dustin/go-humanize/ordinals.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package humanize
-
-import "strconv"
-
-// Ordinal gives you the input number in a rank/ordinal format.
-//
-// Ordinal(3) -> 3rd
-func Ordinal(x int) string {
- suffix := "th"
- switch x % 10 {
- case 1:
- if x%100 != 11 {
- suffix = "st"
- }
- case 2:
- if x%100 != 12 {
- suffix = "nd"
- }
- case 3:
- if x%100 != 13 {
- suffix = "rd"
- }
- }
- return strconv.Itoa(x) + suffix
-}
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
deleted file mode 100644
index ae659e0e..00000000
--- a/vendor/github.com/dustin/go-humanize/si.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package humanize
-
-import (
- "errors"
- "math"
- "regexp"
- "strconv"
-)
-
-var siPrefixTable = map[float64]string{
- -24: "y", // yocto
- -21: "z", // zepto
- -18: "a", // atto
- -15: "f", // femto
- -12: "p", // pico
- -9: "n", // nano
- -6: "µ", // micro
- -3: "m", // milli
- 0: "",
- 3: "k", // kilo
- 6: "M", // mega
- 9: "G", // giga
- 12: "T", // tera
- 15: "P", // peta
- 18: "E", // exa
- 21: "Z", // zetta
- 24: "Y", // yotta
-}
-
-var revSIPrefixTable = revfmap(siPrefixTable)
-
-// revfmap reverses the map and precomputes the power multiplier
-func revfmap(in map[float64]string) map[string]float64 {
- rv := map[string]float64{}
- for k, v := range in {
- rv[v] = math.Pow(10, k)
- }
- return rv
-}
-
-var riParseRegex *regexp.Regexp
-
-func init() {
- ri := `^([\-0-9.]+)\s?([`
- for _, v := range siPrefixTable {
- ri += v
- }
- ri += `]?)(.*)`
-
- riParseRegex = regexp.MustCompile(ri)
-}
-
-// ComputeSI finds the most appropriate SI prefix for the given number
-// and returns the prefix along with the value adjusted to be within
-// that prefix.
-//
-// See also: SI, ParseSI.
-//
-// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
-func ComputeSI(input float64) (float64, string) {
- if input == 0 {
- return 0, ""
- }
- mag := math.Abs(input)
- exponent := math.Floor(logn(mag, 10))
- exponent = math.Floor(exponent/3) * 3
-
- value := mag / math.Pow(10, exponent)
-
- // Handle special case where value is exactly 1000.0
- // Should return 1 M instead of 1000 k
- if value == 1000.0 {
- exponent += 3
- value = mag / math.Pow(10, exponent)
- }
-
- value = math.Copysign(value, input)
-
- prefix := siPrefixTable[exponent]
- return value, prefix
-}
-
-// SI returns a string with default formatting.
-//
-// SI uses Ftoa to format float value, removing trailing zeros.
-//
-// See also: ComputeSI, ParseSI.
-//
-// e.g. SI(1000000, "B") -> 1 MB
-// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
-func SI(input float64, unit string) string {
- value, prefix := ComputeSI(input)
- return Ftoa(value) + " " + prefix + unit
-}
-
-// SIWithDigits works like SI but limits the resulting string to the
-// given number of decimal places.
-//
-// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
-// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
-func SIWithDigits(input float64, decimals int, unit string) string {
- value, prefix := ComputeSI(input)
- return FtoaWithDigits(value, decimals) + " " + prefix + unit
-}
-
-var errInvalid = errors.New("invalid input")
-
-// ParseSI parses an SI string back into the number and unit.
-//
-// See also: SI, ComputeSI.
-//
-// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
-func ParseSI(input string) (float64, string, error) {
- found := riParseRegex.FindStringSubmatch(input)
- if len(found) != 4 {
- return 0, "", errInvalid
- }
- mag := revSIPrefixTable[found[2]]
- unit := found[3]
-
- base, err := strconv.ParseFloat(found[1], 64)
- return base * mag, unit, err
-}
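The SI helpers round-trip: `ComputeSI` picks the prefix, `SI`/`SIWithDigits` render it, and `ParseSI` scales the number back and returns the bare unit. Sketch using the documented examples:

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.SI(2.2345e-12, "F"))              // 2.2345 pF
	fmt.Println(humanize.SIWithDigits(2.2345e-12, 2, "F")) // 2.23 pF

	// ParseSI undoes the prefix and hands back the unscaled unit.
	v, unit, err := humanize.ParseSI("2.2345 pF")
	if err != nil {
		panic(err)
	}
	fmt.Println(v, unit) // 2.2345e-12 F
}
```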
diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go
deleted file mode 100644
index dd3fbf5e..00000000
--- a/vendor/github.com/dustin/go-humanize/times.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package humanize
-
-import (
- "fmt"
- "math"
- "sort"
- "time"
-)
-
-// Duration-based time units (Month and Year are approximations)
-const (
- Day = 24 * time.Hour
- Week = 7 * Day
- Month = 30 * Day
- Year = 12 * Month
- LongTime = 37 * Year
-)
-
-// Time formats a time into a relative string.
-//
-// Time(someT) -> "3 weeks ago"
-func Time(then time.Time) string {
- return RelTime(then, time.Now(), "ago", "from now")
-}
-
-// A RelTimeMagnitude struct contains a relative time point at which
-// the relative format of time will switch to a new format string. A
-// slice of these in ascending order by their "D" field is passed to
-// CustomRelTime to format durations.
-//
-// The Format field is a string that may contain a "%s" which will be
-// replaced with the appropriate signed label (e.g. "ago" or "from
-// now") and a "%d" that will be replaced by the quantity.
-//
-// The DivBy field is the amount of time the time difference must be
-// divided by in order to display correctly.
-//
-// e.g. if D is 2*time.Minute and you want to display "%d minutes %s",
-// DivBy should be time.Minute so that the duration is expressed in
-// minutes.
-type RelTimeMagnitude struct {
- D time.Duration
- Format string
- DivBy time.Duration
-}
-
-var defaultMagnitudes = []RelTimeMagnitude{
- {time.Second, "now", time.Second},
- {2 * time.Second, "1 second %s", 1},
- {time.Minute, "%d seconds %s", time.Second},
- {2 * time.Minute, "1 minute %s", 1},
- {time.Hour, "%d minutes %s", time.Minute},
- {2 * time.Hour, "1 hour %s", 1},
- {Day, "%d hours %s", time.Hour},
- {2 * Day, "1 day %s", 1},
- {Week, "%d days %s", Day},
- {2 * Week, "1 week %s", 1},
- {Month, "%d weeks %s", Week},
- {2 * Month, "1 month %s", 1},
- {Year, "%d months %s", Month},
- {18 * Month, "1 year %s", 1},
- {2 * Year, "2 years %s", 1},
- {LongTime, "%d years %s", Year},
- {math.MaxInt64, "a long while %s", 1},
-}
-
-// RelTime formats a time into a relative string.
-//
-// It takes two times and two labels. In addition to the generic time
-// delta string (e.g. 5 minutes), the labels are applied so that the
-// label corresponding to the smaller time is used.
-//
-// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
-func RelTime(a, b time.Time, albl, blbl string) string {
- return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
-}
-
-// CustomRelTime formats a time into a relative string.
-//
-// It takes two times, two labels, and a table of relative time
-// formats. In addition to the generic time delta string (e.g. 5
-// minutes), the labels are applied so that the label corresponding
-// to the smaller time is used.
-func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
- lbl := albl
- diff := b.Sub(a)
-
- if a.After(b) {
- lbl = blbl
- diff = a.Sub(b)
- }
-
- n := sort.Search(len(magnitudes), func(i int) bool {
- return magnitudes[i].D > diff
- })
-
- if n >= len(magnitudes) {
- n = len(magnitudes) - 1
- }
- mag := magnitudes[n]
- args := []interface{}{}
- escaped := false
- for _, ch := range mag.Format {
- if escaped {
- switch ch {
- case 's':
- args = append(args, lbl)
- case 'd':
- args = append(args, diff/mag.DivBy)
- }
- escaped = false
- } else {
- escaped = ch == '%'
- }
- }
- return fmt.Sprintf(mag.Format, args...)
-}
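
For reference, how a caller would have used the removed Time/CustomRelTime pair; the custom magnitude table below is a hypothetical example, not from this repository:

```go
package main

import (
	"fmt"
	"math"
	"time"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	then := time.Now().Add(-21 * 24 * time.Hour)

	// Default magnitudes, via Time.
	fmt.Println(humanize.Time(then)) // "3 weeks ago"

	// A custom table that never reports anything coarser than days.
	magnitudes := []humanize.RelTimeMagnitude{
		{D: time.Minute, Format: "moments %s", DivBy: time.Minute},
		{D: humanize.Day, Format: "%d hours %s", DivBy: time.Hour},
		{D: math.MaxInt64, Format: "%d days %s", DivBy: humanize.Day},
	}
	fmt.Println(humanize.CustomRelTime(then, time.Now(), "ago", "from now", magnitudes))
	// "21 days ago"
}
```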
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
deleted file mode 100644
index fad89585..00000000
--- a/vendor/github.com/fsnotify/fsnotify/.editorconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-root = true
-
-[*.go]
-indent_style = tab
-indent_size = 4
-insert_final_newline = true
-
-[*.{yml,yaml}]
-indent_style = space
-indent_size = 2
-insert_final_newline = true
-trim_trailing_whitespace = true
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes
deleted file mode 100644
index 32f1001b..00000000
--- a/vendor/github.com/fsnotify/fsnotify/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-go.sum linguist-generated
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
deleted file mode 100644
index 4cd0cbaf..00000000
--- a/vendor/github.com/fsnotify/fsnotify/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-# Setup a Global .gitignore for OS and editor generated files:
-# https://help.github.com/articles/ignoring-files
-# git config --global core.excludesfile ~/.gitignore_global
-
-.vagrant
-*.sublime-project
diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap
deleted file mode 100644
index a04f2907..00000000
--- a/vendor/github.com/fsnotify/fsnotify/.mailmap
+++ /dev/null
@@ -1,2 +0,0 @@
-Chris Howey
-Nathan Youngman <4566+nathany@users.noreply.github.com>
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
deleted file mode 100644
index 6cbabe5e..00000000
--- a/vendor/github.com/fsnotify/fsnotify/AUTHORS
+++ /dev/null
@@ -1,62 +0,0 @@
-# Names should be added to this file as
-# Name or Organization
-# The email address is not required for organizations.
-
-# You can update this list using the following command:
-#
-# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS
-
-# Please keep the list sorted.
-
-Aaron L
-Adrien Bustany
-Alexey Kazakov
-Amit Krishnan
-Anmol Sethi
-Bjørn Erik Pedersen
-Brian Goff
-Bruno Bigras
-Caleb Spare
-Case Nelson
-Chris Howey
-Christoffer Buchholz
-Daniel Wagner-Hall
-Dave Cheney
-Eric Lin
-Evan Phoenix
-Francisco Souza
-Gautam Dey
-Hari haran
-Ichinose Shogo
-Johannes Ebke
-John C Barstow
-Kelvin Fo
-Ken-ichirou MATSUZAWA
-Matt Layher
-Matthias Stone
-Nathan Youngman
-Nickolai Zeldovich
-Oliver Bristow
-Patrick
-Paul Hammond
-Pawel Knap
-Pieter Droogendijk
-Pratik Shinde