diff --git a/Makefile b/Makefile index 914f1256..5b560d9b 100644 --- a/Makefile +++ b/Makefile @@ -28,9 +28,7 @@ install: -ldflags="-w" ./$${x}; done lint: -# https://github.com/dominikh/go-tools/issues/1433 -# go install honnef.co/go/tools/cmd/staticcheck@latest - go install honnef.co/go/tools/cmd/staticcheck@master + go install honnef.co/go/tools/cmd/staticcheck@latest staticcheck -version # staticcheck defaults are all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022 staticcheck -checks all ./... diff --git a/build/deploy/hermes/docker-compose.yml b/build/deploy/hermes/docker-compose.yml index 149e6a12..10bfbd84 100644 --- a/build/deploy/hermes/docker-compose.yml +++ b/build/deploy/hermes/docker-compose.yml @@ -1,7 +1,7 @@ version: "3" services: hermes-api: - image: ghcr.io/ownmfa/hermes:aafe23c5 + image: ghcr.io/ownmfa/hermes:c8374a87 command: hermes-api restart: on-failure depends_on: @@ -35,7 +35,7 @@ services: - "traefik.http.services.hermes-grpc.loadbalancer.server.scheme=h2c" hermes-notifier: - image: ghcr.io/ownmfa/hermes:aafe23c5 + image: ghcr.io/ownmfa/hermes:c8374a87 command: hermes-notifier restart: on-failure environment: diff --git a/cmd/hermes-api/main.go b/cmd/hermes-api/main.go index a864baf7..6b933076 100644 --- a/cmd/hermes-api/main.go +++ b/cmd/hermes-api/main.go @@ -11,7 +11,7 @@ import ( func main() { cfg := config.New() - hlog.SetDefault(hlog.NewJSON().WithLevel(cfg.LogLevel).WithField("service", + hlog.SetDefault(hlog.NewJSON(cfg.LogLevel).WithField("service", api.ServiceName)) metric.SetStatsD(cfg.StatsDAddr, api.ServiceName) diff --git a/cmd/hermes-notifier/main.go b/cmd/hermes-notifier/main.go index b3462856..8077212e 100644 --- a/cmd/hermes-notifier/main.go +++ b/cmd/hermes-notifier/main.go @@ -11,7 +11,7 @@ import ( func main() { cfg := config.New() - hlog.SetDefault(hlog.NewJSON().WithLevel(cfg.LogLevel).WithField("service", + hlog.SetDefault(hlog.NewJSON(cfg.LogLevel).WithField("service", notifier.ServiceName)) metric.SetStatsD(cfg.StatsDAddr, notifier.ServiceName) diff --git a/go.mod b/go.mod index f651166e..b4e5ac48 100644 --- a/go.mod +++ b/go.mod @@ -4,16 +4,15 @@ go 1.21.0 require ( github.com/NYTimes/gziphandler v1.1.1 - github.com/google/uuid v1.3.0 - github.com/gregdel/pushover v1.2.0 + github.com/google/uuid v1.3.1 + github.com/gregdel/pushover v1.2.1 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 github.com/jackc/pgx/v5 v5.4.3 github.com/jellydator/ttlcache/v3 v3.0.1 github.com/mennanov/fmutils v0.2.0 github.com/nsqio/go-nsq v1.1.0 github.com/ownmfa/api/go v1.0.27 - github.com/redis/go-redis/v9 v9.0.5 - github.com/rs/zerolog v1.30.0 + github.com/redis/go-redis/v9 v9.1.0 github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/smira/go-statsd v1.3.2 github.com/stretchr/testify v1.8.4 @@ -33,16 +32,14 @@ require ( github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/kr/text v0.2.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect golang.org/x/net v0.14.0 // indirect golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.11.0 // indirect golang.org/x/text v0.12.0 // indirect - google.golang.org/genproto v0.0.0-20230814215434-ca7cfce7776a // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230814215434-ca7cfce7776a // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230814215434-ca7cfce7776a // indirect + google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230821184602-ccc8af3d0e93 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230821184602-ccc8af3d0e93 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index f9769ef9..0d9417ec 100644 --- a/go.sum +++ b/go.sum @@ -2,15 +2,14 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao= -github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= +github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0= +github.com/bsm/ginkgo/v2 v2.9.5/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -21,7 +20,6 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= @@ -47,10 +45,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gregdel/pushover v1.2.0 h1:SLnpvJijUyEZvkJNyrldGhFhryYgQYlThSLpB5Oqt5k= -github.com/gregdel/pushover v1.2.0/go.mod h1:EcaO66Nn1StkpEm1iKtBTV3d2A16SoMsVER1PthX7to= +github.com/google/uuid v1.3.1 
h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gregdel/pushover v1.2.1 h1:IPPJCdzXz60gMqnlzS0ZAW5z5aS1gI4nU+YM0Pe+ssA= +github.com/gregdel/pushover v1.2.1/go.mod h1:EcaO66Nn1StkpEm1iKtBTV3d2A16SoMsVER1PthX7to= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 h1:dygLcbEBA+t/P7ck6a8AkXv6juQ4cK0RHBoh32jxhHM= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2/go.mod h1:Ap9RLCIJVtgQg1/BBgVEfypOAySvvlcpcVQkSzJCH4Y= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -65,30 +63,19 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mennanov/fmutils v0.2.0 h1:Hw/iuQPdKtiB2B9YYh+NX8iv7U7eQu1rICPjr8NvxSo= github.com/mennanov/fmutils v0.2.0/go.mod h1:DE+qeI9Xy5s1GA4trgq8H26jr5DgJ4a9+0D1DPVCqyk= github.com/nsqio/go-nsq v1.1.0 h1:PQg+xxiUjA7V+TLdXw7nVrJ5Jbl3sN86EhGCQj4+FYE= github.com/nsqio/go-nsq v1.1.0/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= github.com/ownmfa/api/go v1.0.27 h1:fOruRrzqnAJezhSSxpZjB8wd0djA+ii98x+eI6fI6GE= github.com/ownmfa/api/go v1.0.27/go.mod h1:99PmUKxcZdiNGTHyJRlDkennbI7OWYiuRny6u9zbpkI= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/redis/go-redis/v9 v9.0.5 h1:CuQcn5HIEeK7BgElubPP8CGtE0KakrnbBSTLjathl5o= -github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= +github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= +github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= -github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= github.com/smira/go-statsd v1.3.2 h1:1EeuzxNZ/TD9apbTOFSM9nulqfcsQFmT4u1A2DREabI= @@ 
-125,10 +112,6 @@ golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -146,12 +129,12 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20230814215434-ca7cfce7776a h1:+pp9kgkk8qyqFeQyXoDBFI4U+YYB15cmRkQ3pi6v/h4= -google.golang.org/genproto v0.0.0-20230814215434-ca7cfce7776a/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20230814215434-ca7cfce7776a h1:ICnet6R+XBkBfhCeApORkkEZbXfPUIHmqOC+5MdMtaQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230814215434-ca7cfce7776a/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230814215434-ca7cfce7776a h1:5rTPHLf5eLPfqGvw3fLpEmUpko2Ky91ft14LxGs5BZc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230814215434-ca7cfce7776a/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93 h1:zv6ieVm8jNcN33At1+APsRISkRgynuWUxUhv6G123jY= +google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto/googleapis/api v0.0.0-20230821184602-ccc8af3d0e93 h1:v4jF/cZj3rPjpsS7a+TEb8RofwMmPnfn+QiPkDyduDA= +google.golang.org/genproto/googleapis/api v0.0.0-20230821184602-ccc8af3d0e93/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230821184602-ccc8af3d0e93 h1:OpqhBqmDJdwQbcZeN2G+wAjNq6xnmZHrmS+//OOUxT8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230821184602-ccc8af3d0e93/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= diff --git a/pkg/hlog/default.go b/pkg/hlog/default.go index d4607b78..3424c3d8 100644 --- a/pkg/hlog/default.go +++ b/pkg/hlog/default.go @@ -2,39 +2,36 @@ package hlog import ( "fmt" - "sync" + "sync/atomic" ) -// Since logger is global and may be replaced, locking is required. 
-var ( - logger Logger - loggerMu sync.Mutex -) +// logger is an atomic value of type Logger. +var logger atomic.Value -// NewConsole returns a new Logger with console formatting at the debug level. -func NewConsole() Logger { - return newZlogConsole() +// NewConsole returns a new Logger with console formatting at the specified +// level. +func NewConsole(level string) Logger { + return newStlogConsole(level) } -// NewJSON returns a new Logger with JSON formatting at the debug level. -func NewJSON() Logger { - return newZlogJSON() +// NewJSON returns a new Logger with JSON formatting at the specified level. +func NewJSON(level string) Logger { + return newStlogJSON(level) } // Default returns the default logger, which is thread-safe. func Default() Logger { - loggerMu.Lock() - defer loggerMu.Unlock() + l, ok := logger.Load().(Logger) + if !ok { + panic("Default logger.Load: false") + } - return logger + return l } // SetDefault sets a new default logger. func SetDefault(l Logger) { - loggerMu.Lock() - defer loggerMu.Unlock() - - logger = l + logger.Store(l) } // WithField returns a derived Logger from the default Logger with a string @@ -44,42 +41,42 @@ func WithField(key, val string) Logger { } // Debug logs a new message with debug level. -func Debug(v ...interface{}) { +func Debug(v ...any) { Default().Debug(fmt.Sprint(v...)) } // Debugf logs a new formatted message with debug level. -func Debugf(format string, v ...interface{}) { +func Debugf(format string, v ...any) { Default().Debugf(format, v...) } // Info logs a new message with info level. -func Info(v ...interface{}) { +func Info(v ...any) { Default().Info(fmt.Sprint(v...)) } // Infof logs a new formatted message with info level. -func Infof(format string, v ...interface{}) { +func Infof(format string, v ...any) { Default().Infof(format, v...) } // Error logs a new message with error level. -func Error(v ...interface{}) { +func Error(v ...any) { Default().Error(fmt.Sprint(v...)) } // Errorf logs a new formatted message with error level. -func Errorf(format string, v ...interface{}) { +func Errorf(format string, v ...any) { Default().Errorf(format, v...) } // Fatal logs a new message with fatal level followed by a call to os.Exit(1). -func Fatal(v ...interface{}) { +func Fatal(v ...any) { Default().Fatal(fmt.Sprint(v...)) } // Fatalf logs a new formatted message with fatal level followed by a call to // os.Exit(1). -func Fatalf(format string, v ...interface{}) { +func Fatalf(format string, v ...any) { Default().Fatalf(format, v...) 
} diff --git a/pkg/hlog/default_test.go b/pkg/hlog/default_test.go index d89e670e..4467ff66 100644 --- a/pkg/hlog/default_test.go +++ b/pkg/hlog/default_test.go @@ -4,6 +4,7 @@ package hlog import ( "fmt" + "strconv" "testing" "github.com/ownmfa/hermes/pkg/test/random" @@ -31,7 +32,7 @@ func TestDefault(t *testing.T) { } func TestDefaultConsole(t *testing.T) { - SetDefault(NewConsole()) + SetDefault(NewConsole("DEBUG")) for i := 0; i < 5; i++ { lTest := i @@ -51,7 +52,7 @@ func TestDefaultConsole(t *testing.T) { } func TestDefaultJSON(t *testing.T) { - SetDefault(NewJSON()) + SetDefault(NewJSON("DEBUG")) for i := 0; i < 5; i++ { lTest := i @@ -73,21 +74,21 @@ func TestDefaultJSON(t *testing.T) { func TestDefaultWithField(t *testing.T) { t.Parallel() - logger := WithField(random.String(10), random.String(10)) - t.Logf("logger: %#v", logger) - for i := 0; i < 5; i++ { lTest := i t.Run(fmt.Sprintf("Can log %v with string", lTest), func(t *testing.T) { t.Parallel() - logger.Debug("Debug") - logger.Debugf("Debugf: %v", lTest) - logger.Info("Info") - logger.Infof("Infof: %v", lTest) - logger.Error("Error") - logger.Errorf("Errorf: %v", lTest) + logField := WithField(strconv.Itoa(lTest), random.String(10)) + t.Logf("logField: %#v", logField) + + logField.Debug("Debug") + logField.Debugf("Debugf: %v", lTest) + logField.Info("Info") + logField.Infof("Infof: %v", lTest) + logField.Error("Error") + logField.Errorf("Errorf: %v", lTest) // Do not test Fatal* due to os.Exit. }) } diff --git a/pkg/hlog/init.go b/pkg/hlog/init.go index b9e2eafe..659cb3b5 100644 --- a/pkg/hlog/init.go +++ b/pkg/hlog/init.go @@ -1,11 +1,5 @@ package hlog -import ( - "github.com/rs/zerolog" -) - func init() { - zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs - - SetDefault(NewConsole()) + SetDefault(NewConsole("DEBUG")) } diff --git a/pkg/hlog/logger.go b/pkg/hlog/logger.go index b7667c52..6dbca944 100644 --- a/pkg/hlog/logger.go +++ b/pkg/hlog/logger.go @@ -5,27 +5,25 @@ package hlog // Logger defines the methods provided by a Log. type Logger interface { - // WithLevel returns a derived Logger with the level set to level. - WithLevel(level string) Logger // WithField returns a derived Logger with a string field. WithField(key, val string) Logger // Debug logs a new message with debug level. - Debug(v ...interface{}) + Debug(v ...any) // Debugf logs a new formatted message with debug level. - Debugf(format string, v ...interface{}) + Debugf(format string, v ...any) // Info logs a new message with info level. - Info(v ...interface{}) + Info(v ...any) // Infof logs a new formatted message with info level. - Infof(format string, v ...interface{}) + Infof(format string, v ...any) // Error logs a new message with error level. - Error(v ...interface{}) + Error(v ...any) // Errorf logs a new formatted message with error level. - Errorf(format string, v ...interface{}) + Errorf(format string, v ...any) // Fatal logs a new message with fatal level followed by a call to // os.Exit(1). - Fatal(v ...interface{}) + Fatal(v ...any) // Fatalf logs a new formatted message with fatal level followed by a call // to os.Exit(1). - Fatalf(format string, v ...interface{}) + Fatalf(format string, v ...any) } diff --git a/pkg/hlog/slog.go b/pkg/hlog/slog.go new file mode 100644 index 00000000..b0d5754e --- /dev/null +++ b/pkg/hlog/slog.go @@ -0,0 +1,122 @@ +package hlog + +import ( + "context" + "fmt" + "log/slog" + "os" + "strings" +) + +// levels maps strings to log levels. 
+var levels = map[string]slog.Level{ + "DEBUG": slog.LevelDebug, + "INFO": slog.LevelInfo, + "ERROR": slog.LevelError, + "FATAL": slog.LevelError + 4, +} + +// stlog contains methods to write logs using slog and implements the Logger +// interface. +type stlog struct { + sl *slog.Logger +} + +// Verify stlog implements Logger. +var _ Logger = &stlog{} + +// parseLevel parses a string into a log level. +func parseLevel(level string) slog.Level { + slevel, ok := levels[strings.ToUpper(level)] + if !ok { + slog.LogAttrs(context.Background(), slog.LevelError, + fmt.Sprintf("parseLevel unknown level, using INFO: %s", level)) + slevel = slog.LevelInfo + } + + return slevel +} + +// newStlogConsole returns a new Logger with console formatting at the specified +// level. +func newStlogConsole(level string) Logger { + l := new(slog.LevelVar) + l.Set(slog.LevelDebug) + + s := &stlog{ + sl: slog.New(slog.NewTextHandler(os.Stderr, + &slog.HandlerOptions{Level: l})), + } + + l.Set(parseLevel(level)) + + return s +} + +// newStlogJSON returns a new Logger with JSON formatting at the specified +// level. +func newStlogJSON(level string) Logger { + l := new(slog.LevelVar) + l.Set(slog.LevelDebug) + + s := &stlog{ + sl: slog.New(slog.NewJSONHandler(os.Stderr, + &slog.HandlerOptions{Level: l})), + } + + l.Set(parseLevel(level)) + + return s +} + +// WithField returns a derived Logger with a string field. +func (s *stlog) WithField(key, val string) Logger { + return &stlog{sl: s.sl.With(key, val)} +} + +// Debug logs a new message with debug level. +func (s *stlog) Debug(v ...any) { + s.sl.LogAttrs(context.Background(), slog.LevelDebug, fmt.Sprint(v...)) +} + +// Debugf logs a new formatted message with debug level. +func (s *stlog) Debugf(format string, v ...any) { + s.sl.LogAttrs(context.Background(), slog.LevelDebug, fmt.Sprintf(format, + v...)) +} + +// Info logs a new message with info level. +func (s *stlog) Info(v ...any) { + s.sl.LogAttrs(context.Background(), slog.LevelInfo, fmt.Sprint(v...)) +} + +// Infof logs a new formatted message with info level. +func (s *stlog) Infof(format string, v ...any) { + s.sl.LogAttrs(context.Background(), slog.LevelInfo, fmt.Sprintf(format, + v...)) +} + +// Error logs a new message with error level. +func (s *stlog) Error(v ...any) { + s.sl.LogAttrs(context.Background(), slog.LevelError, fmt.Sprint(v...)) +} + +// Errorf logs a new formatted message with error level. +func (s *stlog) Errorf(format string, v ...any) { + s.sl.LogAttrs(context.Background(), slog.LevelError, fmt.Sprintf(format, + v...)) +} + +// Fatal logs a new message with fatal level followed by a call to os.Exit(1). +func (s *stlog) Fatal(v ...any) { + s.sl.LogAttrs(context.Background(), slog.LevelError+4, fmt.Sprint(v...)) + os.Exit(1) +} + +// Fatalf logs a new formatted message with fatal level followed by a call to +// os.Exit(1). 
+func (s *stlog) Fatalf(format string, v ...any) { + s.sl.LogAttrs(context.Background(), slog.LevelError+4, fmt.Sprintf(format, + v...)) + os.Exit(1) +} diff --git a/pkg/hlog/zerolog_test.go b/pkg/hlog/slog_test.go similarity index 54% rename from pkg/hlog/zerolog_test.go rename to pkg/hlog/slog_test.go index 1e3bb0ab..f81d37ea 100644 --- a/pkg/hlog/zerolog_test.go +++ b/pkg/hlog/slog_test.go @@ -4,18 +4,15 @@ package hlog import ( "fmt" + "strconv" "testing" "github.com/ownmfa/hermes/pkg/test/random" ) -func TestZlogWithLevel(t *testing.T) { +func TestStlogLevel(t *testing.T) { t.Parallel() - log := newZlogConsole() - t.Logf("log: %#v", log) - log.Debug("Debug (default)") - tests := []string{ "DEBUG", "info", @@ -30,23 +27,24 @@ func TestZlogWithLevel(t *testing.T) { t.Run(fmt.Sprintf("Can log %v", lTest), func(t *testing.T) { t.Parallel() - logLevel := log.WithLevel(lTest) + logLevel := newStlogConsole(lTest) t.Logf("logLevel: %#v", logLevel) + logLevel.Debug("Debug") - logLevel.Debugf("Debugf: %#v", logLevel) + logLevel.Debugf("Debugf: %v and above", lTest) logLevel.Info("Info") - logLevel.Infof("Infof: %#v", logLevel) + logLevel.Infof("Infof: %v and above", lTest) logLevel.Error("Error") - logLevel.Errorf("Errorf: %#v", logLevel) + logLevel.Errorf("Errorf: %v and above", lTest) // Do not test Fatal* due to os.Exit. }) } } -func TestZlogWithField(t *testing.T) { +func TestStlogWithField(t *testing.T) { t.Parallel() - logger := newZlogJSON().WithField(random.String(10), random.String(10)) + logger := newStlogJSON("DEBUG") t.Logf("logger: %#v", logger) for i := 0; i < 5; i++ { @@ -55,12 +53,15 @@ func TestZlogWithField(t *testing.T) { t.Run(fmt.Sprintf("Can log %v with string", lTest), func(t *testing.T) { t.Parallel() - logger.Debug("Debug") - logger.Debugf("Debugf: %v", lTest) - logger.Info("Info") - logger.Infof("Infof: %v", lTest) - logger.Error("Error") - logger.Errorf("Errorf: %v", lTest) + logField := logger.WithField(strconv.Itoa(lTest), random.String(10)) + t.Logf("logField: %#v", logField) + + logField.Debug("Debug") + logField.Debugf("Debugf: %v", lTest) + logField.Info("Info") + logField.Infof("Infof: %v", lTest) + logField.Error("Error") + logField.Errorf("Errorf: %v", lTest) // Do not test Fatal* due to os.Exit. }) } diff --git a/pkg/hlog/zerolog.go b/pkg/hlog/zerolog.go deleted file mode 100644 index d42cfa1a..00000000 --- a/pkg/hlog/zerolog.go +++ /dev/null @@ -1,102 +0,0 @@ -package hlog - -import ( - "fmt" - "os" - "strings" - "time" - - "github.com/rs/zerolog" -) - -// zlog contains methods to write logs using zerolog and implements the Logger -// interface. -type zlog struct { - zl zerolog.Logger -} - -// Verify zlog implements Logger. -var _ Logger = &zlog{} - -// newZlogConsole returns a new Logger with console formatting at the debug -// level. -func newZlogConsole() Logger { - cw := zerolog.ConsoleWriter{ - Out: os.Stdout, - // VSCode does not support colors in the output channel: - // https://github.com/Microsoft/vscode/issues/571 - NoColor: true, - TimeFormat: time.RFC3339, - } - - return &zlog{ - zl: zerolog.New(cw).With().Timestamp().Logger(). - Level(zerolog.DebugLevel), - } -} - -// newZlogJSON returns a new Logger with JSON formatting at the debug level. -func newZlogJSON() Logger { - return &zlog{ - zl: zerolog.New(os.Stderr).With().Timestamp().Logger(). - Level(zerolog.DebugLevel), - } -} - -// WithLevel returns a derived Logger with the level set to level. 
-func (z *zlog) WithLevel(level string) Logger { - zlevel, err := zerolog.ParseLevel(strings.ToLower(level)) - if err != nil { - z.zl.Error().Msgf("SetLevel unknown level, using INFO: %v", level) - - return &zlog{zl: z.zl.Level(zerolog.InfoLevel)} - } - - return &zlog{zl: z.zl.Level(zlevel)} -} - -// WithField returns a derived Logger with a string field. -func (z *zlog) WithField(key, val string) Logger { - return &zlog{zl: z.zl.With().Str(key, val).Logger()} -} - -// Debug logs a new message with debug level. -func (z *zlog) Debug(v ...interface{}) { - z.zl.Debug().Msg(fmt.Sprint(v...)) -} - -// Debugf logs a new formatted message with debug level. -func (z *zlog) Debugf(format string, v ...interface{}) { - z.zl.Debug().Msgf(format, v...) -} - -// Info logs a new message with info level. -func (z *zlog) Info(v ...interface{}) { - z.zl.Info().Msg(fmt.Sprint(v...)) -} - -// Infof logs a new formatted message with info level. -func (z *zlog) Infof(format string, v ...interface{}) { - z.zl.Info().Msgf(format, v...) -} - -// Error logs a new message with error level. -func (z *zlog) Error(v ...interface{}) { - z.zl.Error().Msg(fmt.Sprint(v...)) -} - -// Errorf logs a new formatted message with error level. -func (z *zlog) Errorf(format string, v ...interface{}) { - z.zl.Error().Msgf(format, v...) -} - -// Fatal logs a new message with fatal level followed by a call to os.Exit(1). -func (z *zlog) Fatal(v ...interface{}) { - z.zl.Fatal().Msg(fmt.Sprint(v...)) -} - -// Fatalf logs a new formatted message with fatal level followed by a call to -// os.Exit(1). -func (z *zlog) Fatalf(format string, v ...interface{}) { - z.zl.Fatal().Msgf(format, v...) -} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60..00000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 00000000..2bd78667 --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f..55668887 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as desrcibed in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. 
+ ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46f..3e9a6188 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +# uuid The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. This package is based on the github.com/pborman/uuid package (previously named @@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). ###### Install -`go get github.com/google/uuid` +```sh +go get github.com/google/uuid +``` ###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78edc..b2a0bc87 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. +// This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207ae..a56138cc 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") diff --git a/vendor/github.com/gregdel/pushover/response.go b/vendor/github.com/gregdel/pushover/response.go index 93690d12..f22cd2e7 100644 --- a/vendor/github.com/gregdel/pushover/response.go +++ b/vendor/github.com/gregdel/pushover/response.go @@ -13,7 +13,8 @@ type Response struct { // String represents a printable form of the response. func (r Response) String() string { - ret := fmt.Sprintf("Request id: %s\n", r.ID) + ret := fmt.Sprintf("Status: %d\n", r.Status) + ret += fmt.Sprintf("Request id: %s\n", r.ID) if r.Receipt != "" { ret += fmt.Sprintf("Receipt: %s\n", r.Receipt) } diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE deleted file mode 100644 index 91b5cef3..00000000 --- a/vendor/github.com/mattn/go-colorable/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
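The mattn/go-colorable and go-isatty removals in this vendor tree accompany dropping rs/zerolog above. For reference, a minimal sketch of how the reworked pkg/hlog constructors are wired after this change, mirroring the cmd/hermes-api/main.go edit earlier in the diff — the import path, level string, and field values here are illustrative assumptions, not part of the patch:

```go
package main

import (
	// Import path assumed from the pkg/hlog layout shown in this diff.
	"github.com/ownmfa/hermes/pkg/hlog"
)

func main() {
	// NewJSON now takes the level up front; parseLevel in pkg/hlog/slog.go
	// accepts DEBUG/INFO/ERROR/FATAL case-insensitively and falls back to
	// INFO for unknown values.
	hlog.SetDefault(hlog.NewJSON("INFO").WithField("service", "example"))

	hlog.Info("logger configured")
	hlog.Debugf("suppressed at INFO: %v", 42) // below the configured level
	hlog.Errorf("something failed: %v", "illustrative error")
}
```

Console output works the same way through NewConsole(level), which is what the package init uses as the default.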
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md deleted file mode 100644 index ca048371..00000000 --- a/vendor/github.com/mattn/go-colorable/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# go-colorable - -[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) -[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) -[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) -[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) - -Colorable writer for windows. - -For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) -This package is possible to handle escape sequence for ansi color on windows. - -## Too Bad! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) - - -## So Good! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) - -## Usage - -```go -logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) -logrus.SetOutput(colorable.NewColorableStdout()) - -logrus.Info("succeeded") -logrus.Warn("not correct") -logrus.Error("something error") -logrus.Fatal("panic") -``` - -You can compile above code on non-windows OSs. - -## Installation - -``` -$ go get github.com/mattn/go-colorable -``` - -# License - -MIT - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go deleted file mode 100644 index 416d1bbb..00000000 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build appengine -// +build appengine - -package colorable - -import ( - "io" - "os" - - _ "github.com/mattn/go-isatty" -) - -// NewColorable returns new instance of Writer which handles escape sequence. -func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - return file -} - -// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. -func NewColorableStdout() io.Writer { - return os.Stdout -} - -// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. -func NewColorableStderr() io.Writer { - return os.Stderr -} - -// EnableColorsStdout enable colors if possible. -func EnableColorsStdout(enabled *bool) func() { - if enabled != nil { - *enabled = true - } - return func() {} -} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go deleted file mode 100644 index 766d9460..00000000 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build !windows && !appengine -// +build !windows,!appengine - -package colorable - -import ( - "io" - "os" - - _ "github.com/mattn/go-isatty" -) - -// NewColorable returns new instance of Writer which handles escape sequence. -func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - return file -} - -// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. 
-func NewColorableStdout() io.Writer { - return os.Stdout -} - -// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. -func NewColorableStderr() io.Writer { - return os.Stderr -} - -// EnableColorsStdout enable colors if possible. -func EnableColorsStdout(enabled *bool) func() { - if enabled != nil { - *enabled = true - } - return func() {} -} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go deleted file mode 100644 index 1846ad5a..00000000 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ /dev/null @@ -1,1047 +0,0 @@ -//go:build windows && !appengine -// +build windows,!appengine - -package colorable - -import ( - "bytes" - "io" - "math" - "os" - "strconv" - "strings" - "sync" - "syscall" - "unsafe" - - "github.com/mattn/go-isatty" -) - -const ( - foregroundBlue = 0x1 - foregroundGreen = 0x2 - foregroundRed = 0x4 - foregroundIntensity = 0x8 - foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) - backgroundBlue = 0x10 - backgroundGreen = 0x20 - backgroundRed = 0x40 - backgroundIntensity = 0x80 - backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) - commonLvbUnderscore = 0x8000 - - cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 -) - -const ( - genericRead = 0x80000000 - genericWrite = 0x40000000 -) - -const ( - consoleTextmodeBuffer = 0x1 -) - -type wchar uint16 -type short int16 -type dword uint32 -type word uint16 - -type coord struct { - x short - y short -} - -type smallRect struct { - left short - top short - right short - bottom short -} - -type consoleScreenBufferInfo struct { - size coord - cursorPosition coord - attributes word - window smallRect - maximumWindowSize coord -} - -type consoleCursorInfo struct { - size dword - visible int32 -} - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") - procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") - procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") - procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") - procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") - procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") - procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - procSetConsoleMode = kernel32.NewProc("SetConsoleMode") - procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") -) - -// Writer provides colorable Writer to the console -type Writer struct { - out io.Writer - handle syscall.Handle - althandle syscall.Handle - oldattr word - oldpos coord - rest bytes.Buffer - mutex sync.Mutex -} - -// NewColorable returns new instance of Writer which handles escape sequence from File. 
-func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - if isatty.IsTerminal(file.Fd()) { - var mode uint32 - if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { - return file - } - var csbi consoleScreenBufferInfo - handle := syscall.Handle(file.Fd()) - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} - } - return file -} - -// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. -func NewColorableStdout() io.Writer { - return NewColorable(os.Stdout) -} - -// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. -func NewColorableStderr() io.Writer { - return NewColorable(os.Stderr) -} - -var color256 = map[int]int{ - 0: 0x000000, - 1: 0x800000, - 2: 0x008000, - 3: 0x808000, - 4: 0x000080, - 5: 0x800080, - 6: 0x008080, - 7: 0xc0c0c0, - 8: 0x808080, - 9: 0xff0000, - 10: 0x00ff00, - 11: 0xffff00, - 12: 0x0000ff, - 13: 0xff00ff, - 14: 0x00ffff, - 15: 0xffffff, - 16: 0x000000, - 17: 0x00005f, - 18: 0x000087, - 19: 0x0000af, - 20: 0x0000d7, - 21: 0x0000ff, - 22: 0x005f00, - 23: 0x005f5f, - 24: 0x005f87, - 25: 0x005faf, - 26: 0x005fd7, - 27: 0x005fff, - 28: 0x008700, - 29: 0x00875f, - 30: 0x008787, - 31: 0x0087af, - 32: 0x0087d7, - 33: 0x0087ff, - 34: 0x00af00, - 35: 0x00af5f, - 36: 0x00af87, - 37: 0x00afaf, - 38: 0x00afd7, - 39: 0x00afff, - 40: 0x00d700, - 41: 0x00d75f, - 42: 0x00d787, - 43: 0x00d7af, - 44: 0x00d7d7, - 45: 0x00d7ff, - 46: 0x00ff00, - 47: 0x00ff5f, - 48: 0x00ff87, - 49: 0x00ffaf, - 50: 0x00ffd7, - 51: 0x00ffff, - 52: 0x5f0000, - 53: 0x5f005f, - 54: 0x5f0087, - 55: 0x5f00af, - 56: 0x5f00d7, - 57: 0x5f00ff, - 58: 0x5f5f00, - 59: 0x5f5f5f, - 60: 0x5f5f87, - 61: 0x5f5faf, - 62: 0x5f5fd7, - 63: 0x5f5fff, - 64: 0x5f8700, - 65: 0x5f875f, - 66: 0x5f8787, - 67: 0x5f87af, - 68: 0x5f87d7, - 69: 0x5f87ff, - 70: 0x5faf00, - 71: 0x5faf5f, - 72: 0x5faf87, - 73: 0x5fafaf, - 74: 0x5fafd7, - 75: 0x5fafff, - 76: 0x5fd700, - 77: 0x5fd75f, - 78: 0x5fd787, - 79: 0x5fd7af, - 80: 0x5fd7d7, - 81: 0x5fd7ff, - 82: 0x5fff00, - 83: 0x5fff5f, - 84: 0x5fff87, - 85: 0x5fffaf, - 86: 0x5fffd7, - 87: 0x5fffff, - 88: 0x870000, - 89: 0x87005f, - 90: 0x870087, - 91: 0x8700af, - 92: 0x8700d7, - 93: 0x8700ff, - 94: 0x875f00, - 95: 0x875f5f, - 96: 0x875f87, - 97: 0x875faf, - 98: 0x875fd7, - 99: 0x875fff, - 100: 0x878700, - 101: 0x87875f, - 102: 0x878787, - 103: 0x8787af, - 104: 0x8787d7, - 105: 0x8787ff, - 106: 0x87af00, - 107: 0x87af5f, - 108: 0x87af87, - 109: 0x87afaf, - 110: 0x87afd7, - 111: 0x87afff, - 112: 0x87d700, - 113: 0x87d75f, - 114: 0x87d787, - 115: 0x87d7af, - 116: 0x87d7d7, - 117: 0x87d7ff, - 118: 0x87ff00, - 119: 0x87ff5f, - 120: 0x87ff87, - 121: 0x87ffaf, - 122: 0x87ffd7, - 123: 0x87ffff, - 124: 0xaf0000, - 125: 0xaf005f, - 126: 0xaf0087, - 127: 0xaf00af, - 128: 0xaf00d7, - 129: 0xaf00ff, - 130: 0xaf5f00, - 131: 0xaf5f5f, - 132: 0xaf5f87, - 133: 0xaf5faf, - 134: 0xaf5fd7, - 135: 0xaf5fff, - 136: 0xaf8700, - 137: 0xaf875f, - 138: 0xaf8787, - 139: 0xaf87af, - 140: 0xaf87d7, - 141: 0xaf87ff, - 142: 0xafaf00, - 143: 0xafaf5f, - 144: 0xafaf87, - 145: 0xafafaf, - 146: 0xafafd7, - 147: 0xafafff, - 148: 0xafd700, - 149: 0xafd75f, - 150: 0xafd787, - 151: 0xafd7af, - 152: 0xafd7d7, - 153: 0xafd7ff, - 154: 0xafff00, - 155: 0xafff5f, - 156: 0xafff87, - 
157: 0xafffaf, - 158: 0xafffd7, - 159: 0xafffff, - 160: 0xd70000, - 161: 0xd7005f, - 162: 0xd70087, - 163: 0xd700af, - 164: 0xd700d7, - 165: 0xd700ff, - 166: 0xd75f00, - 167: 0xd75f5f, - 168: 0xd75f87, - 169: 0xd75faf, - 170: 0xd75fd7, - 171: 0xd75fff, - 172: 0xd78700, - 173: 0xd7875f, - 174: 0xd78787, - 175: 0xd787af, - 176: 0xd787d7, - 177: 0xd787ff, - 178: 0xd7af00, - 179: 0xd7af5f, - 180: 0xd7af87, - 181: 0xd7afaf, - 182: 0xd7afd7, - 183: 0xd7afff, - 184: 0xd7d700, - 185: 0xd7d75f, - 186: 0xd7d787, - 187: 0xd7d7af, - 188: 0xd7d7d7, - 189: 0xd7d7ff, - 190: 0xd7ff00, - 191: 0xd7ff5f, - 192: 0xd7ff87, - 193: 0xd7ffaf, - 194: 0xd7ffd7, - 195: 0xd7ffff, - 196: 0xff0000, - 197: 0xff005f, - 198: 0xff0087, - 199: 0xff00af, - 200: 0xff00d7, - 201: 0xff00ff, - 202: 0xff5f00, - 203: 0xff5f5f, - 204: 0xff5f87, - 205: 0xff5faf, - 206: 0xff5fd7, - 207: 0xff5fff, - 208: 0xff8700, - 209: 0xff875f, - 210: 0xff8787, - 211: 0xff87af, - 212: 0xff87d7, - 213: 0xff87ff, - 214: 0xffaf00, - 215: 0xffaf5f, - 216: 0xffaf87, - 217: 0xffafaf, - 218: 0xffafd7, - 219: 0xffafff, - 220: 0xffd700, - 221: 0xffd75f, - 222: 0xffd787, - 223: 0xffd7af, - 224: 0xffd7d7, - 225: 0xffd7ff, - 226: 0xffff00, - 227: 0xffff5f, - 228: 0xffff87, - 229: 0xffffaf, - 230: 0xffffd7, - 231: 0xffffff, - 232: 0x080808, - 233: 0x121212, - 234: 0x1c1c1c, - 235: 0x262626, - 236: 0x303030, - 237: 0x3a3a3a, - 238: 0x444444, - 239: 0x4e4e4e, - 240: 0x585858, - 241: 0x626262, - 242: 0x6c6c6c, - 243: 0x767676, - 244: 0x808080, - 245: 0x8a8a8a, - 246: 0x949494, - 247: 0x9e9e9e, - 248: 0xa8a8a8, - 249: 0xb2b2b2, - 250: 0xbcbcbc, - 251: 0xc6c6c6, - 252: 0xd0d0d0, - 253: 0xdadada, - 254: 0xe4e4e4, - 255: 0xeeeeee, -} - -// `\033]0;TITLESTR\007` -func doTitleSequence(er *bytes.Reader) error { - var c byte - var err error - - c, err = er.ReadByte() - if err != nil { - return err - } - if c != '0' && c != '2' { - return nil - } - c, err = er.ReadByte() - if err != nil { - return err - } - if c != ';' { - return nil - } - title := make([]byte, 0, 80) - for { - c, err = er.ReadByte() - if err != nil { - return err - } - if c == 0x07 || c == '\n' { - break - } - title = append(title, c) - } - if len(title) > 0 { - title8, err := syscall.UTF16PtrFromString(string(title)) - if err == nil { - procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) - } - } - return nil -} - -// returns Atoi(s) unless s == "" in which case it returns def -func atoiWithDefault(s string, def int) (int, error) { - if s == "" { - return def, nil - } - return strconv.Atoi(s) -} - -// Write writes data on console -func (w *Writer) Write(data []byte) (n int, err error) { - w.mutex.Lock() - defer w.mutex.Unlock() - var csbi consoleScreenBufferInfo - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - - handle := w.handle - - var er *bytes.Reader - if w.rest.Len() > 0 { - var rest bytes.Buffer - w.rest.WriteTo(&rest) - w.rest.Reset() - rest.Write(data) - er = bytes.NewReader(rest.Bytes()) - } else { - er = bytes.NewReader(data) - } - var plaintext bytes.Buffer -loop: - for { - c1, err := er.ReadByte() - if err != nil { - plaintext.WriteTo(w.out) - break loop - } - if c1 != 0x1b { - plaintext.WriteByte(c1) - continue - } - _, err = plaintext.WriteTo(w.out) - if err != nil { - break loop - } - c2, err := er.ReadByte() - if err != nil { - break loop - } - - switch c2 { - case '>': - continue - case ']': - w.rest.WriteByte(c1) - w.rest.WriteByte(c2) - er.WriteTo(&w.rest) - if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { - break loop - } - er = 
bytes.NewReader(w.rest.Bytes()[2:]) - err := doTitleSequence(er) - if err != nil { - break loop - } - w.rest.Reset() - continue - // https://github.com/mattn/go-colorable/issues/27 - case '7': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - w.oldpos = csbi.cursorPosition - continue - case '8': - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) - continue - case 0x5b: - // execute part after switch - default: - continue - } - - w.rest.WriteByte(c1) - w.rest.WriteByte(c2) - er.WriteTo(&w.rest) - - var buf bytes.Buffer - var m byte - for i, c := range w.rest.Bytes()[2:] { - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - m = c - er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) - w.rest.Reset() - break - } - buf.Write([]byte(string(c))) - } - if m == 0 { - break loop - } - - switch m { - case 'A': - n, err = atoiWithDefault(buf.String(), 1) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'B': - n, err = atoiWithDefault(buf.String(), 1) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'C': - n, err = atoiWithDefault(buf.String(), 1) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x += short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'D': - n, err = atoiWithDefault(buf.String(), 1) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x -= short(n) - if csbi.cursorPosition.x < 0 { - csbi.cursorPosition.x = 0 - } - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'E': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = 0 - csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'F': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = 0 - csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'G': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - if n < 1 { - n = 1 - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = short(n - 1) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'H', 'f': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - if buf.Len() > 0 { - token := strings.Split(buf.String(), ";") - switch len(token) { - case 1: - n1, err := strconv.Atoi(token[0]) - if err != nil { - continue - } - csbi.cursorPosition.y = short(n1 - 1) - case 2: 
- n1, err := strconv.Atoi(token[0]) - if err != nil { - continue - } - n2, err := strconv.Atoi(token[1]) - if err != nil { - continue - } - csbi.cursorPosition.x = short(n2 - 1) - csbi.cursorPosition.y = short(n1 - 1) - } - } else { - csbi.cursorPosition.y = 0 - } - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'J': - n := 0 - if buf.Len() > 0 { - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - } - var count, written dword - var cursor coord - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - switch n { - case 0: - cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) - case 1: - cursor = coord{x: csbi.window.left, y: csbi.window.top} - count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) - case 2: - cursor = coord{x: csbi.window.left, y: csbi.window.top} - count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) - } - procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - case 'K': - n := 0 - if buf.Len() > 0 { - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - var cursor coord - var count, written dword - switch n { - case 0: - cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - count = dword(csbi.size.x - csbi.cursorPosition.x) - case 1: - cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} - count = dword(csbi.size.x - csbi.cursorPosition.x) - case 2: - cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} - count = dword(csbi.size.x) - } - procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - case 'X': - n := 0 - if buf.Len() > 0 { - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - var cursor coord - var written dword - cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - case 'm': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - attr := csbi.attributes - cs := buf.String() - if cs == "" { - procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) - continue - } - token := strings.Split(cs, ";") - for i := 0; i < len(token); i++ { - ns := token[i] - if n, err = strconv.Atoi(ns); err == nil { - switch { - case n == 0 || n == 
100: - attr = w.oldattr - case n == 4: - attr |= commonLvbUnderscore - case (1 <= n && n <= 3) || n == 5: - attr |= foregroundIntensity - case n == 7 || n == 27: - attr = - (attr &^ (foregroundMask | backgroundMask)) | - ((attr & foregroundMask) << 4) | - ((attr & backgroundMask) >> 4) - case n == 22: - attr &^= foregroundIntensity - case n == 24: - attr &^= commonLvbUnderscore - case 30 <= n && n <= 37: - attr &= backgroundMask - if (n-30)&1 != 0 { - attr |= foregroundRed - } - if (n-30)&2 != 0 { - attr |= foregroundGreen - } - if (n-30)&4 != 0 { - attr |= foregroundBlue - } - case n == 38: // set foreground color. - if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256foreAttr == nil { - n256setup() - } - attr &= backgroundMask - attr |= n256foreAttr[n256%len(n256foreAttr)] - i += 2 - } - } else if len(token) == 5 && token[i+1] == "2" { - var r, g, b int - r, _ = strconv.Atoi(token[i+2]) - g, _ = strconv.Atoi(token[i+3]) - b, _ = strconv.Atoi(token[i+4]) - i += 4 - if r > 127 { - attr |= foregroundRed - } - if g > 127 { - attr |= foregroundGreen - } - if b > 127 { - attr |= foregroundBlue - } - } else { - attr = attr & (w.oldattr & backgroundMask) - } - case n == 39: // reset foreground color. - attr &= backgroundMask - attr |= w.oldattr & foregroundMask - case 40 <= n && n <= 47: - attr &= foregroundMask - if (n-40)&1 != 0 { - attr |= backgroundRed - } - if (n-40)&2 != 0 { - attr |= backgroundGreen - } - if (n-40)&4 != 0 { - attr |= backgroundBlue - } - case n == 48: // set background color. - if i < len(token)-2 && token[i+1] == "5" { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256backAttr == nil { - n256setup() - } - attr &= foregroundMask - attr |= n256backAttr[n256%len(n256backAttr)] - i += 2 - } - } else if len(token) == 5 && token[i+1] == "2" { - var r, g, b int - r, _ = strconv.Atoi(token[i+2]) - g, _ = strconv.Atoi(token[i+3]) - b, _ = strconv.Atoi(token[i+4]) - i += 4 - if r > 127 { - attr |= backgroundRed - } - if g > 127 { - attr |= backgroundGreen - } - if b > 127 { - attr |= backgroundBlue - } - } else { - attr = attr & (w.oldattr & foregroundMask) - } - case n == 49: // reset foreground color. 
- attr &= foregroundMask - attr |= w.oldattr & backgroundMask - case 90 <= n && n <= 97: - attr = (attr & backgroundMask) - attr |= foregroundIntensity - if (n-90)&1 != 0 { - attr |= foregroundRed - } - if (n-90)&2 != 0 { - attr |= foregroundGreen - } - if (n-90)&4 != 0 { - attr |= foregroundBlue - } - case 100 <= n && n <= 107: - attr = (attr & foregroundMask) - attr |= backgroundIntensity - if (n-100)&1 != 0 { - attr |= backgroundRed - } - if (n-100)&2 != 0 { - attr |= backgroundGreen - } - if (n-100)&4 != 0 { - attr |= backgroundBlue - } - } - procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) - } - } - case 'h': - var ci consoleCursorInfo - cs := buf.String() - if cs == "5>" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?25" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?1049" { - if w.althandle == 0 { - h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) - w.althandle = syscall.Handle(h) - if w.althandle != 0 { - handle = w.althandle - } - } - } - case 'l': - var ci consoleCursorInfo - cs := buf.String() - if cs == "5>" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?25" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?1049" { - if w.althandle != 0 { - syscall.CloseHandle(w.althandle) - w.althandle = 0 - handle = w.handle - } - } - case 's': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - w.oldpos = csbi.cursorPosition - case 'u': - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) - } - } - - return len(data), nil -} - -type consoleColor struct { - rgb int - red bool - green bool - blue bool - intensity bool -} - -func (c consoleColor) foregroundAttr() (attr word) { - if c.red { - attr |= foregroundRed - } - if c.green { - attr |= foregroundGreen - } - if c.blue { - attr |= foregroundBlue - } - if c.intensity { - attr |= foregroundIntensity - } - return -} - -func (c consoleColor) backgroundAttr() (attr word) { - if c.red { - attr |= backgroundRed - } - if c.green { - attr |= backgroundGreen - } - if c.blue { - attr |= backgroundBlue - } - if c.intensity { - attr |= backgroundIntensity - } - return -} - -var color16 = []consoleColor{ - {0x000000, false, false, false, false}, - {0x000080, false, false, true, false}, - {0x008000, false, true, false, false}, - {0x008080, false, true, true, false}, - {0x800000, true, false, false, false}, - {0x800080, true, false, true, false}, - {0x808000, true, true, false, false}, - {0xc0c0c0, true, true, true, false}, - {0x808080, false, false, false, true}, - {0x0000ff, false, false, true, true}, - {0x00ff00, false, true, false, true}, - {0x00ffff, false, true, true, true}, - {0xff0000, true, false, false, true}, - {0xff00ff, true, false, true, true}, - {0xffff00, true, true, false, true}, - {0xffffff, true, true, true, true}, -} - -type hsv struct { - h, s, v float32 -} - -func (a hsv) dist(b hsv) 
float32 { - dh := a.h - b.h - switch { - case dh > 0.5: - dh = 1 - dh - case dh < -0.5: - dh = -1 - dh - } - ds := a.s - b.s - dv := a.v - b.v - return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) -} - -func toHSV(rgb int) hsv { - r, g, b := float32((rgb&0xFF0000)>>16)/256.0, - float32((rgb&0x00FF00)>>8)/256.0, - float32(rgb&0x0000FF)/256.0 - min, max := minmax3f(r, g, b) - h := max - min - if h > 0 { - if max == r { - h = (g - b) / h - if h < 0 { - h += 6 - } - } else if max == g { - h = 2 + (b-r)/h - } else { - h = 4 + (r-g)/h - } - } - h /= 6.0 - s := max - min - if max != 0 { - s /= max - } - v := max - return hsv{h: h, s: s, v: v} -} - -type hsvTable []hsv - -func toHSVTable(rgbTable []consoleColor) hsvTable { - t := make(hsvTable, len(rgbTable)) - for i, c := range rgbTable { - t[i] = toHSV(c.rgb) - } - return t -} - -func (t hsvTable) find(rgb int) consoleColor { - hsv := toHSV(rgb) - n := 7 - l := float32(5.0) - for i, p := range t { - d := hsv.dist(p) - if d < l { - l, n = d, i - } - } - return color16[n] -} - -func minmax3f(a, b, c float32) (min, max float32) { - if a < b { - if b < c { - return a, c - } else if a < c { - return a, b - } else { - return c, b - } - } else { - if a < c { - return b, c - } else if b < c { - return b, a - } else { - return c, a - } - } -} - -var n256foreAttr []word -var n256backAttr []word - -func n256setup() { - n256foreAttr = make([]word, 256) - n256backAttr = make([]word, 256) - t := toHSVTable(color16) - for i, rgb := range color256 { - c := t.find(rgb) - n256foreAttr[i] = c.foregroundAttr() - n256backAttr[i] = c.backgroundAttr() - } -} - -// EnableColorsStdout enable colors if possible. -func EnableColorsStdout(enabled *bool) func() { - var mode uint32 - h := os.Stdout.Fd() - if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { - if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { - if enabled != nil { - *enabled = true - } - return func() { - procSetConsoleMode.Call(h, uintptr(mode)) - } - } - } - if enabled != nil { - *enabled = true - } - return func() {} -} diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh deleted file mode 100644 index 012162b0..00000000 --- a/vendor/github.com/mattn/go-colorable/go.test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list ./... | grep -v vendor); do - go test -race -coverprofile=profile.out -covermode=atomic "$d" - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go deleted file mode 100644 index 05d6f74b..00000000 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ /dev/null @@ -1,57 +0,0 @@ -package colorable - -import ( - "bytes" - "io" -) - -// NonColorable holds writer but removes escape sequence. -type NonColorable struct { - out io.Writer -} - -// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. 
-func NewNonColorable(w io.Writer) io.Writer { - return &NonColorable{out: w} -} - -// Write writes data on console -func (w *NonColorable) Write(data []byte) (n int, err error) { - er := bytes.NewReader(data) - var plaintext bytes.Buffer -loop: - for { - c1, err := er.ReadByte() - if err != nil { - plaintext.WriteTo(w.out) - break loop - } - if c1 != 0x1b { - plaintext.WriteByte(c1) - continue - } - _, err = plaintext.WriteTo(w.out) - if err != nil { - break loop - } - c2, err := er.ReadByte() - if err != nil { - break loop - } - if c2 != 0x5b { - continue - } - - for { - c, err := er.ReadByte() - if err != nil { - break loop - } - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - break - } - } - } - - return len(data), nil -} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE deleted file mode 100644 index 65dc692b..00000000 --- a/vendor/github.com/mattn/go-isatty/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) Yasuhiro MATSUMOTO - -MIT License (Expat) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md deleted file mode 100644 index 38418353..00000000 --- a/vendor/github.com/mattn/go-isatty/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# go-isatty - -[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) -[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) -[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) -[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) - -isatty for golang - -## Usage - -```go -package main - -import ( - "fmt" - "github.com/mattn/go-isatty" - "os" -) - -func main() { - if isatty.IsTerminal(os.Stdout.Fd()) { - fmt.Println("Is Terminal") - } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { - fmt.Println("Is Cygwin/MSYS2 Terminal") - } else { - fmt.Println("Is Not Terminal") - } -} -``` - -## Installation - -``` -$ go get github.com/mattn/go-isatty -``` - -## License - -MIT - -## Author - -Yasuhiro Matsumoto (a.k.a mattn) - -## Thanks - -* k-takata: base idea for IsCygwinTerminal - - https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go deleted file mode 100644 index 17d4f90e..00000000 --- a/vendor/github.com/mattn/go-isatty/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package isatty implements interface to isatty -package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh deleted file mode 100644 index 012162b0..00000000 --- a/vendor/github.com/mattn/go-isatty/go.test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list ./... | grep -v vendor); do - go test -race -coverprofile=profile.out -covermode=atomic "$d" - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go deleted file mode 100644 index d569c0c9..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine -// +build darwin freebsd openbsd netbsd dragonfly hurd -// +build !appengine - -package isatty - -import "golang.org/x/sys/unix" - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) - return err == nil -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go deleted file mode 100644 index 31503226..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build appengine || js || nacl || wasm -// +build appengine js nacl wasm - -package isatty - -// IsTerminal returns true if the file descriptor is terminal which -// is always false on js and appengine classic which is a sandboxed PaaS. 
-func IsTerminal(fd uintptr) bool { - return false -} - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go deleted file mode 100644 index bae7f9bb..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_plan9.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build plan9 -// +build plan9 - -package isatty - -import ( - "syscall" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - path, err := syscall.Fd2path(int(fd)) - if err != nil { - return false - } - return path == "/dev/cons" || path == "/mnt/term/dev/cons" -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go deleted file mode 100644 index 0c3acf2d..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build solaris && !appengine -// +build solaris,!appengine - -package isatty - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c -func IsTerminal(fd uintptr) bool { - _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) - return err == nil -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go deleted file mode 100644 index 67787657..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build (linux || aix || zos) && !appengine -// +build linux aix zos -// +build !appengine - -package isatty - -import "golang.org/x/sys/unix" - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) - return err == nil -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. 
-func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go deleted file mode 100644 index 8e3c9917..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go +++ /dev/null @@ -1,125 +0,0 @@ -//go:build windows && !appengine -// +build windows,!appengine - -package isatty - -import ( - "errors" - "strings" - "syscall" - "unicode/utf16" - "unsafe" -) - -const ( - objectNameInfo uintptr = 1 - fileNameInfo = 2 - fileTypePipe = 3 -) - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - ntdll = syscall.NewLazyDLL("ntdll.dll") - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") - procGetFileType = kernel32.NewProc("GetFileType") - procNtQueryObject = ntdll.NewProc("NtQueryObject") -) - -func init() { - // Check if GetFileInformationByHandleEx is available. - if procGetFileInformationByHandleEx.Find() != nil { - procGetFileInformationByHandleEx = nil - } -} - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} - -// Check pipe name is used for cygwin/msys2 pty. -// Cygwin/MSYS2 PTY has a name like: -// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master -func isCygwinPipeName(name string) bool { - token := strings.Split(name, "-") - if len(token) < 5 { - return false - } - - if token[0] != `\msys` && - token[0] != `\cygwin` && - token[0] != `\Device\NamedPipe\msys` && - token[0] != `\Device\NamedPipe\cygwin` { - return false - } - - if token[1] == "" { - return false - } - - if !strings.HasPrefix(token[2], "pty") { - return false - } - - if token[3] != `from` && token[3] != `to` { - return false - } - - if token[4] != "master" { - return false - } - - return true -} - -// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler -// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion -// guys are using Windows XP, this is a workaround for those guys, it will also work on system from -// Windows vista to 10 -// see https://stackoverflow.com/a/18792477 for details -func getFileNameByHandle(fd uintptr) (string, error) { - if procNtQueryObject == nil { - return "", errors.New("ntdll.dll: NtQueryObject not supported") - } - - var buf [4 + syscall.MAX_PATH]uint16 - var result int - r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, - fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) - if r != 0 { - return "", e - } - return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil -} - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. -func IsCygwinTerminal(fd uintptr) bool { - if procGetFileInformationByHandleEx == nil { - name, err := getFileNameByHandle(fd) - if err != nil { - return false - } - return isCygwinPipeName(name) - } - - // Cygwin/msys's pty is a pipe. 
- ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) - if ft != fileTypePipe || e != 0 { - return false - } - - var buf [2 + syscall.MAX_PATH]uint16 - r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), - 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), - uintptr(len(buf)*2), 0, 0) - if r == 0 || e != 0 { - return false - } - - l := *(*uint32)(unsafe.Pointer(&buf)) - return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) -} diff --git a/vendor/github.com/redis/go-redis/v9/.gitignore b/vendor/github.com/redis/go-redis/v9/.gitignore index dc322f9b..64a7cb51 100644 --- a/vendor/github.com/redis/go-redis/v9/.gitignore +++ b/vendor/github.com/redis/go-redis/v9/.gitignore @@ -1,3 +1,4 @@ *.rdb testdata/* .idea/ +.DS_Store diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile index 285f65dd..b59c3955 100644 --- a/vendor/github.com/redis/go-redis/v9/Makefile +++ b/vendor/github.com/redis/go-redis/v9/Makefile @@ -23,7 +23,7 @@ bench: testdeps testdata/redis: mkdir -p $@ - wget -qO- https://download.redis.io/releases/redis-7.2-rc1.tar.gz | tar xvz --strip-components=1 -C $@ + wget -qO- https://download.redis.io/releases/redis-7.2-rc3.tar.gz | tar xvz --strip-components=1 -C $@ testdata/redis/src/redis-server: testdata/redis cd $< && make all diff --git a/vendor/github.com/redis/go-redis/v9/README.md b/vendor/github.com/redis/go-redis/v9/README.md index 36d60fd4..3486e8e5 100644 --- a/vendor/github.com/redis/go-redis/v9/README.md +++ b/vendor/github.com/redis/go-redis/v9/README.md @@ -46,6 +46,7 @@ key value NoSQL database that uses RocksDB as storage engine and is compatible w - [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html). - [Redis Ring](https://redis.uptrace.dev/guide/ring.html). - [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html). +- [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/) ## Installation @@ -105,6 +106,40 @@ func ExampleClient() { } ``` +The above can be modified to specify the version of the RESP protocol by adding the `protocol` option to the `Options` struct: + +```go + rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3 + }) + +``` + +### Connecting via a redis url + +go-redis also supports connecting via the [redis uri specification](https://github.com/redis/redis-specifications/tree/master/uri/redis.txt). The example below demonstrates how the connection can easily be configured using a string, adhering to this specification. 
+ +```go +import ( + "context" + "github.com/redis/go-redis/v9" + "fmt" +) + +var ctx = context.Background() + +func ExampleClient() { + url := "redis://localhost:6379?password=hello&protocol=3" + opts, err := redis.ParseURL(url) + if err != nil { + panic(err) + } + rdb := redis.NewClient(opts) +``` + ## Look and feel Some corner cases: diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go index f10e7365..1bd4d5db 100644 --- a/vendor/github.com/redis/go-redis/v9/command.go +++ b/vendor/github.com/redis/go-redis/v9/command.go @@ -340,6 +340,8 @@ func (cmd *Cmd) Bool() (bool, error) { func toBool(val interface{}) (bool, error) { switch val := val.(type) { + case bool: + return val, nil case int64: return val != 0, nil case string: @@ -3711,6 +3713,71 @@ func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error { return nil } +//----------------------------------------------------------------------- + +type MapStringInterfaceSliceCmd struct { + baseCmd + + val []map[string]interface{} +} + +var _ Cmder = (*MapStringInterfaceSliceCmd)(nil) + +func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd { + return &MapStringInterfaceSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapStringInterfaceSliceCmd) SetVal(val []map[string]interface{}) { + cmd.val = val +} + +func (cmd *MapStringInterfaceSliceCmd) Val() []map[string]interface{} { + return cmd.val +} + +func (cmd *MapStringInterfaceSliceCmd) Result() ([]map[string]interface{}, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *MapStringInterfaceSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]map[string]interface{}, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return err + } + cmd.val[i] = make(map[string]interface{}, nn) + for f := 0; f < nn; f++ { + k, err := rd.ReadString() + if err != nil { + return err + } + v, err := rd.ReadReply() + if err != nil { + if err != Nil { + return err + } + } + cmd.val[i][k] = v + } + } + return nil +} + //------------------------------------------------------------------------------ type KeyValuesCmd struct { diff --git a/vendor/github.com/redis/go-redis/v9/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go index 34f4d2c2..07c8e2c8 100644 --- a/vendor/github.com/redis/go-redis/v9/commands.go +++ b/vendor/github.com/redis/go-redis/v9/commands.go @@ -504,6 +504,9 @@ type Cmdable interface { ACLLogReset(ctx context.Context) *StatusCmd ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd + + gearsCmdable + probabilisticCmdable } type StatefulCmdable interface { diff --git a/vendor/github.com/redis/go-redis/v9/package.json b/vendor/github.com/redis/go-redis/v9/package.json index e26e2914..9fff597c 100644 --- a/vendor/github.com/redis/go-redis/v9/package.json +++ b/vendor/github.com/redis/go-redis/v9/package.json @@ -1,6 +1,6 @@ { "name": "redis", - "version": "9.0.5", + "version": "9.1.0", "main": "index.js", "repository": "git@github.com:redis/go-redis.git", "author": "Vladimir Mihailenco ", diff --git a/vendor/github.com/redis/go-redis/v9/probabilistic.go b/vendor/github.com/redis/go-redis/v9/probabilistic.go new file mode 100644 index 00000000..8e32bca9 --- /dev/null +++ 
b/vendor/github.com/redis/go-redis/v9/probabilistic.go @@ -0,0 +1,1433 @@ +package redis + +import ( + "context" + "fmt" + + "github.com/redis/go-redis/v9/internal/proto" +) + +type probabilisticCmdable interface { + BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd + BFCard(ctx context.Context, key string) *IntCmd + BFExists(ctx context.Context, key string, element interface{}) *BoolCmd + BFInfo(ctx context.Context, key string) *BFInfoCmd + BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd + BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd + BFInfoSize(ctx context.Context, key string) *BFInfoCmd + BFInfoFilters(ctx context.Context, key string) *BFInfoCmd + BFInfoItems(ctx context.Context, key string) *BFInfoCmd + BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd + BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd + BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd + BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd + BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd + BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd + BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd + BFReserveArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd + BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd + BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd + + CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd + CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd + CFCount(ctx context.Context, key string, element interface{}) *IntCmd + CFDel(ctx context.Context, key string, element interface{}) *BoolCmd + CFExists(ctx context.Context, key string, element interface{}) *BoolCmd + CFInfo(ctx context.Context, key string) *CFInfoCmd + CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd + CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd + CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd + CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd + CFReserveArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd + CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd + CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd + CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd + CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd + CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd + + CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd + CMSInfo(ctx context.Context, key string) *CMSInfoCmd + CMSInitByDim(ctx context.Context, key string, width, height int64) *StatusCmd + CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd + CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd + CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd + CMSQuery(ctx context.Context, key string, elements 
...interface{}) *IntSliceCmd + + TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd + TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd + TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd + TopKInfo(ctx context.Context, key string) *TopKInfoCmd + TopKList(ctx context.Context, key string) *StringSliceCmd + TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd + TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd + TopKReserve(ctx context.Context, key string, k int64) *StatusCmd + TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd + + TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd + TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd + TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd + TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd + TDigestCreate(ctx context.Context, key string) *StatusCmd + TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd + TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd + TDigestMax(ctx context.Context, key string) *FloatCmd + TDigestMin(ctx context.Context, key string) *FloatCmd + TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd + TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd + TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd + TDigestReset(ctx context.Context, key string) *StatusCmd + TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd + TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd +} + +type BFInsertOptions struct { + Capacity int64 + Error float64 + Expansion int64 + NonScaling bool + NoCreate bool +} + +type BFReserveOptions struct { + Capacity int64 + Error float64 + Expansion int64 + NonScaling bool +} + +type CFReserveOptions struct { + Capacity int64 + BucketSize int64 + MaxIterations int64 + Expansion int64 +} + +type CFInsertOptions struct { + Capacity int64 + NoCreate bool +} + +// ------------------------------------------- +// Bloom filter commands +//------------------------------------------- + +// BFReserve creates an empty Bloom filter with a single sub-filter +// for the initial specified capacity and with an upper bound error_rate. +// For more information - https://redis.io/commands/bf.reserve/ +func (c cmdable) BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd { + args := []interface{}{"BF.RESERVE", key, errorRate, capacity} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// BFReserveExpansion creates an empty Bloom filter with a single sub-filter +// for the initial specified capacity and with an upper bound error_rate. +// This function also allows for specifying an expansion rate for the filter. +// For more information - https://redis.io/commands/bf.reserve/ +func (c cmdable) BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd { + args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "EXPANSION", expansion} + cmd := NewStatusCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +// BFReserveNonScaling creates an empty Bloom filter with a single sub-filter +// for the initial specified capacity and with an upper bound error_rate. +// This function also allows for specifying that the filter should not scale. +// For more information - https://redis.io/commands/bf.reserve/ +func (c cmdable) BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd { + args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "NONSCALING"} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// BFReserveArgs creates an empty Bloom filter with a single sub-filter +// for the initial specified capacity and with an upper bound error_rate. +// This function also allows for specifying additional options such as expansion rate and non-scaling behavior. +// For more information - https://redis.io/commands/bf.reserve/ +func (c cmdable) BFReserveArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd { + args := []interface{}{"BF.RESERVE", key} + if options != nil { + if options.Error != 0 { + args = append(args, options.Error) + } + if options.Capacity != 0 { + args = append(args, options.Capacity) + } + if options.Expansion != 0 { + args = append(args, "EXPANSION", options.Expansion) + } + if options.NonScaling { + args = append(args, "NONSCALING") + } + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// BFAdd adds an item to a Bloom filter. +// For more information - https://redis.io/commands/bf.add/ +func (c cmdable) BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd { + args := []interface{}{"BF.ADD", key, element} + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// BFCard returns the cardinality of a Bloom filter - +// number of items that were added to a Bloom filter and detected as unique +// (items that caused at least one bit to be set in at least one sub-filter). +// For more information - https://redis.io/commands/bf.card/ +func (c cmdable) BFCard(ctx context.Context, key string) *IntCmd { + args := []interface{}{"BF.CARD", key} + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// BFExists determines whether a given item was added to a Bloom filter. +// For more information - https://redis.io/commands/bf.exists/ +func (c cmdable) BFExists(ctx context.Context, key string, element interface{}) *BoolCmd { + args := []interface{}{"BF.EXISTS", key, element} + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// BFLoadChunk restores a Bloom filter previously saved using BF.SCANDUMP. +// For more information - https://redis.io/commands/bf.loadchunk/ +func (c cmdable) BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd { + args := []interface{}{"BF.LOADCHUNK", key, iterator, data} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Begins an incremental save of the Bloom filter. +// This command is useful for large Bloom filters that cannot fit into the DUMP and RESTORE model. +// For more information - https://redis.io/commands/bf.scandump/ +func (c cmdable) BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd { + args := []interface{}{"BF.SCANDUMP", key, iterator} + cmd := newScanDumpCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +type ScanDump struct { + Iter int64 + Data string +} + +type ScanDumpCmd struct { + baseCmd + + val ScanDump +} + +func newScanDumpCmd(ctx context.Context, args ...interface{}) *ScanDumpCmd { + return &ScanDumpCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ScanDumpCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ScanDumpCmd) SetVal(val ScanDump) { + cmd.val = val +} + +func (cmd *ScanDumpCmd) Result() (ScanDump, error) { + return cmd.val, cmd.err +} + +func (cmd *ScanDumpCmd) Val() ScanDump { + return cmd.val +} + +func (cmd *ScanDumpCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + cmd.val = ScanDump{} + for i := 0; i < n; i++ { + iter, err := rd.ReadInt() + if err != nil { + return err + } + data, err := rd.ReadString() + if err != nil { + return err + } + cmd.val.Data = data + cmd.val.Iter = iter + + } + + return nil +} + +// Returns information about a Bloom filter. +// For more information - https://redis.io/commands/bf.info/ +func (c cmdable) BFInfo(ctx context.Context, key string) *BFInfoCmd { + args := []interface{}{"BF.INFO", key} + cmd := NewBFInfoCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +type BFInfo struct { + Capacity int64 + Size int64 + Filters int64 + ItemsInserted int64 + ExpansionRate int64 +} + +type BFInfoCmd struct { + baseCmd + + val BFInfo +} + +func NewBFInfoCmd(ctx context.Context, args ...interface{}) *BFInfoCmd { + return &BFInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BFInfoCmd) SetVal(val BFInfo) { + cmd.val = val +} +func (cmd *BFInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BFInfoCmd) Val() BFInfo { + return cmd.val +} + +func (cmd *BFInfoCmd) Result() (BFInfo, error) { + return cmd.val, cmd.err +} + +func (cmd *BFInfoCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + var result BFInfo + for f := 0; f < n; f++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "Capacity": + result.Capacity, err = rd.ReadInt() + case "Size": + result.Size, err = rd.ReadInt() + case "Number of filters": + result.Filters, err = rd.ReadInt() + case "Number of items inserted": + result.ItemsInserted, err = rd.ReadInt() + case "Expansion rate": + result.ExpansionRate, err = rd.ReadInt() + default: + return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key) + } + + if err != nil { + return err + } + } + + cmd.val = result + return nil +} + +// BFInfoCapacity returns information about the capacity of a Bloom filter. +// For more information - https://redis.io/commands/bf.info/ +func (c cmdable) BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd { + return c.BFInfoArg(ctx, key, "CAPACITY") +} + +// BFInfoSize returns information about the size of a Bloom filter. +// For more information - https://redis.io/commands/bf.info/ +func (c cmdable) BFInfoSize(ctx context.Context, key string) *BFInfoCmd { + return c.BFInfoArg(ctx, key, "SIZE") +} + +// BFInfoFilters returns information about the filters of a Bloom filter. +// For more information - https://redis.io/commands/bf.info/ +func (c cmdable) BFInfoFilters(ctx context.Context, key string) *BFInfoCmd { + return c.BFInfoArg(ctx, key, "FILTERS") +} + +// BFInfoItems returns information about the items of a Bloom filter. 
+// For more information - https://redis.io/commands/bf.info/ +func (c cmdable) BFInfoItems(ctx context.Context, key string) *BFInfoCmd { + return c.BFInfoArg(ctx, key, "ITEMS") +} + +// BFInfoExpansion returns information about the expansion rate of a Bloom filter. +// For more information - https://redis.io/commands/bf.info/ +func (c cmdable) BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd { + return c.BFInfoArg(ctx, key, "EXPANSION") +} + +// BFInfoArg returns information about a specific option of a Bloom filter. +// For more information - https://redis.io/commands/bf.info/ +func (c cmdable) BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd { + args := []interface{}{"BF.INFO", key, option} + cmd := NewBFInfoCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// BFInsert inserts elements into a Bloom filter. +// This function also allows for specifying additional options such as: +// capacity, error rate, expansion rate, and non-scaling behavior. +// For more information - https://redis.io/commands/bf.insert/ +func (c cmdable) BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd { + args := []interface{}{"BF.INSERT", key} + if options != nil { + if options.Capacity != 0 { + args = append(args, "CAPACITY", options.Capacity) + } + if options.Error != 0 { + args = append(args, "ERROR", options.Error) + } + if options.Expansion != 0 { + args = append(args, "EXPANSION", options.Expansion) + } + if options.NoCreate { + args = append(args, "NOCREATE") + } + if options.NonScaling { + args = append(args, "NONSCALING") + } + } + args = append(args, "ITEMS") + args = append(args, elements...) + + cmd := NewBoolSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// BFMAdd adds multiple elements to a Bloom filter. +// Returns an array of booleans indicating whether each element was added to the filter or not. +// For more information - https://redis.io/commands/bf.madd/ +func (c cmdable) BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd { + args := []interface{}{"BF.MADD", key} + args = append(args, elements...) + cmd := NewBoolSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// BFMExists check if multiple elements exist in a Bloom filter. +// Returns an array of booleans indicating whether each element exists in the filter or not. +// For more information - https://redis.io/commands/bf.mexists/ +func (c cmdable) BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd { + args := []interface{}{"BF.MEXISTS", key} + args = append(args, elements...) + + cmd := NewBoolSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ------------------------------------------- +// Cuckoo filter commands +//------------------------------------------- + +// CFReserve creates an empty Cuckoo filter with the specified capacity. +// For more information - https://redis.io/commands/cf.reserve/ +func (c cmdable) CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd { + args := []interface{}{"CF.RESERVE", key, capacity} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFReserveExpansion creates an empty Cuckoo filter with the specified capacity and expansion rate. 
+// For more information - https://redis.io/commands/cf.reserve/ +func (c cmdable) CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd { + args := []interface{}{"CF.RESERVE", key, capacity, "EXPANSION", expansion} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFReserveBucketSize creates an empty Cuckoo filter with the specified capacity and bucket size. +// For more information - https://redis.io/commands/cf.reserve/ +func (c cmdable) CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd { + args := []interface{}{"CF.RESERVE", key, capacity, "BUCKETSIZE", bucketsize} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFReserveMaxIterations creates an empty Cuckoo filter with the specified capacity and maximum number of iterations. +// For more information - https://redis.io/commands/cf.reserve/ +func (c cmdable) CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd { + args := []interface{}{"CF.RESERVE", key, capacity, "MAXITERATIONS", maxiterations} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFReserveArgs creates an empty Cuckoo filter with the specified options. +// This function allows for specifying additional options such as bucket size and maximum number of iterations. +// For more information - https://redis.io/commands/cf.reserve/ +func (c cmdable) CFReserveArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd { + args := []interface{}{"CF.RESERVE", key, options.Capacity} + if options.BucketSize != 0 { + args = append(args, "BUCKETSIZE", options.BucketSize) + } + if options.MaxIterations != 0 { + args = append(args, "MAXITERATIONS", options.MaxIterations) + } + if options.Expansion != 0 { + args = append(args, "EXPANSION", options.Expansion) + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFAdd adds an element to a Cuckoo filter. +// Returns true if the element was added to the filter or false if it already exists in the filter. +// For more information - https://redis.io/commands/cf.add/ +func (c cmdable) CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd { + args := []interface{}{"CF.ADD", key, element} + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFAddNX adds an element to a Cuckoo filter only if it does not already exist in the filter. +// Returns true if the element was added to the filter or false if it already exists in the filter. +// For more information - https://redis.io/commands/cf.addnx/ +func (c cmdable) CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd { + args := []interface{}{"CF.ADDNX", key, element} + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFCount returns an estimate of the number of times an element may be in a Cuckoo Filter. +// For more information - https://redis.io/commands/cf.count/ +func (c cmdable) CFCount(ctx context.Context, key string, element interface{}) *IntCmd { + args := []interface{}{"CF.COUNT", key, element} + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFDel deletes an item once from the cuckoo filter. +// For more information - https://redis.io/commands/cf.del/ +func (c cmdable) CFDel(ctx context.Context, key string, element interface{}) *BoolCmd { + args := []interface{}{"CF.DEL", key, element} + cmd := NewBoolCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +// CFExists determines whether an item may exist in the Cuckoo Filter or not. +// For more information - https://redis.io/commands/cf.exists/ +func (c cmdable) CFExists(ctx context.Context, key string, element interface{}) *BoolCmd { + args := []interface{}{"CF.EXISTS", key, element} + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFLoadChunk restores a filter previously saved using SCANDUMP. +// For more information - https://redis.io/commands/cf.loadchunk/ +func (c cmdable) CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd { + args := []interface{}{"CF.LOADCHUNK", key, iterator, data} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFScanDump begins an incremental save of the cuckoo filter. +// For more information - https://redis.io/commands/cf.scandump/ +func (c cmdable) CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd { + args := []interface{}{"CF.SCANDUMP", key, iterator} + cmd := newScanDumpCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +type CFInfo struct { + Size int64 + NumBuckets int64 + NumFilters int64 + NumItemsInserted int64 + NumItemsDeleted int64 + BucketSize int64 + ExpansionRate int64 + MaxIteration int64 +} + +type CFInfoCmd struct { + baseCmd + + val CFInfo +} + +func NewCFInfoCmd(ctx context.Context, args ...interface{}) *CFInfoCmd { + return &CFInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *CFInfoCmd) SetVal(val CFInfo) { + cmd.val = val +} + +func (cmd *CFInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CFInfoCmd) Val() CFInfo { + return cmd.val +} + +func (cmd *CFInfoCmd) Result() (CFInfo, error) { + return cmd.val, cmd.err +} + +func (cmd *CFInfoCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + var result CFInfo + for f := 0; f < n; f++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "Size": + result.Size, err = rd.ReadInt() + case "Number of buckets": + result.NumBuckets, err = rd.ReadInt() + case "Number of filters": + result.NumFilters, err = rd.ReadInt() + case "Number of items inserted": + result.NumItemsInserted, err = rd.ReadInt() + case "Number of items deleted": + result.NumItemsDeleted, err = rd.ReadInt() + case "Bucket size": + result.BucketSize, err = rd.ReadInt() + case "Expansion rate": + result.ExpansionRate, err = rd.ReadInt() + case "Max iterations": + result.MaxIteration, err = rd.ReadInt() + + default: + return fmt.Errorf("redis: CF.INFO unexpected key %s", key) + } + + if err != nil { + return err + } + } + + cmd.val = result + return nil +} + +// CFInfo returns information about a Cuckoo filter. +// For more information - https://redis.io/commands/cf.info/ +func (c cmdable) CFInfo(ctx context.Context, key string) *CFInfoCmd { + args := []interface{}{"CF.INFO", key} + cmd := NewCFInfoCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFInsert inserts elements into a Cuckoo filter. +// This function also allows for specifying additional options such as capacity, error rate, expansion rate, and non-scaling behavior. +// Returns an array of booleans indicating whether each element was added to the filter or not. 
+// For more information - https://redis.io/commands/cf.insert/ +func (c cmdable) CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd { + args := []interface{}{"CF.INSERT", key} + args = c.getCfInsertArgs(args, options, elements...) + + cmd := NewBoolSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CFInsertNX inserts elements into a Cuckoo filter only if they do not already exist in the filter. +// This function also allows for specifying additional options such as: +// capacity, error rate, expansion rate, and non-scaling behavior. +// Returns an array of integers indicating whether each element was added to the filter or not. +// For more information - https://redis.io/commands/cf.insertnx/ +func (c cmdable) CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd { + args := []interface{}{"CF.INSERTNX", key} + args = c.getCfInsertArgs(args, options, elements...) + + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) getCfInsertArgs(args []interface{}, options *CFInsertOptions, elements ...interface{}) []interface{} { + if options != nil { + if options.Capacity != 0 { + args = append(args, "CAPACITY", options.Capacity) + } + if options.NoCreate { + args = append(args, "NOCREATE") + } + } + args = append(args, "ITEMS") + args = append(args, elements...) + + return args +} + +// CFMExists check if multiple elements exist in a Cuckoo filter. +// Returns an array of booleans indicating whether each element exists in the filter or not. +// For more information - https://redis.io/commands/cf.mexists/ +func (c cmdable) CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd { + args := []interface{}{"CF.MEXISTS", key} + args = append(args, elements...) + cmd := NewBoolSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ------------------------------------------- +// CMS commands +//------------------------------------------- + +// CMSIncrBy increments the count of one or more items in a Count-Min Sketch filter. +// Returns an array of integers representing the updated count of each item. +// For more information - https://redis.io/commands/cms.incrby/ +func (c cmdable) CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd { + args := make([]interface{}, 2, 2+len(elements)) + args[0] = "CMS.INCRBY" + args[1] = key + args = appendArgs(args, elements) + + cmd := NewIntSliceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +type CMSInfo struct { + Width int64 + Depth int64 + Count int64 +} + +type CMSInfoCmd struct { + baseCmd + + val CMSInfo +} + +func NewCMSInfoCmd(ctx context.Context, args ...interface{}) *CMSInfoCmd { + return &CMSInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *CMSInfoCmd) SetVal(val CMSInfo) { + cmd.val = val +} + +func (cmd *CMSInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CMSInfoCmd) Val() CMSInfo { + return cmd.val +} + +func (cmd *CMSInfoCmd) Result() (CMSInfo, error) { + return cmd.val, cmd.err +} + +func (cmd *CMSInfoCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + var result CMSInfo + for f := 0; f < n; f++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "width": + result.Width, err = rd.ReadInt() + case "depth": + result.Depth, err = rd.ReadInt() + case "count": + result.Count, err = rd.ReadInt() + default: + return fmt.Errorf("redis: CMS.INFO unexpected key %s", key) + } + + if err != nil { + return err + } + } + + cmd.val = result + return nil +} + +// CMSInfo returns information about a Count-Min Sketch filter. +// For more information - https://redis.io/commands/cms.info/ +func (c cmdable) CMSInfo(ctx context.Context, key string) *CMSInfoCmd { + args := []interface{}{"CMS.INFO", key} + cmd := NewCMSInfoCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CMSInitByDim creates an empty Count-Min Sketch filter with the specified dimensions. +// For more information - https://redis.io/commands/cms.initbydim/ +func (c cmdable) CMSInitByDim(ctx context.Context, key string, width, depth int64) *StatusCmd { + args := []interface{}{"CMS.INITBYDIM", key, width, depth} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CMSInitByProb creates an empty Count-Min Sketch filter with the specified error rate and probability. +// For more information - https://redis.io/commands/cms.initbyprob/ +func (c cmdable) CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd { + args := []interface{}{"CMS.INITBYPROB", key, errorRate, probability} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CMSMerge merges multiple Count-Min Sketch filters into a single filter. +// The destination filter must not exist and will be created with the dimensions of the first source filter. +// The number of items in each source filter must be equal. +// Returns OK on success or an error if the filters could not be merged. +// For more information - https://redis.io/commands/cms.merge/ +func (c cmdable) CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd { + args := []interface{}{"CMS.MERGE", destKey, len(sourceKeys)} + for _, s := range sourceKeys { + args = append(args, s) + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CMSMergeWithWeight merges multiple Count-Min Sketch filters into a single filter with weights for each source filter. +// The destination filter must not exist and will be created with the dimensions of the first source filter. +// The number of items in each source filter must be equal. +// Returns OK on success or an error if the filters could not be merged. 
+// For more information - https://redis.io/commands/cms.merge/ +func (c cmdable) CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd { + args := make([]interface{}, 0, 4+(len(sourceKeys)*2+1)) + args = append(args, "CMS.MERGE", destKey, len(sourceKeys)) + + if len(sourceKeys) > 0 { + sk := make([]interface{}, len(sourceKeys)) + sw := make([]interface{}, len(sourceKeys)) + + i := 0 + for k, w := range sourceKeys { + sk[i] = k + sw[i] = w + i++ + } + + args = append(args, sk...) + args = append(args, "WEIGHTS") + args = append(args, sw...) + } + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// CMSQuery returns count for item(s). +// For more information - https://redis.io/commands/cms.query/ +func (c cmdable) CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd { + args := []interface{}{"CMS.QUERY", key} + args = append(args, elements...) + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ------------------------------------------- +// TopK commands +//-------------------------------------------- + +// TopKAdd adds one or more elements to a Top-K filter. +// Returns an array of strings representing the items that were removed from the filter, if any. +// For more information - https://redis.io/commands/topk.add/ +func (c cmdable) TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd { + args := make([]interface{}, 2, 2+len(elements)) + args[0] = "TOPK.ADD" + args[1] = key + args = appendArgs(args, elements) + + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TopKReserve creates an empty Top-K filter with the specified number of top items to keep. +// For more information - https://redis.io/commands/topk.reserve/ +func (c cmdable) TopKReserve(ctx context.Context, key string, k int64) *StatusCmd { + args := []interface{}{"TOPK.RESERVE", key, k} + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TopKReserveWithOptions creates an empty Top-K filter with the specified number of top items to keep and additional options. +// This function allows for specifying additional options such as width, depth and decay. +// For more information - https://redis.io/commands/topk.reserve/ +func (c cmdable) TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd { + args := []interface{}{"TOPK.RESERVE", key, k, width, depth, decay} + + cmd := NewStatusCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +type TopKInfo struct { + K int64 + Width int64 + Depth int64 + Decay float64 +} + +type TopKInfoCmd struct { + baseCmd + + val TopKInfo +} + +func NewTopKInfoCmd(ctx context.Context, args ...interface{}) *TopKInfoCmd { + return &TopKInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *TopKInfoCmd) SetVal(val TopKInfo) { + cmd.val = val +} + +func (cmd *TopKInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TopKInfoCmd) Val() TopKInfo { + return cmd.val +} + +func (cmd *TopKInfoCmd) Result() (TopKInfo, error) { + return cmd.val, cmd.err +} + +func (cmd *TopKInfoCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + var result TopKInfo + for f := 0; f < n; f++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "k": + result.K, err = rd.ReadInt() + case "width": + result.Width, err = rd.ReadInt() + case "depth": + result.Depth, err = rd.ReadInt() + case "decay": + result.Decay, err = rd.ReadFloat() + default: + return fmt.Errorf("redis: topk.info unexpected key %s", key) + } + + if err != nil { + return err + } + } + + cmd.val = result + return nil +} + +// TopKInfo returns information about a Top-K filter. +// For more information - https://redis.io/commands/topk.info/ +func (c cmdable) TopKInfo(ctx context.Context, key string) *TopKInfoCmd { + args := []interface{}{"TOPK.INFO", key} + + cmd := NewTopKInfoCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TopKQuery check if multiple elements exist in a Top-K filter. +// Returns an array of booleans indicating whether each element exists in the filter or not. +// For more information - https://redis.io/commands/topk.query/ +func (c cmdable) TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd { + args := make([]interface{}, 2, 2+len(elements)) + args[0] = "TOPK.QUERY" + args[1] = key + args = appendArgs(args, elements) + + cmd := NewBoolSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TopKCount returns an estimate of the number of times an item may be in a Top-K filter. +// For more information - https://redis.io/commands/topk.count/ +func (c cmdable) TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd { + args := make([]interface{}, 2, 2+len(elements)) + args[0] = "TOPK.COUNT" + args[1] = key + args = appendArgs(args, elements) + + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TopKIncrBy increases the count of one or more items in a Top-K filter. +// For more information - https://redis.io/commands/topk.incrby/ +func (c cmdable) TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd { + args := make([]interface{}, 2, 2+len(elements)) + args[0] = "TOPK.INCRBY" + args[1] = key + args = appendArgs(args, elements) + + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TopKList returns all items in Top-K list. +// For more information - https://redis.io/commands/topk.list/ +func (c cmdable) TopKList(ctx context.Context, key string) *StringSliceCmd { + args := []interface{}{"TOPK.LIST", key} + + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TopKListWithCount returns all items in Top-K list with their respective count. 
+// For more information - https://redis.io/commands/topk.list/ +func (c cmdable) TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd { + args := []interface{}{"TOPK.LIST", key, "WITHCOUNT"} + + cmd := NewMapStringIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ------------------------------------------- +// t-digest commands +// -------------------------------------------- + +// TDigestAdd adds one or more elements to a t-Digest data structure. +// Returns OK on success or an error if the operation could not be completed. +// For more information - https://redis.io/commands/tdigest.add/ +func (c cmdable) TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd { + args := make([]interface{}, 2, 2+len(elements)) + args[0] = "TDIGEST.ADD" + args[1] = key + + // Convert floatSlice to []interface{} + interfaceSlice := make([]interface{}, len(elements)) + for i, v := range elements { + interfaceSlice[i] = v + } + + args = append(args, interfaceSlice...) + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestByRank returns an array of values from a t-Digest data structure based on their rank. +// The rank of an element is its position in the sorted list of all elements in the t-Digest. +// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed. +// For more information - https://redis.io/commands/tdigest.byrank/ +func (c cmdable) TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd { + args := make([]interface{}, 2, 2+len(rank)) + args[0] = "TDIGEST.BYRANK" + args[1] = key + + // Convert uint slice to []interface{} + interfaceSlice := make([]interface{}, len(rank)) + for i, v := range rank { + interfaceSlice[i] = v + } + + args = append(args, interfaceSlice...) + + cmd := NewFloatSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestByRevRank returns an array of values from a t-Digest data structure based on their reverse rank. +// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order. +// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed. +// For more information - https://redis.io/commands/tdigest.byrevrank/ +func (c cmdable) TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd { + args := make([]interface{}, 2, 2+len(rank)) + args[0] = "TDIGEST.BYREVRANK" + args[1] = key + + // Convert uint slice to []interface{} + interfaceSlice := make([]interface{}, len(rank)) + for i, v := range rank { + interfaceSlice[i] = v + } + + args = append(args, interfaceSlice...) + + cmd := NewFloatSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestCDF returns an array of cumulative distribution function (CDF) values for one or more elements in a t-Digest data structure. +// The CDF value for an element is the fraction of all elements in the t-Digest that are less than or equal to it. +// Returns an array of floats representing the CDF values for each element or an error if the operation could not be completed. 
+// For more information - https://redis.io/commands/tdigest.cdf/ +func (c cmdable) TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd { + args := make([]interface{}, 2, 2+len(elements)) + args[0] = "TDIGEST.CDF" + args[1] = key + + // Convert floatSlice to []interface{} + interfaceSlice := make([]interface{}, len(elements)) + for i, v := range elements { + interfaceSlice[i] = v + } + + args = append(args, interfaceSlice...) + + cmd := NewFloatSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestCreate creates an empty t-Digest data structure with default parameters. +// Returns OK on success or an error if the operation could not be completed. +// For more information - https://redis.io/commands/tdigest.create/ +func (c cmdable) TDigestCreate(ctx context.Context, key string) *StatusCmd { + args := []interface{}{"TDIGEST.CREATE", key} + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestCreateWithCompression creates an empty t-Digest data structure with a specified compression parameter. +// The compression parameter controls the accuracy and memory usage of the t-Digest. +// Returns OK on success or an error if the operation could not be completed. +// For more information - https://redis.io/commands/tdigest.create/ +func (c cmdable) TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd { + args := []interface{}{"TDIGEST.CREATE", key, "COMPRESSION", compression} + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +type TDigestInfo struct { + Compression int64 + Capacity int64 + MergedNodes int64 + UnmergedNodes int64 + MergedWeight int64 + UnmergedWeight int64 + Observations int64 + TotalCompressions int64 + MemoryUsage int64 +} + +type TDigestInfoCmd struct { + baseCmd + + val TDigestInfo +} + +func NewTDigestInfoCmd(ctx context.Context, args ...interface{}) *TDigestInfoCmd { + return &TDigestInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *TDigestInfoCmd) SetVal(val TDigestInfo) { + cmd.val = val +} + +func (cmd *TDigestInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TDigestInfoCmd) Val() TDigestInfo { + return cmd.val +} + +func (cmd *TDigestInfoCmd) Result() (TDigestInfo, error) { + return cmd.val, cmd.err +} + +func (cmd *TDigestInfoCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + var result TDigestInfo + for f := 0; f < n; f++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "Compression": + result.Compression, err = rd.ReadInt() + case "Capacity": + result.Capacity, err = rd.ReadInt() + case "Merged nodes": + result.MergedNodes, err = rd.ReadInt() + case "Unmerged nodes": + result.UnmergedNodes, err = rd.ReadInt() + case "Merged weight": + result.MergedWeight, err = rd.ReadInt() + case "Unmerged weight": + result.UnmergedWeight, err = rd.ReadInt() + case "Observations": + result.Observations, err = rd.ReadInt() + case "Total compressions": + result.TotalCompressions, err = rd.ReadInt() + case "Memory usage": + result.MemoryUsage, err = rd.ReadInt() + default: + return fmt.Errorf("redis: tdigest.info unexpected key %s", key) + } + + if err != nil { + return err + } + } + + cmd.val = result + return nil +} + +// TDigestInfo returns information about a t-Digest data structure. 
+// For more information - https://redis.io/commands/tdigest.info/ +func (c cmdable) TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd { + args := []interface{}{"TDIGEST.INFO", key} + + cmd := NewTDigestInfoCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestMax returns the maximum value from a t-Digest data structure. +// For more information - https://redis.io/commands/tdigest.max/ +func (c cmdable) TDigestMax(ctx context.Context, key string) *FloatCmd { + args := []interface{}{"TDIGEST.MAX", key} + + cmd := NewFloatCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +type TDigestMergeOptions struct { + Compression int64 + Override bool +} + +// TDigestMerge merges multiple t-Digest data structures into a single t-Digest. +// This function also allows for specifying additional options such as compression and override behavior. +// Returns OK on success or an error if the operation could not be completed. +// For more information - https://redis.io/commands/tdigest.merge/ +func (c cmdable) TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd { + args := []interface{}{"TDIGEST.MERGE", destKey, len(sourceKeys)} + + for _, sourceKey := range sourceKeys { + args = append(args, sourceKey) + } + + if options != nil { + if options.Compression != 0 { + args = append(args, "COMPRESSION", options.Compression) + } + if options.Override { + args = append(args, "OVERRIDE") + } + } + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestMin returns the minimum value from a t-Digest data structure. +// For more information - https://redis.io/commands/tdigest.min/ +func (c cmdable) TDigestMin(ctx context.Context, key string) *FloatCmd { + args := []interface{}{"TDIGEST.MIN", key} + + cmd := NewFloatCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestQuantile returns an array of quantile values for one or more elements in a t-Digest data structure. +// The quantile value for an element is the fraction of all elements in the t-Digest that are less than or equal to it. +// Returns an array of floats representing the quantile values for each element or an error if the operation could not be completed. +// For more information - https://redis.io/commands/tdigest.quantile/ +func (c cmdable) TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd { + args := make([]interface{}, 2, 2+len(elements)) + args[0] = "TDIGEST.QUANTILE" + args[1] = key + + // Convert floatSlice to []interface{} + interfaceSlice := make([]interface{}, len(elements)) + for i, v := range elements { + interfaceSlice[i] = v + } + + args = append(args, interfaceSlice...) + + cmd := NewFloatSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestRank returns an array of rank values for one or more elements in a t-Digest data structure. +// The rank of an element is its position in the sorted list of all elements in the t-Digest. +// Returns an array of integers representing the rank values for each element or an error if the operation could not be completed. 
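A minimal sketch of using the t-digest helpers above to track a latency distribution; the client, key names and values are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// TDIGEST.CREATE with an explicit COMPRESSION of 100.
	if err := rdb.TDigestCreateWithCompression(ctx, "td:latency", 100).Err(); err != nil {
		panic(err)
	}

	// Add raw observations (milliseconds, say).
	_ = rdb.TDigestAdd(ctx, "td:latency", 12.5, 14.1, 250.0, 13.7, 15.2).Err()

	// Estimate the median and the 99th percentile, plus the observed extremes.
	q, _ := rdb.TDigestQuantile(ctx, "td:latency", 0.5, 0.99).Result()
	lo, _ := rdb.TDigestMin(ctx, "td:latency").Result()
	hi, _ := rdb.TDigestMax(ctx, "td:latency").Result()
	fmt.Println(q, lo, hi)

	// TDIGEST.INFO decodes into the TDigestInfo struct defined above.
	info, err := rdb.TDigestInfo(ctx, "td:latency").Result()
	fmt.Println(info.Observations, info.Compression, err)

	// Merge one or more sketches into a destination, overriding it.
	_ = rdb.TDigestMerge(ctx, "td:all",
		&redis.TDigestMergeOptions{Compression: 100, Override: true},
		"td:latency").Err()
}
```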
+// For more information - https://redis.io/commands/tdigest.rank/ +func (c cmdable) TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "TDIGEST.RANK" + args[1] = key + + // Convert floatSlice to []interface{} + interfaceSlice := make([]interface{}, len(values)) + for i, v := range values { + interfaceSlice[i] = v + } + + args = append(args, interfaceSlice...) + + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestReset resets a t-Digest data structure to its initial state. +// Returns OK on success or an error if the operation could not be completed. +// For more information - https://redis.io/commands/tdigest.reset/ +func (c cmdable) TDigestReset(ctx context.Context, key string) *StatusCmd { + args := []interface{}{"TDIGEST.RESET", key} + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestRevRank returns an array of reverse rank values for one or more elements in a t-Digest data structure. +// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order. +// Returns an array of integers representing the reverse rank values for each element or an error if the operation could not be completed. +// For more information - https://redis.io/commands/tdigest.revrank/ +func (c cmdable) TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "TDIGEST.REVRANK" + args[1] = key + + // Convert floatSlice to []interface{} + interfaceSlice := make([]interface{}, len(values)) + for i, v := range values { + interfaceSlice[i] = v + } + + args = append(args, interfaceSlice...) + + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TDigestTrimmedMean returns the trimmed mean value from a t-Digest data structure. +// The trimmed mean is calculated by removing a specified fraction of the highest and lowest values from the t-Digest and then calculating the mean of the remaining values. +// Returns a float representing the trimmed mean value or an error if the operation could not be completed. +// For more information - https://redis.io/commands/tdigest.trimmed_mean/ +func (c cmdable) TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd { + args := []interface{}{"TDIGEST.TRIMMED_MEAN", key, lowCutQuantile, highCutQuantile} + + cmd := NewFloatCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/redis_gears.go b/vendor/github.com/redis/go-redis/v9/redis_gears.go new file mode 100644 index 00000000..5fafea40 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/redis_gears.go @@ -0,0 +1,161 @@ +package redis + +import ( + "context" + "fmt" + "strings" +) + +type gearsCmdable interface { + TFunctionLoad(ctx context.Context, lib string) *StatusCmd + TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd + TFunctionDelete(ctx context.Context, libName string) *StatusCmd + TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd + TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd + TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd + TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd + TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd + TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd +} +type TFunctionLoadOptions struct { + Replace bool + Config string +} + +type TFunctionListOptions struct { + Withcode bool + Verbose int + Library string +} + +type TFCallOptions struct { + Keys []string + Arguments []string +} + +// TFunctionLoad - load a new JavaScript library into Redis. +// For more information - https://redis.io/commands/tfunction-load/ +func (c cmdable) TFunctionLoad(ctx context.Context, lib string) *StatusCmd { + args := []interface{}{"TFUNCTION", "LOAD", lib} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd { + args := []interface{}{"TFUNCTION", "LOAD"} + if options != nil { + if options.Replace { + args = append(args, "REPLACE") + } + if options.Config != "" { + args = append(args, "CONFIG", options.Config) + } + } + args = append(args, lib) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TFunctionDelete - delete a JavaScript library from Redis. +// For more information - https://redis.io/commands/tfunction-delete/ +func (c cmdable) TFunctionDelete(ctx context.Context, libName string) *StatusCmd { + args := []interface{}{"TFUNCTION", "DELETE", libName} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TFunctionList - list the functions with additional information about each function. +// For more information - https://redis.io/commands/tfunction-list/ +func (c cmdable) TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd { + args := []interface{}{"TFUNCTION", "LIST"} + cmd := NewMapStringInterfaceSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd { + args := []interface{}{"TFUNCTION", "LIST"} + if options != nil { + if options.Withcode { + args = append(args, "WITHCODE") + } + if options.Verbose != 0 { + v := strings.Repeat("v", options.Verbose) + args = append(args, v) + } + if options.Library != "" { + args = append(args, "LIBRARY", options.Library) + + } + } + cmd := NewMapStringInterfaceSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// TFCall - invoke a function. 
+// For more information - https://redis.io/commands/tfcall/
+func (c cmdable) TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd {
+	lf := libName + "." + funcName
+	args := []interface{}{"TFCALL", lf, numKeys}
+	cmd := NewCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd {
+	lf := libName + "." + funcName
+	args := []interface{}{"TFCALL", lf, numKeys}
+	if options != nil {
+		if options.Keys != nil {
+			for _, key := range options.Keys {
+
+				args = append(args, key)
+			}
+		}
+		if options.Arguments != nil {
+			for _, key := range options.Arguments {
+
+				args = append(args, key)
+			}
+		}
+	}
+	cmd := NewCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// TFCallASYNC - invoke an asynchronous JavaScript function (coroutine).
+// For more information - https://redis.io/commands/TFCallASYNC/
+func (c cmdable) TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd {
+	lf := fmt.Sprintf("%s.%s", libName, funcName)
+	args := []interface{}{"TFCALLASYNC", lf, numKeys}
+	cmd := NewCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd {
+	lf := fmt.Sprintf("%s.%s", libName, funcName)
+	args := []interface{}{"TFCALLASYNC", lf, numKeys}
+	if options != nil {
+		if options.Keys != nil {
+			for _, key := range options.Keys {
+
+				args = append(args, key)
+			}
+		}
+		if options.Arguments != nil {
+			for _, key := range options.Arguments {
+
+				args = append(args, key)
+			}
+		}
+	}
+	cmd := NewCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/version.go b/vendor/github.com/redis/go-redis/v9/version.go
index e81eb1ab..d68ab6e0 100644
--- a/vendor/github.com/redis/go-redis/v9/version.go
+++ b/vendor/github.com/redis/go-redis/v9/version.go
@@ -2,5 +2,5 @@ package redis
 
 // Version is the current release version.
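A rough sketch of driving the triggers-and-functions helpers added in redis_gears.go; the JavaScript library source, library name and function name are illustrative assumptions, and a server with the corresponding module loaded is required:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// A tiny JS library exposing one function named "hello".
	lib := "#!js api_version=1.0 name=mylib\n" +
		"redis.registerFunction('hello', () => 'world');"

	// TFUNCTION LOAD REPLACE <code>
	if err := rdb.TFunctionLoadArgs(ctx, lib,
		&redis.TFunctionLoadOptions{Replace: true}).Err(); err != nil {
		panic(err)
	}

	// TFCALL mylib.hello 0
	res, err := rdb.TFCall(ctx, "mylib", "hello", 0).Result()
	fmt.Println(res, err) // e.g. world <nil>

	// Keys and extra arguments are passed through TFCallOptions.
	_, _ = rdb.TFCallArgs(ctx, "mylib", "hello", 1,
		&redis.TFCallOptions{Keys: []string{"some:key"}}).Result()

	// Clean up.
	_ = rdb.TFunctionDelete(ctx, "mylib").Err()
}
```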
func Version() string { - return "9.0.5" + return "9.1.0" } diff --git a/vendor/github.com/rs/zerolog/.gitignore b/vendor/github.com/rs/zerolog/.gitignore deleted file mode 100644 index 8ebe58b1..00000000 --- a/vendor/github.com/rs/zerolog/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -tmp - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/rs/zerolog/CNAME b/vendor/github.com/rs/zerolog/CNAME deleted file mode 100644 index 9ce57a6e..00000000 --- a/vendor/github.com/rs/zerolog/CNAME +++ /dev/null @@ -1 +0,0 @@ -zerolog.io \ No newline at end of file diff --git a/vendor/github.com/rs/zerolog/LICENSE b/vendor/github.com/rs/zerolog/LICENSE deleted file mode 100644 index 677e07f7..00000000 --- a/vendor/github.com/rs/zerolog/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 Olivier Poitrey - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/rs/zerolog/README.md b/vendor/github.com/rs/zerolog/README.md deleted file mode 100644 index b83ae159..00000000 --- a/vendor/github.com/rs/zerolog/README.md +++ /dev/null @@ -1,786 +0,0 @@ -# Zero Allocation JSON Logger - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog) - -The zerolog package provides a fast and simple logger dedicated to JSON output. - -Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection. - -Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler to use API and even better performance. - -To keep the code base and the API simple, zerolog focuses on efficient structured logging only. 
Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging). - -![Pretty Logging Image](pretty.png) - -## Who uses zerolog - -Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list. - -## Features - -* [Blazing fast](#benchmarks) -* [Low to zero allocation](#benchmarks) -* [Leveled logging](#leveled-logging) -* [Sampling](#log-sampling) -* [Hooks](#hooks) -* [Contextual fields](#contextual-logging) -* [`context.Context` integration](#contextcontext-integration) -* [Integration with `net/http`](#integration-with-nethttp) -* [JSON and CBOR encoding formats](#binary-encoding) -* [Pretty logging for development](#pretty-logging) -* [Error Logging (with optional Stacktrace)](#error-logging) - -## Installation - -```bash -go get -u github.com/rs/zerolog/log -``` - -## Getting Started - -### Simple Logging Example - -For simple logging, import the global logger package **github.com/rs/zerolog/log** - -```go -package main - -import ( - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" -) - -func main() { - // UNIX Time is faster and smaller than most timestamps - zerolog.TimeFieldFormat = zerolog.TimeFormatUnix - - log.Print("hello world") -} - -// Output: {"time":1516134303,"level":"debug","message":"hello world"} -``` -> Note: By default log writes to `os.Stderr` -> Note: The default log level for `log.Print` is *debug* - -### Contextual Logging - -**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below: - -```go -package main - -import ( - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" -) - -func main() { - zerolog.TimeFieldFormat = zerolog.TimeFormatUnix - - log.Debug(). - Str("Scale", "833 cents"). - Float64("Interval", 833.09). - Msg("Fibonacci is everywhere") - - log.Debug(). - Str("Name", "Tom"). - Send() -} - -// Output: {"level":"debug","Scale":"833 cents","Interval":833.09,"time":1562212768,"message":"Fibonacci is everywhere"} -// Output: {"level":"debug","Name":"Tom","time":1562212768} -``` - -> You'll note in the above example that when adding contextual fields, the fields are strongly typed. You can find the full list of supported fields [here](#standard-types) - -### Leveled Logging - -#### Simple Leveled Logging Example - -```go -package main - -import ( - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" -) - -func main() { - zerolog.TimeFieldFormat = zerolog.TimeFormatUnix - - log.Info().Msg("hello world") -} - -// Output: {"time":1516134303,"level":"info","message":"hello world"} -``` - -> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world"`), the chain must have either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile time error to alert you of this. 
- -**zerolog** allows for logging at the following levels (from highest to lowest): - -* panic (`zerolog.PanicLevel`, 5) -* fatal (`zerolog.FatalLevel`, 4) -* error (`zerolog.ErrorLevel`, 3) -* warn (`zerolog.WarnLevel`, 2) -* info (`zerolog.InfoLevel`, 1) -* debug (`zerolog.DebugLevel`, 0) -* trace (`zerolog.TraceLevel`, -1) - -You can set the Global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing in one of the given constants above, e.g. `zerolog.InfoLevel` would be the "info" level. Whichever level is chosen, all logs with a level greater than or equal to that level will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant. - -#### Setting Global Log Level - -This example uses command-line flags to demonstrate various outputs depending on the chosen log level. - -```go -package main - -import ( - "flag" - - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" -) - -func main() { - zerolog.TimeFieldFormat = zerolog.TimeFormatUnix - debug := flag.Bool("debug", false, "sets log level to debug") - - flag.Parse() - - // Default level for this example is info, unless debug flag is present - zerolog.SetGlobalLevel(zerolog.InfoLevel) - if *debug { - zerolog.SetGlobalLevel(zerolog.DebugLevel) - } - - log.Debug().Msg("This message appears only when log level set to Debug") - log.Info().Msg("This message appears when log level set to Debug or Info") - - if e := log.Debug(); e.Enabled() { - // Compute log output only if enabled. - value := "bar" - e.Str("foo", value).Msg("some debug message") - } -} -``` - -Info Output (no flag) - -```bash -$ ./logLevelExample -{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"} -``` - -Debug Output (debug flag set) - -```bash -$ ./logLevelExample -debug -{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"} -{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"} -{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"} -``` - -#### Logging without Level or Message - -You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below. - -```go -package main - -import ( - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" -) - -func main() { - zerolog.TimeFieldFormat = zerolog.TimeFormatUnix - - log.Log(). - Str("foo", "bar"). - Msg("") -} - -// Output: {"time":1494567715,"foo":"bar"} -``` - -### Error Logging - -You can log errors using the `Err` method - -```go -package main - -import ( - "errors" - - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" -) - -func main() { - zerolog.TimeFieldFormat = zerolog.TimeFormatUnix - - err := errors.New("seems we have an error here") - log.Error().Err(err).Msg("") -} - -// Output: {"level":"error","error":"seems we have an error here","time":1609085256} -``` - -> The default field name for errors is `error`, you can change this by setting `zerolog.ErrorFieldName` to meet your needs. - -#### Error Logging with Stacktrace - -Using `github.com/pkg/errors`, you can add a formatted stacktrace to your errors. 
- -```go -package main - -import ( - "github.com/pkg/errors" - "github.com/rs/zerolog/pkgerrors" - - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" -) - -func main() { - zerolog.TimeFieldFormat = zerolog.TimeFormatUnix - zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack - - err := outer() - log.Error().Stack().Err(err).Msg("") -} - -func inner() error { - return errors.New("seems we have an error here") -} - -func middle() error { - err := inner() - if err != nil { - return err - } - return nil -} - -func outer() error { - err := middle() - if err != nil { - return err - } - return nil -} - -// Output: {"level":"error","stack":[{"func":"inner","line":"20","source":"errors.go"},{"func":"middle","line":"24","source":"errors.go"},{"func":"outer","line":"32","source":"errors.go"},{"func":"main","line":"15","source":"errors.go"},{"func":"main","line":"204","source":"proc.go"},{"func":"goexit","line":"1374","source":"asm_amd64.s"}],"error":"seems we have an error here","time":1609086683} -``` - -> zerolog.ErrorStackMarshaler must be set in order for the stack to output anything. - -#### Logging Fatal Messages - -```go -package main - -import ( - "errors" - - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" -) - -func main() { - err := errors.New("A repo man spends his life getting into tense situations") - service := "myservice" - - zerolog.TimeFieldFormat = zerolog.TimeFormatUnix - - log.Fatal(). - Err(err). - Str("service", service). - Msgf("Cannot start %s", service) -} - -// Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"} -// exit status 1 -``` - -> NOTE: Using `Msgf` generates one allocation even when the logger is disabled. - - -### Create logger instance to manage different outputs - -```go -logger := zerolog.New(os.Stderr).With().Timestamp().Logger() - -logger.Info().Str("foo", "bar").Msg("hello world") - -// Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"} -``` - -### Sub-loggers let you chain loggers with additional context - -```go -sublogger := log.With(). - Str("component", "foo"). - Logger() -sublogger.Info().Msg("hello world") - -// Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"} -``` - -### Pretty logging - -To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`: - -```go -log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) - -log.Info().Str("foo", "bar").Msg("Hello world") - -// Output: 3:04PM INF Hello World foo=bar -``` - -To customize the configuration and formatting: - -```go -output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339} -output.FormatLevel = func(i interface{}) string { - return strings.ToUpper(fmt.Sprintf("| %-6s|", i)) -} -output.FormatMessage = func(i interface{}) string { - return fmt.Sprintf("***%s****", i) -} -output.FormatFieldName = func(i interface{}) string { - return fmt.Sprintf("%s:", i) -} -output.FormatFieldValue = func(i interface{}) string { - return strings.ToUpper(fmt.Sprintf("%s", i)) -} - -log := zerolog.New(output).With().Timestamp().Logger() - -log.Info().Str("foo", "bar").Msg("Hello World") - -// Output: 2006-01-02T15:04:05Z07:00 | INFO | ***Hello World**** foo:BAR -``` - -### Sub dictionary - -```go -log.Info(). - Str("foo", "bar"). - Dict("dict", zerolog.Dict(). - Str("bar", "baz"). 
- Int("n", 1), - ).Msg("hello world") - -// Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"} -``` - -### Customize automatic field names - -```go -zerolog.TimestampFieldName = "t" -zerolog.LevelFieldName = "l" -zerolog.MessageFieldName = "m" - -log.Info().Msg("hello world") - -// Output: {"l":"info","t":1494567715,"m":"hello world"} -``` - -### Add contextual fields to the global logger - -```go -log.Logger = log.With().Str("foo", "bar").Logger() -``` - -### Add file and line number to log - -Equivalent of `Llongfile`: - -```go -log.Logger = log.With().Caller().Logger() -log.Info().Msg("hello world") - -// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"} -``` - -Equivalent of `Lshortfile`: - -```go -zerolog.CallerMarshalFunc = func(pc uintptr, file string, line int) string { - short := file - for i := len(file) - 1; i > 0; i-- { - if file[i] == '/' { - short = file[i+1:] - break - } - } - file = short - return file + ":" + strconv.Itoa(line) -} -log.Logger = log.With().Caller().Logger() -log.Info().Msg("hello world") - -// Output: {"level": "info", "message": "hello world", "caller": "some_file:21"} -``` - -### Thread-safe, lock-free, non-blocking writer - -If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follows: - -```go -wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) { - fmt.Printf("Logger Dropped %d messages", missed) - }) -log := zerolog.New(wr) -log.Print("test") -``` - -You will need to install `code.cloudfoundry.org/go-diodes` to use this feature. - -### Log Sampling - -```go -sampled := log.Sample(&zerolog.BasicSampler{N: 10}) -sampled.Info().Msg("will be logged every 10 messages") - -// Output: {"time":1494567715,"level":"info","message":"will be logged every 10 messages"} -``` - -More advanced sampling: - -```go -// Will let 5 debug messages per period of 1 second. -// Over 5 debug message, 1 every 100 debug messages are logged. -// Other levels are not sampled. -sampled := log.Sample(zerolog.LevelSampler{ - DebugSampler: &zerolog.BurstSampler{ - Burst: 5, - Period: 1*time.Second, - NextSampler: &zerolog.BasicSampler{N: 100}, - }, -}) -sampled.Debug().Msg("hello world") - -// Output: {"time":1494567715,"level":"debug","message":"hello world"} -``` - -### Hooks - -```go -type SeverityHook struct{} - -func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { - if level != zerolog.NoLevel { - e.Str("severity", level.String()) - } -} - -hooked := log.Hook(SeverityHook{}) -hooked.Warn().Msg("") - -// Output: {"level":"warn","severity":"warn"} -``` - -### Pass a sub-logger by context - -```go -ctx := log.With().Str("component", "module").Logger().WithContext(ctx) - -log.Ctx(ctx).Info().Msg("hello world") - -// Output: {"component":"module","level":"info","message":"hello world"} -``` - -### Set as standard logger output - -```go -stdlog := zerolog.New(os.Stdout).With(). - Str("foo", "bar"). - Logger() - -stdlog.SetFlags(0) -stdlog.SetOutput(log) - -stdlog.Print("hello world") - -// Output: {"foo":"bar","message":"hello world"} -``` - -### context.Context integration - -Go contexts are commonly passed throughout Go code, and this can help you pass -your Logger into places it might otherwise be hard to inject. 
The `Logger` -instance may be attached to Go context (`context.Context`) using -`Logger.WithContext(ctx)` and extracted from it using `zerolog.Ctx(ctx)`. -For example: - -```go -func f() { - logger := zerolog.New(os.Stdout) - ctx := context.Background() - - // Attach the Logger to the context.Context - ctx = logger.WithContext(ctx) - someFunc(ctx) -} - -func someFunc(ctx context.Context) { - // Get Logger from the go Context. if it's nil, then - // `zerolog.DefaultContextLogger` is returned, if - // `DefaultContextLogger` is nil, then a disabled logger is returned. - logger := zerolog.Ctx(ctx) - logger.Info().Msg("Hello") -} -``` - -A second form of `context.Context` integration allows you to pass the current -context.Context into the logged event, and retrieve it from hooks. This can be -useful to log trace and span IDs or other information stored in the go context, -and facilitates the unification of logging and tracing in some systems: - -```go -type TracingHook struct{} - -func (h TracingHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { - ctx := e.Ctx() - spanId := getSpanIdFromContext(ctx) // as per your tracing framework - e.Str("span-id", spanId) -} - -func f() { - // Setup the logger - logger := zerolog.New(os.Stdout) - logger = logger.Hook(TracingHook{}) - - ctx := context.Background() - // Use the Ctx function to make the context available to the hook - logger.Info().Ctx(ctx).Msg("Hello") -} -``` - -### Integration with `net/http` - -The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`. - -In this example we use [alice](https://github.com/justinas/alice) to install logger for better readability. - -```go -log := zerolog.New(os.Stdout).With(). - Timestamp(). - Str("role", "my-service"). - Str("host", host). - Logger() - -c := alice.New() - -// Install the logger handler with default output on the console -c = c.Append(hlog.NewHandler(log)) - -// Install some provided extra handler to set some request's context fields. -// Thanks to that handler, all our logs will come with some prepopulated fields. -c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) { - hlog.FromRequest(r).Info(). - Str("method", r.Method). - Stringer("url", r.URL). - Int("status", status). - Int("size", size). - Dur("duration", duration). - Msg("") -})) -c = c.Append(hlog.RemoteAddrHandler("ip")) -c = c.Append(hlog.UserAgentHandler("user_agent")) -c = c.Append(hlog.RefererHandler("referer")) -c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id")) - -// Here is your final handler -h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Get the logger from the request's context. You can safely assume it - // will be always there: if the handler is removed, hlog.FromRequest - // will return a no-op logger. - hlog.FromRequest(r).Info(). - Str("user", "current user"). - Str("status", "ok"). - Msg("Something happened") - - // Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"} -})) -http.Handle("/", h) - -if err := http.ListenAndServe(":8080", nil); err != nil { - log.Fatal().Err(err).Msg("Startup failed") -} -``` - -## Multiple Log Output -`zerolog.MultiLevelWriter` may be used to send the log message to multiple outputs. -In this example, we send the log message to both `os.Stdout` and the in-built ConsoleWriter. 
-```go -func main() { - consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout} - - multi := zerolog.MultiLevelWriter(consoleWriter, os.Stdout) - - logger := zerolog.New(multi).With().Timestamp().Logger() - - logger.Info().Msg("Hello World!") -} - -// Output (Line 1: Console; Line 2: Stdout) -// 12:36PM INF Hello World! -// {"level":"info","time":"2019-11-07T12:36:38+03:00","message":"Hello World!"} -``` - -## Global Settings - -Some settings can be changed and will be applied to all loggers: - -* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods). -* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode). -* `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events. -* `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name. -* `zerolog.LevelFieldName`: Can be set to customize level field name. -* `zerolog.MessageFieldName`: Can be set to customize message field name. -* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name. -* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formated as UNIX timestamp. -* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`). -* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`). -* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on the stderr. This handler must be thread safe and non-blocking. - -## Field Types - -### Standard Types - -* `Str` -* `Bool` -* `Int`, `Int8`, `Int16`, `Int32`, `Int64` -* `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64` -* `Float32`, `Float64` - -### Advanced Fields - -* `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name. -* `Func`: Run a `func` only if the level is enabled. -* `Timestamp`: Inserts a timestamp field with `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`. -* `Time`: Adds a field with time formatted with `zerolog.TimeFieldFormat`. -* `Dur`: Adds a field with `time.Duration`. -* `Dict`: Adds a sub-key/value as a field of the event. -* `RawJSON`: Adds a field with an already encoded JSON (`[]byte`) -* `Hex`: Adds a field with value formatted as a hexadecimal string (`[]byte`) -* `Interface`: Uses reflection to marshal the type. - -Most fields are also available in the slice format (`Strs` for `[]string`, `Errs` for `[]error` etc.) - -## Binary Encoding - -In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](https://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows: - -```bash -go build -tags binary_log . -``` - -To Decode binary encoded log files you can use any CBOR decoder. One has been tested to work -with zerolog library is [CSD](https://github.com/toravir/csd/). 
- -## Related Projects - -* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog` -* [overlog](https://github.com/Trendyol/overlog): Implementation of `Mapped Diagnostic Context` interface using `zerolog` -* [zerologr](https://github.com/go-logr/zerologr): Implementation of `logr.LogSink` interface using `zerolog` - -## Benchmarks - -See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks. - -All operations are allocation free (those numbers *include* JSON encoding): - -```text -BenchmarkLogEmpty-8 100000000 19.1 ns/op 0 B/op 0 allocs/op -BenchmarkDisabled-8 500000000 4.07 ns/op 0 B/op 0 allocs/op -BenchmarkInfo-8 30000000 42.5 ns/op 0 B/op 0 allocs/op -BenchmarkContextFields-8 30000000 44.9 ns/op 0 B/op 0 allocs/op -BenchmarkLogFields-8 10000000 184 ns/op 0 B/op 0 allocs/op -``` - -There are a few Go logging benchmarks and comparisons that include zerolog. - -* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) -* [uber-common/zap](https://github.com/uber-go/zap#performance) - -Using Uber's zap comparison benchmark: - -Log a message and 10 fields: - -| Library | Time | Bytes Allocated | Objects Allocated | -| :--- | :---: | :---: | :---: | -| zerolog | 767 ns/op | 552 B/op | 6 allocs/op | -| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op | -| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op | -| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op | -| lion | 5392 ns/op | 5807 B/op | 63 allocs/op | -| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op | -| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op | -| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op | - -Log a message with a logger that already has 10 fields of context: - -| Library | Time | Bytes Allocated | Objects Allocated | -| :--- | :---: | :---: | :---: | -| zerolog | 52 ns/op | 0 B/op | 0 allocs/op | -| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op | -| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op | -| lion | 2702 ns/op | 4074 B/op | 38 allocs/op | -| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op | -| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op | -| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op | -| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op | - -Log a static string, without any context or `printf`-style templating: - -| Library | Time | Bytes Allocated | Objects Allocated | -| :--- | :---: | :---: | :---: | -| zerolog | 50 ns/op | 0 B/op | 0 allocs/op | -| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op | -| standard library | 453 ns/op | 80 B/op | 2 allocs/op | -| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op | -| go-kit | 508 ns/op | 656 B/op | 13 allocs/op | -| lion | 771 ns/op | 1224 B/op | 10 allocs/op | -| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op | -| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op | -| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op | - -## Caveats - -### Field duplication - -Note that zerolog does no de-duplication of fields. Using the same key multiple times creates multiple keys in final JSON: - -```go -logger := zerolog.New(os.Stderr).With().Timestamp().Logger() -logger.Info(). - Timestamp(). - Msg("dup") -// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"} -``` - -In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt. - -### Concurrency safety - -Be careful when calling UpdateContext. It is not concurrency safe. 
Use the With method to create a child logger: - -```go -func handler(w http.ResponseWriter, r *http.Request) { - // Create a child logger for concurrency safety - logger := log.Logger.With().Logger() - - // Add context fields, for example User-Agent from HTTP headers - logger.UpdateContext(func(c zerolog.Context) zerolog.Context { - ... - }) -} -``` diff --git a/vendor/github.com/rs/zerolog/_config.yml b/vendor/github.com/rs/zerolog/_config.yml deleted file mode 100644 index a1e896d7..00000000 --- a/vendor/github.com/rs/zerolog/_config.yml +++ /dev/null @@ -1 +0,0 @@ -remote_theme: rs/gh-readme diff --git a/vendor/github.com/rs/zerolog/array.go b/vendor/github.com/rs/zerolog/array.go deleted file mode 100644 index 99612ee9..00000000 --- a/vendor/github.com/rs/zerolog/array.go +++ /dev/null @@ -1,240 +0,0 @@ -package zerolog - -import ( - "net" - "sync" - "time" -) - -var arrayPool = &sync.Pool{ - New: func() interface{} { - return &Array{ - buf: make([]byte, 0, 500), - } - }, -} - -// Array is used to prepopulate an array of items -// which can be re-used to add to log messages. -type Array struct { - buf []byte -} - -func putArray(a *Array) { - // Proper usage of a sync.Pool requires each entry to have approximately - // the same memory cost. To obtain this property when the stored type - // contains a variably-sized buffer, we add a hard limit on the maximum buffer - // to place back in the pool. - // - // See https://golang.org/issue/23199 - const maxSize = 1 << 16 // 64KiB - if cap(a.buf) > maxSize { - return - } - arrayPool.Put(a) -} - -// Arr creates an array to be added to an Event or Context. -func Arr() *Array { - a := arrayPool.Get().(*Array) - a.buf = a.buf[:0] - return a -} - -// MarshalZerologArray method here is no-op - since data is -// already in the needed format. -func (*Array) MarshalZerologArray(*Array) { -} - -func (a *Array) write(dst []byte) []byte { - dst = enc.AppendArrayStart(dst) - if len(a.buf) > 0 { - dst = append(dst, a.buf...) - } - dst = enc.AppendArrayEnd(dst) - putArray(a) - return dst -} - -// Object marshals an object that implement the LogObjectMarshaler -// interface and appends it to the array. -func (a *Array) Object(obj LogObjectMarshaler) *Array { - e := Dict() - obj.MarshalZerologObject(e) - e.buf = enc.AppendEndMarker(e.buf) - a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) - putEvent(e) - return a -} - -// Str appends the val as a string to the array. -func (a *Array) Str(val string) *Array { - a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val) - return a -} - -// Bytes appends the val as a string to the array. -func (a *Array) Bytes(val []byte) *Array { - a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val) - return a -} - -// Hex appends the val as a hex string to the array. -func (a *Array) Hex(val []byte) *Array { - a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val) - return a -} - -// RawJSON adds already encoded JSON to the array. -func (a *Array) RawJSON(val []byte) *Array { - a.buf = appendJSON(enc.AppendArrayDelim(a.buf), val) - return a -} - -// Err serializes and appends the err to the array. -func (a *Array) Err(err error) *Array { - switch m := ErrorMarshalFunc(err).(type) { - case LogObjectMarshaler: - e := newEvent(nil, 0) - e.buf = e.buf[:0] - e.appendObject(m) - a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) 
- putEvent(e) - case error: - if m == nil || isNilValue(m) { - a.buf = enc.AppendNil(enc.AppendArrayDelim(a.buf)) - } else { - a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error()) - } - case string: - a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m) - default: - a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m) - } - - return a -} - -// Bool appends the val as a bool to the array. -func (a *Array) Bool(b bool) *Array { - a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b) - return a -} - -// Int appends i as a int to the array. -func (a *Array) Int(i int) *Array { - a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i) - return a -} - -// Int8 appends i as a int8 to the array. -func (a *Array) Int8(i int8) *Array { - a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i) - return a -} - -// Int16 appends i as a int16 to the array. -func (a *Array) Int16(i int16) *Array { - a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i) - return a -} - -// Int32 appends i as a int32 to the array. -func (a *Array) Int32(i int32) *Array { - a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i) - return a -} - -// Int64 appends i as a int64 to the array. -func (a *Array) Int64(i int64) *Array { - a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i) - return a -} - -// Uint appends i as a uint to the array. -func (a *Array) Uint(i uint) *Array { - a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i) - return a -} - -// Uint8 appends i as a uint8 to the array. -func (a *Array) Uint8(i uint8) *Array { - a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i) - return a -} - -// Uint16 appends i as a uint16 to the array. -func (a *Array) Uint16(i uint16) *Array { - a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i) - return a -} - -// Uint32 appends i as a uint32 to the array. -func (a *Array) Uint32(i uint32) *Array { - a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i) - return a -} - -// Uint64 appends i as a uint64 to the array. -func (a *Array) Uint64(i uint64) *Array { - a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i) - return a -} - -// Float32 appends f as a float32 to the array. -func (a *Array) Float32(f float32) *Array { - a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f) - return a -} - -// Float64 appends f as a float64 to the array. -func (a *Array) Float64(f float64) *Array { - a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f) - return a -} - -// Time appends t formatted as string using zerolog.TimeFieldFormat. -func (a *Array) Time(t time.Time) *Array { - a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat) - return a -} - -// Dur appends d to the array. -func (a *Array) Dur(d time.Duration) *Array { - a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger) - return a -} - -// Interface appends i marshaled using reflection. 
-func (a *Array) Interface(i interface{}) *Array { - if obj, ok := i.(LogObjectMarshaler); ok { - return a.Object(obj) - } - a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i) - return a -} - -// IPAddr adds IPv4 or IPv6 address to the array -func (a *Array) IPAddr(ip net.IP) *Array { - a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip) - return a -} - -// IPPrefix adds IPv4 or IPv6 Prefix (IP + mask) to the array -func (a *Array) IPPrefix(pfx net.IPNet) *Array { - a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx) - return a -} - -// MACAddr adds a MAC (Ethernet) address to the array -func (a *Array) MACAddr(ha net.HardwareAddr) *Array { - a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha) - return a -} - -// Dict adds the dict Event to the array -func (a *Array) Dict(dict *Event) *Array { - dict.buf = enc.AppendEndMarker(dict.buf) - a.buf = append(enc.AppendArrayDelim(a.buf), dict.buf...) - return a -} diff --git a/vendor/github.com/rs/zerolog/console.go b/vendor/github.com/rs/zerolog/console.go deleted file mode 100644 index 8b0e0c61..00000000 --- a/vendor/github.com/rs/zerolog/console.go +++ /dev/null @@ -1,450 +0,0 @@ -package zerolog - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/mattn/go-colorable" -) - -const ( - colorBlack = iota + 30 - colorRed - colorGreen - colorYellow - colorBlue - colorMagenta - colorCyan - colorWhite - - colorBold = 1 - colorDarkGray = 90 -) - -var ( - consoleBufPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, 100)) - }, - } -) - -const ( - consoleDefaultTimeFormat = time.Kitchen -) - -// Formatter transforms the input into a formatted string. -type Formatter func(interface{}) string - -// ConsoleWriter parses the JSON input and writes it in an -// (optionally) colorized, human-friendly format to Out. -type ConsoleWriter struct { - // Out is the output destination. - Out io.Writer - - // NoColor disables the colorized output. - NoColor bool - - // TimeFormat specifies the format for timestamp in output. - TimeFormat string - - // PartsOrder defines the order of parts in output. - PartsOrder []string - - // PartsExclude defines parts to not display in output. - PartsExclude []string - - // FieldsExclude defines contextual fields to not display in output. - FieldsExclude []string - - FormatTimestamp Formatter - FormatLevel Formatter - FormatCaller Formatter - FormatMessage Formatter - FormatFieldName Formatter - FormatFieldValue Formatter - FormatErrFieldName Formatter - FormatErrFieldValue Formatter - - FormatExtra func(map[string]interface{}, *bytes.Buffer) error -} - -// NewConsoleWriter creates and initializes a new ConsoleWriter. -func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter { - w := ConsoleWriter{ - Out: os.Stdout, - TimeFormat: consoleDefaultTimeFormat, - PartsOrder: consoleDefaultPartsOrder(), - } - - for _, opt := range options { - opt(&w) - } - - // Fix color on Windows - if w.Out == os.Stdout || w.Out == os.Stderr { - w.Out = colorable.NewColorable(w.Out.(*os.File)) - } - - return w -} - -// Write transforms the JSON input with formatters and appends to w.Out. 
-func (w ConsoleWriter) Write(p []byte) (n int, err error) { - // Fix color on Windows - if w.Out == os.Stdout || w.Out == os.Stderr { - w.Out = colorable.NewColorable(w.Out.(*os.File)) - } - - if w.PartsOrder == nil { - w.PartsOrder = consoleDefaultPartsOrder() - } - - var buf = consoleBufPool.Get().(*bytes.Buffer) - defer func() { - buf.Reset() - consoleBufPool.Put(buf) - }() - - var evt map[string]interface{} - p = decodeIfBinaryToBytes(p) - d := json.NewDecoder(bytes.NewReader(p)) - d.UseNumber() - err = d.Decode(&evt) - if err != nil { - return n, fmt.Errorf("cannot decode event: %s", err) - } - - for _, p := range w.PartsOrder { - w.writePart(buf, evt, p) - } - - w.writeFields(evt, buf) - - if w.FormatExtra != nil { - err = w.FormatExtra(evt, buf) - if err != nil { - return n, err - } - } - - err = buf.WriteByte('\n') - if err != nil { - return n, err - } - - _, err = buf.WriteTo(w.Out) - return len(p), err -} - -// writeFields appends formatted key-value pairs to buf. -func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) { - var fields = make([]string, 0, len(evt)) - for field := range evt { - var isExcluded bool - for _, excluded := range w.FieldsExclude { - if field == excluded { - isExcluded = true - break - } - } - if isExcluded { - continue - } - - switch field { - case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName: - continue - } - fields = append(fields, field) - } - sort.Strings(fields) - - // Write space only if something has already been written to the buffer, and if there are fields. - if buf.Len() > 0 && len(fields) > 0 { - buf.WriteByte(' ') - } - - // Move the "error" field to the front - ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName }) - if ei < len(fields) && fields[ei] == ErrorFieldName { - fields[ei] = "" - fields = append([]string{ErrorFieldName}, fields...) - var xfields = make([]string, 0, len(fields)) - for _, field := range fields { - if field == "" { // Skip empty fields - continue - } - xfields = append(xfields, field) - } - fields = xfields - } - - for i, field := range fields { - var fn Formatter - var fv Formatter - - if field == ErrorFieldName { - if w.FormatErrFieldName == nil { - fn = consoleDefaultFormatErrFieldName(w.NoColor) - } else { - fn = w.FormatErrFieldName - } - - if w.FormatErrFieldValue == nil { - fv = consoleDefaultFormatErrFieldValue(w.NoColor) - } else { - fv = w.FormatErrFieldValue - } - } else { - if w.FormatFieldName == nil { - fn = consoleDefaultFormatFieldName(w.NoColor) - } else { - fn = w.FormatFieldName - } - - if w.FormatFieldValue == nil { - fv = consoleDefaultFormatFieldValue - } else { - fv = w.FormatFieldValue - } - } - - buf.WriteString(fn(field)) - - switch fValue := evt[field].(type) { - case string: - if needsQuote(fValue) { - buf.WriteString(fv(strconv.Quote(fValue))) - } else { - buf.WriteString(fv(fValue)) - } - case json.Number: - buf.WriteString(fv(fValue)) - default: - b, err := InterfaceMarshalFunc(fValue) - if err != nil { - fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err) - } else { - fmt.Fprint(buf, fv(b)) - } - } - - if i < len(fields)-1 { // Skip space for last field - buf.WriteByte(' ') - } - } -} - -// writePart appends a formatted part to buf. 
-func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) { - var f Formatter - - if w.PartsExclude != nil && len(w.PartsExclude) > 0 { - for _, exclude := range w.PartsExclude { - if exclude == p { - return - } - } - } - - switch p { - case LevelFieldName: - if w.FormatLevel == nil { - f = consoleDefaultFormatLevel(w.NoColor) - } else { - f = w.FormatLevel - } - case TimestampFieldName: - if w.FormatTimestamp == nil { - f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor) - } else { - f = w.FormatTimestamp - } - case MessageFieldName: - if w.FormatMessage == nil { - f = consoleDefaultFormatMessage - } else { - f = w.FormatMessage - } - case CallerFieldName: - if w.FormatCaller == nil { - f = consoleDefaultFormatCaller(w.NoColor) - } else { - f = w.FormatCaller - } - default: - if w.FormatFieldValue == nil { - f = consoleDefaultFormatFieldValue - } else { - f = w.FormatFieldValue - } - } - - var s = f(evt[p]) - - if len(s) > 0 { - if buf.Len() > 0 { - buf.WriteByte(' ') // Write space only if not the first part - } - buf.WriteString(s) - } -} - -// needsQuote returns true when the string s should be quoted in output. -func needsQuote(s string) bool { - for i := range s { - if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' { - return true - } - } - return false -} - -// colorize returns the string s wrapped in ANSI code c, unless disabled is true. -func colorize(s interface{}, c int, disabled bool) string { - if disabled { - return fmt.Sprintf("%s", s) - } - return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s) -} - -// ----- DEFAULT FORMATTERS --------------------------------------------------- - -func consoleDefaultPartsOrder() []string { - return []string{ - TimestampFieldName, - LevelFieldName, - CallerFieldName, - MessageFieldName, - } -} - -func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter { - if timeFormat == "" { - timeFormat = consoleDefaultTimeFormat - } - return func(i interface{}) string { - t := "" - switch tt := i.(type) { - case string: - ts, err := time.ParseInLocation(TimeFieldFormat, tt, time.Local) - if err != nil { - t = tt - } else { - t = ts.Local().Format(timeFormat) - } - case json.Number: - i, err := tt.Int64() - if err != nil { - t = tt.String() - } else { - var sec, nsec int64 - - switch TimeFieldFormat { - case TimeFormatUnixNano: - sec, nsec = 0, i - case TimeFormatUnixMicro: - sec, nsec = 0, int64(time.Duration(i)*time.Microsecond) - case TimeFormatUnixMs: - sec, nsec = 0, int64(time.Duration(i)*time.Millisecond) - default: - sec, nsec = i, 0 - } - - ts := time.Unix(sec, nsec) - t = ts.Format(timeFormat) - } - } - return colorize(t, colorDarkGray, noColor) - } -} - -func consoleDefaultFormatLevel(noColor bool) Formatter { - return func(i interface{}) string { - var l string - if ll, ok := i.(string); ok { - switch ll { - case LevelTraceValue: - l = colorize("TRC", colorMagenta, noColor) - case LevelDebugValue: - l = colorize("DBG", colorYellow, noColor) - case LevelInfoValue: - l = colorize("INF", colorGreen, noColor) - case LevelWarnValue: - l = colorize("WRN", colorRed, noColor) - case LevelErrorValue: - l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor) - case LevelFatalValue: - l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor) - case LevelPanicValue: - l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor) - default: - l = colorize(ll, colorBold, noColor) - } - } else { - if i == nil { - l = colorize("???", colorBold, 
noColor) - } else { - l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3] - } - } - return l - } -} - -func consoleDefaultFormatCaller(noColor bool) Formatter { - return func(i interface{}) string { - var c string - if cc, ok := i.(string); ok { - c = cc - } - if len(c) > 0 { - if cwd, err := os.Getwd(); err == nil { - if rel, err := filepath.Rel(cwd, c); err == nil { - c = rel - } - } - c = colorize(c, colorBold, noColor) + colorize(" >", colorCyan, noColor) - } - return c - } -} - -func consoleDefaultFormatMessage(i interface{}) string { - if i == nil { - return "" - } - return fmt.Sprintf("%s", i) -} - -func consoleDefaultFormatFieldName(noColor bool) Formatter { - return func(i interface{}) string { - return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor) - } -} - -func consoleDefaultFormatFieldValue(i interface{}) string { - return fmt.Sprintf("%s", i) -} - -func consoleDefaultFormatErrFieldName(noColor bool) Formatter { - return func(i interface{}) string { - return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor) - } -} - -func consoleDefaultFormatErrFieldValue(noColor bool) Formatter { - return func(i interface{}) string { - return colorize(fmt.Sprintf("%s", i), colorRed, noColor) - } -} diff --git a/vendor/github.com/rs/zerolog/context.go b/vendor/github.com/rs/zerolog/context.go deleted file mode 100644 index 9d860e50..00000000 --- a/vendor/github.com/rs/zerolog/context.go +++ /dev/null @@ -1,444 +0,0 @@ -package zerolog - -import ( - "context" - "fmt" - "io/ioutil" - "math" - "net" - "time" -) - -// Context configures a new sub-logger with contextual fields. -type Context struct { - l Logger -} - -// Logger returns the logger with the context previously set. -func (c Context) Logger() Logger { - return c.l -} - -// Fields is a helper function to use a map or slice to set fields using type assertion. -// Only map[string]interface{} and []interface{} are accepted. []interface{} must -// alternate string keys and arbitrary values, and extraneous ones are ignored. -func (c Context) Fields(fields interface{}) Context { - c.l.context = appendFields(c.l.context, fields) - return c -} - -// Dict adds the field key with the dict to the logger context. -func (c Context) Dict(key string, dict *Event) Context { - dict.buf = enc.AppendEndMarker(dict.buf) - c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...) - putEvent(dict) - return c -} - -// Array adds the field key with an array to the event context. -// Use zerolog.Arr() to create the array or pass a type that -// implement the LogArrayMarshaler interface. -func (c Context) Array(key string, arr LogArrayMarshaler) Context { - c.l.context = enc.AppendKey(c.l.context, key) - if arr, ok := arr.(*Array); ok { - c.l.context = arr.write(c.l.context) - return c - } - var a *Array - if aa, ok := arr.(*Array); ok { - a = aa - } else { - a = Arr() - arr.MarshalZerologArray(a) - } - c.l.context = a.write(c.l.context) - return c -} - -// Object marshals an object that implement the LogObjectMarshaler interface. -func (c Context) Object(key string, obj LogObjectMarshaler) Context { - e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) - e.Object(key, obj) - c.l.context = enc.AppendObjectData(c.l.context, e.buf) - putEvent(e) - return c -} - -// EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface. 
-func (c Context) EmbedObject(obj LogObjectMarshaler) Context { - e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) - e.EmbedObject(obj) - c.l.context = enc.AppendObjectData(c.l.context, e.buf) - putEvent(e) - return c -} - -// Str adds the field key with val as a string to the logger context. -func (c Context) Str(key, val string) Context { - c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val) - return c -} - -// Strs adds the field key with val as a string to the logger context. -func (c Context) Strs(key string, vals []string) Context { - c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals) - return c -} - -// Stringer adds the field key with val.String() (or null if val is nil) to the logger context. -func (c Context) Stringer(key string, val fmt.Stringer) Context { - if val != nil { - c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val.String()) - return c - } - - c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), nil) - return c -} - -// Bytes adds the field key with val as a []byte to the logger context. -func (c Context) Bytes(key string, val []byte) Context { - c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val) - return c -} - -// Hex adds the field key with val as a hex string to the logger context. -func (c Context) Hex(key string, val []byte) Context { - c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val) - return c -} - -// RawJSON adds already encoded JSON to context. -// -// No sanity check is performed on b; it must not contain carriage returns and -// be valid JSON. -func (c Context) RawJSON(key string, b []byte) Context { - c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b) - return c -} - -// AnErr adds the field key with serialized err to the logger context. -func (c Context) AnErr(key string, err error) Context { - switch m := ErrorMarshalFunc(err).(type) { - case nil: - return c - case LogObjectMarshaler: - return c.Object(key, m) - case error: - if m == nil || isNilValue(m) { - return c - } else { - return c.Str(key, m.Error()) - } - case string: - return c.Str(key, m) - default: - return c.Interface(key, m) - } -} - -// Errs adds the field key with errs as an array of serialized errors to the -// logger context. -func (c Context) Errs(key string, errs []error) Context { - arr := Arr() - for _, err := range errs { - switch m := ErrorMarshalFunc(err).(type) { - case LogObjectMarshaler: - arr = arr.Object(m) - case error: - if m == nil || isNilValue(m) { - arr = arr.Interface(nil) - } else { - arr = arr.Str(m.Error()) - } - case string: - arr = arr.Str(m) - default: - arr = arr.Interface(m) - } - } - - return c.Array(key, arr) -} - -// Err adds the field "error" with serialized err to the logger context. -func (c Context) Err(err error) Context { - return c.AnErr(ErrorFieldName, err) -} - -// Ctx adds the context.Context to the logger context. The context.Context is -// not rendered in the error message, but is made available for hooks to use. -// A typical use case is to extract tracing information from the -// context.Context. -func (c Context) Ctx(ctx context.Context) Context { - c.l.ctx = ctx - return c -} - -// Bool adds the field key with val as a bool to the logger context. -func (c Context) Bool(key string, b bool) Context { - c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b) - return c -} - -// Bools adds the field key with val as a []bool to the logger context. 
-func (c Context) Bools(key string, b []bool) Context { - c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b) - return c -} - -// Int adds the field key with i as a int to the logger context. -func (c Context) Int(key string, i int) Context { - c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i) - return c -} - -// Ints adds the field key with i as a []int to the logger context. -func (c Context) Ints(key string, i []int) Context { - c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i) - return c -} - -// Int8 adds the field key with i as a int8 to the logger context. -func (c Context) Int8(key string, i int8) Context { - c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i) - return c -} - -// Ints8 adds the field key with i as a []int8 to the logger context. -func (c Context) Ints8(key string, i []int8) Context { - c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i) - return c -} - -// Int16 adds the field key with i as a int16 to the logger context. -func (c Context) Int16(key string, i int16) Context { - c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i) - return c -} - -// Ints16 adds the field key with i as a []int16 to the logger context. -func (c Context) Ints16(key string, i []int16) Context { - c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i) - return c -} - -// Int32 adds the field key with i as a int32 to the logger context. -func (c Context) Int32(key string, i int32) Context { - c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i) - return c -} - -// Ints32 adds the field key with i as a []int32 to the logger context. -func (c Context) Ints32(key string, i []int32) Context { - c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i) - return c -} - -// Int64 adds the field key with i as a int64 to the logger context. -func (c Context) Int64(key string, i int64) Context { - c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i) - return c -} - -// Ints64 adds the field key with i as a []int64 to the logger context. -func (c Context) Ints64(key string, i []int64) Context { - c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i) - return c -} - -// Uint adds the field key with i as a uint to the logger context. -func (c Context) Uint(key string, i uint) Context { - c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i) - return c -} - -// Uints adds the field key with i as a []uint to the logger context. -func (c Context) Uints(key string, i []uint) Context { - c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i) - return c -} - -// Uint8 adds the field key with i as a uint8 to the logger context. -func (c Context) Uint8(key string, i uint8) Context { - c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i) - return c -} - -// Uints8 adds the field key with i as a []uint8 to the logger context. -func (c Context) Uints8(key string, i []uint8) Context { - c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i) - return c -} - -// Uint16 adds the field key with i as a uint16 to the logger context. -func (c Context) Uint16(key string, i uint16) Context { - c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i) - return c -} - -// Uints16 adds the field key with i as a []uint16 to the logger context. 
-func (c Context) Uints16(key string, i []uint16) Context { - c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i) - return c -} - -// Uint32 adds the field key with i as a uint32 to the logger context. -func (c Context) Uint32(key string, i uint32) Context { - c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i) - return c -} - -// Uints32 adds the field key with i as a []uint32 to the logger context. -func (c Context) Uints32(key string, i []uint32) Context { - c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i) - return c -} - -// Uint64 adds the field key with i as a uint64 to the logger context. -func (c Context) Uint64(key string, i uint64) Context { - c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i) - return c -} - -// Uints64 adds the field key with i as a []uint64 to the logger context. -func (c Context) Uints64(key string, i []uint64) Context { - c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i) - return c -} - -// Float32 adds the field key with f as a float32 to the logger context. -func (c Context) Float32(key string, f float32) Context { - c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f) - return c -} - -// Floats32 adds the field key with f as a []float32 to the logger context. -func (c Context) Floats32(key string, f []float32) Context { - c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f) - return c -} - -// Float64 adds the field key with f as a float64 to the logger context. -func (c Context) Float64(key string, f float64) Context { - c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f) - return c -} - -// Floats64 adds the field key with f as a []float64 to the logger context. -func (c Context) Floats64(key string, f []float64) Context { - c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f) - return c -} - -type timestampHook struct{} - -func (ts timestampHook) Run(e *Event, level Level, msg string) { - e.Timestamp() -} - -var th = timestampHook{} - -// Timestamp adds the current local time to the logger context with the "time" key, formatted using zerolog.TimeFieldFormat. -// To customize the key name, change zerolog.TimestampFieldName. -// To customize the time format, change zerolog.TimeFieldFormat. -// -// NOTE: It won't dedupe the "time" key if the *Context has one already. -func (c Context) Timestamp() Context { - c.l = c.l.Hook(th) - return c -} - -// Time adds the field key with t formated as string using zerolog.TimeFieldFormat. -func (c Context) Time(key string, t time.Time) Context { - c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) - return c -} - -// Times adds the field key with t formated as string using zerolog.TimeFieldFormat. -func (c Context) Times(key string, t []time.Time) Context { - c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) - return c -} - -// Dur adds the fields key with d divided by unit and stored as a float. -func (c Context) Dur(key string, d time.Duration) Context { - c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) - return c -} - -// Durs adds the fields key with d divided by unit and stored as a float. 
-func (c Context) Durs(key string, d []time.Duration) Context { - c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) - return c -} - -// Interface adds the field key with obj marshaled using reflection. -func (c Context) Interface(key string, i interface{}) Context { - c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i) - return c -} - -type callerHook struct { - callerSkipFrameCount int -} - -func newCallerHook(skipFrameCount int) callerHook { - return callerHook{callerSkipFrameCount: skipFrameCount} -} - -func (ch callerHook) Run(e *Event, level Level, msg string) { - switch ch.callerSkipFrameCount { - case useGlobalSkipFrameCount: - // Extra frames to skip (added by hook infra). - e.caller(CallerSkipFrameCount + contextCallerSkipFrameCount) - default: - // Extra frames to skip (added by hook infra). - e.caller(ch.callerSkipFrameCount + contextCallerSkipFrameCount) - } -} - -// useGlobalSkipFrameCount acts as a flag to informat callerHook.Run -// to use the global CallerSkipFrameCount. -const useGlobalSkipFrameCount = math.MinInt32 - -// ch is the default caller hook using the global CallerSkipFrameCount. -var ch = newCallerHook(useGlobalSkipFrameCount) - -// Caller adds the file:line of the caller with the zerolog.CallerFieldName key. -func (c Context) Caller() Context { - c.l = c.l.Hook(ch) - return c -} - -// CallerWithSkipFrameCount adds the file:line of the caller with the zerolog.CallerFieldName key. -// The specified skipFrameCount int will override the global CallerSkipFrameCount for this context's respective logger. -// If set to -1 the global CallerSkipFrameCount will be used. -func (c Context) CallerWithSkipFrameCount(skipFrameCount int) Context { - c.l = c.l.Hook(newCallerHook(skipFrameCount)) - return c -} - -// Stack enables stack trace printing for the error passed to Err(). -func (c Context) Stack() Context { - c.l.stack = true - return c -} - -// IPAddr adds IPv4 or IPv6 Address to the context -func (c Context) IPAddr(key string, ip net.IP) Context { - c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip) - return c -} - -// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the context -func (c Context) IPPrefix(key string, pfx net.IPNet) Context { - c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx) - return c -} - -// MACAddr adds MAC address to the context -func (c Context) MACAddr(key string, ha net.HardwareAddr) Context { - c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha) - return c -} diff --git a/vendor/github.com/rs/zerolog/ctx.go b/vendor/github.com/rs/zerolog/ctx.go deleted file mode 100644 index 60432d15..00000000 --- a/vendor/github.com/rs/zerolog/ctx.go +++ /dev/null @@ -1,52 +0,0 @@ -package zerolog - -import ( - "context" -) - -var disabledLogger *Logger - -func init() { - SetGlobalLevel(TraceLevel) - l := Nop() - disabledLogger = &l -} - -type ctxKey struct{} - -// WithContext returns a copy of ctx with the receiver attached. The Logger -// attached to the provided Context (if any) will not be effected. If the -// receiver's log level is Disabled it will only be attached to the returned -// Context if the provided Context has a previously attached Logger. If the -// provided Context has no attached Logger, a Disabled Logger will not be -// attached. 
-// -// Note: to modify the existing Logger attached to a Context (instead of -// replacing it in a new Context), use UpdateContext with the following -// notation: -// -// ctx := r.Context() -// l := zerolog.Ctx(ctx) -// l.UpdateContext(func(c Context) Context { -// return c.Str("bar", "baz") -// }) -// -func (l Logger) WithContext(ctx context.Context) context.Context { - if _, ok := ctx.Value(ctxKey{}).(*Logger); !ok && l.level == Disabled { - // Do not store disabled logger. - return ctx - } - return context.WithValue(ctx, ctxKey{}, &l) -} - -// Ctx returns the Logger associated with the ctx. If no logger -// is associated, DefaultContextLogger is returned, unless DefaultContextLogger -// is nil, in which case a disabled logger is returned. -func Ctx(ctx context.Context) *Logger { - if l, ok := ctx.Value(ctxKey{}).(*Logger); ok { - return l - } else if l = DefaultContextLogger; l != nil { - return l - } - return disabledLogger -} diff --git a/vendor/github.com/rs/zerolog/encoder.go b/vendor/github.com/rs/zerolog/encoder.go deleted file mode 100644 index 09b24e80..00000000 --- a/vendor/github.com/rs/zerolog/encoder.go +++ /dev/null @@ -1,56 +0,0 @@ -package zerolog - -import ( - "net" - "time" -) - -type encoder interface { - AppendArrayDelim(dst []byte) []byte - AppendArrayEnd(dst []byte) []byte - AppendArrayStart(dst []byte) []byte - AppendBeginMarker(dst []byte) []byte - AppendBool(dst []byte, val bool) []byte - AppendBools(dst []byte, vals []bool) []byte - AppendBytes(dst, s []byte) []byte - AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte - AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte - AppendEndMarker(dst []byte) []byte - AppendFloat32(dst []byte, val float32) []byte - AppendFloat64(dst []byte, val float64) []byte - AppendFloats32(dst []byte, vals []float32) []byte - AppendFloats64(dst []byte, vals []float64) []byte - AppendHex(dst, s []byte) []byte - AppendIPAddr(dst []byte, ip net.IP) []byte - AppendIPPrefix(dst []byte, pfx net.IPNet) []byte - AppendInt(dst []byte, val int) []byte - AppendInt16(dst []byte, val int16) []byte - AppendInt32(dst []byte, val int32) []byte - AppendInt64(dst []byte, val int64) []byte - AppendInt8(dst []byte, val int8) []byte - AppendInterface(dst []byte, i interface{}) []byte - AppendInts(dst []byte, vals []int) []byte - AppendInts16(dst []byte, vals []int16) []byte - AppendInts32(dst []byte, vals []int32) []byte - AppendInts64(dst []byte, vals []int64) []byte - AppendInts8(dst []byte, vals []int8) []byte - AppendKey(dst []byte, key string) []byte - AppendLineBreak(dst []byte) []byte - AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte - AppendNil(dst []byte) []byte - AppendObjectData(dst []byte, o []byte) []byte - AppendString(dst []byte, s string) []byte - AppendStrings(dst []byte, vals []string) []byte - AppendTime(dst []byte, t time.Time, format string) []byte - AppendTimes(dst []byte, vals []time.Time, format string) []byte - AppendUint(dst []byte, val uint) []byte - AppendUint16(dst []byte, val uint16) []byte - AppendUint32(dst []byte, val uint32) []byte - AppendUint64(dst []byte, val uint64) []byte - AppendUint8(dst []byte, val uint8) []byte - AppendUints(dst []byte, vals []uint) []byte - AppendUints16(dst []byte, vals []uint16) []byte - AppendUints32(dst []byte, vals []uint32) []byte - AppendUints64(dst []byte, vals []uint64) []byte - AppendUints8(dst []byte, vals []uint8) []byte -} diff --git a/vendor/github.com/rs/zerolog/encoder_cbor.go 
b/vendor/github.com/rs/zerolog/encoder_cbor.go deleted file mode 100644 index 36cb994b..00000000 --- a/vendor/github.com/rs/zerolog/encoder_cbor.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build binary_log - -package zerolog - -// This file contains bindings to do binary encoding. - -import ( - "github.com/rs/zerolog/internal/cbor" -) - -var ( - _ encoder = (*cbor.Encoder)(nil) - - enc = cbor.Encoder{} -) - -func init() { - // using closure to reflect the changes at runtime. - cbor.JSONMarshalFunc = func(v interface{}) ([]byte, error) { - return InterfaceMarshalFunc(v) - } -} - -func appendJSON(dst []byte, j []byte) []byte { - return cbor.AppendEmbeddedJSON(dst, j) -} -func appendCBOR(dst []byte, c []byte) []byte { - return cbor.AppendEmbeddedCBOR(dst, c) -} - -// decodeIfBinaryToString - converts a binary formatted log msg to a -// JSON formatted String Log message. -func decodeIfBinaryToString(in []byte) string { - return cbor.DecodeIfBinaryToString(in) -} - -func decodeObjectToStr(in []byte) string { - return cbor.DecodeObjectToStr(in) -} - -// decodeIfBinaryToBytes - converts a binary formatted log msg to a -// JSON formatted Bytes Log message. -func decodeIfBinaryToBytes(in []byte) []byte { - return cbor.DecodeIfBinaryToBytes(in) -} diff --git a/vendor/github.com/rs/zerolog/encoder_json.go b/vendor/github.com/rs/zerolog/encoder_json.go deleted file mode 100644 index 6f96c68a..00000000 --- a/vendor/github.com/rs/zerolog/encoder_json.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build !binary_log - -package zerolog - -// encoder_json.go file contains bindings to generate -// JSON encoded byte stream. - -import ( - "encoding/base64" - "github.com/rs/zerolog/internal/json" -) - -var ( - _ encoder = (*json.Encoder)(nil) - - enc = json.Encoder{} -) - -func init() { - // using closure to reflect the changes at runtime. - json.JSONMarshalFunc = func(v interface{}) ([]byte, error) { - return InterfaceMarshalFunc(v) - } -} - -func appendJSON(dst []byte, j []byte) []byte { - return append(dst, j...) -} -func appendCBOR(dst []byte, cbor []byte) []byte { - dst = append(dst, []byte("\"data:application/cbor;base64,")...) - l := len(dst) - enc := base64.StdEncoding - n := enc.EncodedLen(len(cbor)) - for i := 0; i < n; i++ { - dst = append(dst, '.') - } - enc.Encode(dst[l:], cbor) - return append(dst, '"') -} - -func decodeIfBinaryToString(in []byte) string { - return string(in) -} - -func decodeObjectToStr(in []byte) string { - return string(in) -} - -func decodeIfBinaryToBytes(in []byte) []byte { - return in -} diff --git a/vendor/github.com/rs/zerolog/event.go b/vendor/github.com/rs/zerolog/event.go deleted file mode 100644 index 2a5d3b08..00000000 --- a/vendor/github.com/rs/zerolog/event.go +++ /dev/null @@ -1,830 +0,0 @@ -package zerolog - -import ( - "context" - "fmt" - "net" - "os" - "runtime" - "sync" - "time" -) - -var eventPool = &sync.Pool{ - New: func() interface{} { - return &Event{ - buf: make([]byte, 0, 500), - } - }, -} - -// Event represents a log event. It is instanced by one of the level method of -// Logger and finalized by the Msg or Msgf method. -type Event struct { - buf []byte - w LevelWriter - level Level - done func(msg string) - stack bool // enable error stack trace - ch []Hook // hooks from context - skipFrame int // The number of additional frames to skip when printing the caller. - ctx context.Context // Optional Go context for event -} - -func putEvent(e *Event) { - // Proper usage of a sync.Pool requires each entry to have approximately - // the same memory cost. 
To obtain this property when the stored type - // contains a variably-sized buffer, we add a hard limit on the maximum buffer - // to place back in the pool. - // - // See https://golang.org/issue/23199 - const maxSize = 1 << 16 // 64KiB - if cap(e.buf) > maxSize { - return - } - eventPool.Put(e) -} - -// LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface -// to be implemented by types used with Event/Context's Object methods. -type LogObjectMarshaler interface { - MarshalZerologObject(e *Event) -} - -// LogArrayMarshaler provides a strongly-typed and encoding-agnostic interface -// to be implemented by types used with Event/Context's Array methods. -type LogArrayMarshaler interface { - MarshalZerologArray(a *Array) -} - -func newEvent(w LevelWriter, level Level) *Event { - e := eventPool.Get().(*Event) - e.buf = e.buf[:0] - e.ch = nil - e.buf = enc.AppendBeginMarker(e.buf) - e.w = w - e.level = level - e.stack = false - e.skipFrame = 0 - return e -} - -func (e *Event) write() (err error) { - if e == nil { - return nil - } - if e.level != Disabled { - e.buf = enc.AppendEndMarker(e.buf) - e.buf = enc.AppendLineBreak(e.buf) - if e.w != nil { - _, err = e.w.WriteLevel(e.level, e.buf) - } - } - putEvent(e) - return -} - -// Enabled return false if the *Event is going to be filtered out by -// log level or sampling. -func (e *Event) Enabled() bool { - return e != nil && e.level != Disabled -} - -// Discard disables the event so Msg(f) won't print it. -func (e *Event) Discard() *Event { - if e == nil { - return e - } - e.level = Disabled - return nil -} - -// Msg sends the *Event with msg added as the message field if not empty. -// -// NOTICE: once this method is called, the *Event should be disposed. -// Calling Msg twice can have unexpected result. -func (e *Event) Msg(msg string) { - if e == nil { - return - } - e.msg(msg) -} - -// Send is equivalent to calling Msg(""). -// -// NOTICE: once this method is called, the *Event should be disposed. -func (e *Event) Send() { - if e == nil { - return - } - e.msg("") -} - -// Msgf sends the event with formatted msg added as the message field if not empty. -// -// NOTICE: once this method is called, the *Event should be disposed. -// Calling Msgf twice can have unexpected result. -func (e *Event) Msgf(format string, v ...interface{}) { - if e == nil { - return - } - e.msg(fmt.Sprintf(format, v...)) -} - -func (e *Event) MsgFunc(createMsg func() string) { - if e == nil { - return - } - e.msg(createMsg()) -} - -func (e *Event) msg(msg string) { - for _, hook := range e.ch { - hook.Run(e, e.level, msg) - } - if msg != "" { - e.buf = enc.AppendString(enc.AppendKey(e.buf, MessageFieldName), msg) - } - if e.done != nil { - defer e.done(msg) - } - if err := e.write(); err != nil { - if ErrorHandler != nil { - ErrorHandler(err) - } else { - fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v\n", err) - } - } -} - -// Fields is a helper function to use a map or slice to set fields using type assertion. -// Only map[string]interface{} and []interface{} are accepted. []interface{} must -// alternate string keys and arbitrary values, and extraneous ones are ignored. -func (e *Event) Fields(fields interface{}) *Event { - if e == nil { - return e - } - e.buf = appendFields(e.buf, fields) - return e -} - -// Dict adds the field key with a dict to the event context. -// Use zerolog.Dict() to create the dictionary. 
-func (e *Event) Dict(key string, dict *Event) *Event { - if e == nil { - return e - } - dict.buf = enc.AppendEndMarker(dict.buf) - e.buf = append(enc.AppendKey(e.buf, key), dict.buf...) - putEvent(dict) - return e -} - -// Dict creates an Event to be used with the *Event.Dict method. -// Call usual field methods like Str, Int etc to add fields to this -// event and give it as argument the *Event.Dict method. -func Dict() *Event { - return newEvent(nil, 0) -} - -// Array adds the field key with an array to the event context. -// Use zerolog.Arr() to create the array or pass a type that -// implement the LogArrayMarshaler interface. -func (e *Event) Array(key string, arr LogArrayMarshaler) *Event { - if e == nil { - return e - } - e.buf = enc.AppendKey(e.buf, key) - var a *Array - if aa, ok := arr.(*Array); ok { - a = aa - } else { - a = Arr() - arr.MarshalZerologArray(a) - } - e.buf = a.write(e.buf) - return e -} - -func (e *Event) appendObject(obj LogObjectMarshaler) { - e.buf = enc.AppendBeginMarker(e.buf) - obj.MarshalZerologObject(e) - e.buf = enc.AppendEndMarker(e.buf) -} - -// Object marshals an object that implement the LogObjectMarshaler interface. -func (e *Event) Object(key string, obj LogObjectMarshaler) *Event { - if e == nil { - return e - } - e.buf = enc.AppendKey(e.buf, key) - if obj == nil { - e.buf = enc.AppendNil(e.buf) - - return e - } - - e.appendObject(obj) - return e -} - -// Func allows an anonymous func to run only if the event is enabled. -func (e *Event) Func(f func(e *Event)) *Event { - if e != nil && e.Enabled() { - f(e) - } - return e -} - -// EmbedObject marshals an object that implement the LogObjectMarshaler interface. -func (e *Event) EmbedObject(obj LogObjectMarshaler) *Event { - if e == nil { - return e - } - if obj == nil { - return e - } - obj.MarshalZerologObject(e) - return e -} - -// Str adds the field key with val as a string to the *Event context. -func (e *Event) Str(key, val string) *Event { - if e == nil { - return e - } - e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val) - return e -} - -// Strs adds the field key with vals as a []string to the *Event context. -func (e *Event) Strs(key string, vals []string) *Event { - if e == nil { - return e - } - e.buf = enc.AppendStrings(enc.AppendKey(e.buf, key), vals) - return e -} - -// Stringer adds the field key with val.String() (or null if val is nil) -// to the *Event context. -func (e *Event) Stringer(key string, val fmt.Stringer) *Event { - if e == nil { - return e - } - e.buf = enc.AppendStringer(enc.AppendKey(e.buf, key), val) - return e -} - -// Stringers adds the field key with vals where each individual val -// is used as val.String() (or null if val is empty) to the *Event -// context. -func (e *Event) Stringers(key string, vals []fmt.Stringer) *Event { - if e == nil { - return e - } - e.buf = enc.AppendStringers(enc.AppendKey(e.buf, key), vals) - return e -} - -// Bytes adds the field key with val as a string to the *Event context. -// -// Runes outside of normal ASCII ranges will be hex-encoded in the resulting -// JSON. -func (e *Event) Bytes(key string, val []byte) *Event { - if e == nil { - return e - } - e.buf = enc.AppendBytes(enc.AppendKey(e.buf, key), val) - return e -} - -// Hex adds the field key with val as a hex string to the *Event context. -func (e *Event) Hex(key string, val []byte) *Event { - if e == nil { - return e - } - e.buf = enc.AppendHex(enc.AppendKey(e.buf, key), val) - return e -} - -// RawJSON adds already encoded JSON to the log line under key. 
-// -// No sanity check is performed on b; it must not contain carriage returns and -// be valid JSON. -func (e *Event) RawJSON(key string, b []byte) *Event { - if e == nil { - return e - } - e.buf = appendJSON(enc.AppendKey(e.buf, key), b) - return e -} - -// RawCBOR adds already encoded CBOR to the log line under key. -// -// No sanity check is performed on b -// Note: The full featureset of CBOR is supported as data will not be mapped to json but stored as data-url -func (e *Event) RawCBOR(key string, b []byte) *Event { - if e == nil { - return e - } - e.buf = appendCBOR(enc.AppendKey(e.buf, key), b) - return e -} - -// AnErr adds the field key with serialized err to the *Event context. -// If err is nil, no field is added. -func (e *Event) AnErr(key string, err error) *Event { - if e == nil { - return e - } - switch m := ErrorMarshalFunc(err).(type) { - case nil: - return e - case LogObjectMarshaler: - return e.Object(key, m) - case error: - if m == nil || isNilValue(m) { - return e - } else { - return e.Str(key, m.Error()) - } - case string: - return e.Str(key, m) - default: - return e.Interface(key, m) - } -} - -// Errs adds the field key with errs as an array of serialized errors to the -// *Event context. -func (e *Event) Errs(key string, errs []error) *Event { - if e == nil { - return e - } - arr := Arr() - for _, err := range errs { - switch m := ErrorMarshalFunc(err).(type) { - case LogObjectMarshaler: - arr = arr.Object(m) - case error: - arr = arr.Err(m) - case string: - arr = arr.Str(m) - default: - arr = arr.Interface(m) - } - } - - return e.Array(key, arr) -} - -// Err adds the field "error" with serialized err to the *Event context. -// If err is nil, no field is added. -// -// To customize the key name, change zerolog.ErrorFieldName. -// -// If Stack() has been called before and zerolog.ErrorStackMarshaler is defined, -// the err is passed to ErrorStackMarshaler and the result is appended to the -// zerolog.ErrorStackFieldName. -func (e *Event) Err(err error) *Event { - if e == nil { - return e - } - if e.stack && ErrorStackMarshaler != nil { - switch m := ErrorStackMarshaler(err).(type) { - case nil: - case LogObjectMarshaler: - e.Object(ErrorStackFieldName, m) - case error: - if m != nil && !isNilValue(m) { - e.Str(ErrorStackFieldName, m.Error()) - } - case string: - e.Str(ErrorStackFieldName, m) - default: - e.Interface(ErrorStackFieldName, m) - } - } - return e.AnErr(ErrorFieldName, err) -} - -// Stack enables stack trace printing for the error passed to Err(). -// -// ErrorStackMarshaler must be set for this method to do something. -func (e *Event) Stack() *Event { - if e != nil { - e.stack = true - } - return e -} - -// Ctx adds the Go Context to the *Event context. The context is not rendered -// in the output message, but is available to hooks and to Func() calls via the -// GetCtx() accessor. A typical use case is to extract tracing information from -// the Go Ctx. -func (e *Event) Ctx(ctx context.Context) *Event { - if e != nil { - e.ctx = ctx - } - return e -} - -// GetCtx retrieves the Go context.Context which is optionally stored in the -// Event. This allows Hooks and functions passed to Func() to retrieve values -// which are stored in the context.Context. This can be useful in tracing, -// where span information is commonly propagated in the context.Context. -func (e *Event) GetCtx() context.Context { - if e == nil || e.ctx == nil { - return context.Background() - } - return e.ctx -} - -// Bool adds the field key with val as a bool to the *Event context. 
-func (e *Event) Bool(key string, b bool) *Event { - if e == nil { - return e - } - e.buf = enc.AppendBool(enc.AppendKey(e.buf, key), b) - return e -} - -// Bools adds the field key with val as a []bool to the *Event context. -func (e *Event) Bools(key string, b []bool) *Event { - if e == nil { - return e - } - e.buf = enc.AppendBools(enc.AppendKey(e.buf, key), b) - return e -} - -// Int adds the field key with i as a int to the *Event context. -func (e *Event) Int(key string, i int) *Event { - if e == nil { - return e - } - e.buf = enc.AppendInt(enc.AppendKey(e.buf, key), i) - return e -} - -// Ints adds the field key with i as a []int to the *Event context. -func (e *Event) Ints(key string, i []int) *Event { - if e == nil { - return e - } - e.buf = enc.AppendInts(enc.AppendKey(e.buf, key), i) - return e -} - -// Int8 adds the field key with i as a int8 to the *Event context. -func (e *Event) Int8(key string, i int8) *Event { - if e == nil { - return e - } - e.buf = enc.AppendInt8(enc.AppendKey(e.buf, key), i) - return e -} - -// Ints8 adds the field key with i as a []int8 to the *Event context. -func (e *Event) Ints8(key string, i []int8) *Event { - if e == nil { - return e - } - e.buf = enc.AppendInts8(enc.AppendKey(e.buf, key), i) - return e -} - -// Int16 adds the field key with i as a int16 to the *Event context. -func (e *Event) Int16(key string, i int16) *Event { - if e == nil { - return e - } - e.buf = enc.AppendInt16(enc.AppendKey(e.buf, key), i) - return e -} - -// Ints16 adds the field key with i as a []int16 to the *Event context. -func (e *Event) Ints16(key string, i []int16) *Event { - if e == nil { - return e - } - e.buf = enc.AppendInts16(enc.AppendKey(e.buf, key), i) - return e -} - -// Int32 adds the field key with i as a int32 to the *Event context. -func (e *Event) Int32(key string, i int32) *Event { - if e == nil { - return e - } - e.buf = enc.AppendInt32(enc.AppendKey(e.buf, key), i) - return e -} - -// Ints32 adds the field key with i as a []int32 to the *Event context. -func (e *Event) Ints32(key string, i []int32) *Event { - if e == nil { - return e - } - e.buf = enc.AppendInts32(enc.AppendKey(e.buf, key), i) - return e -} - -// Int64 adds the field key with i as a int64 to the *Event context. -func (e *Event) Int64(key string, i int64) *Event { - if e == nil { - return e - } - e.buf = enc.AppendInt64(enc.AppendKey(e.buf, key), i) - return e -} - -// Ints64 adds the field key with i as a []int64 to the *Event context. -func (e *Event) Ints64(key string, i []int64) *Event { - if e == nil { - return e - } - e.buf = enc.AppendInts64(enc.AppendKey(e.buf, key), i) - return e -} - -// Uint adds the field key with i as a uint to the *Event context. -func (e *Event) Uint(key string, i uint) *Event { - if e == nil { - return e - } - e.buf = enc.AppendUint(enc.AppendKey(e.buf, key), i) - return e -} - -// Uints adds the field key with i as a []int to the *Event context. -func (e *Event) Uints(key string, i []uint) *Event { - if e == nil { - return e - } - e.buf = enc.AppendUints(enc.AppendKey(e.buf, key), i) - return e -} - -// Uint8 adds the field key with i as a uint8 to the *Event context. -func (e *Event) Uint8(key string, i uint8) *Event { - if e == nil { - return e - } - e.buf = enc.AppendUint8(enc.AppendKey(e.buf, key), i) - return e -} - -// Uints8 adds the field key with i as a []int8 to the *Event context. 
-func (e *Event) Uints8(key string, i []uint8) *Event { - if e == nil { - return e - } - e.buf = enc.AppendUints8(enc.AppendKey(e.buf, key), i) - return e -} - -// Uint16 adds the field key with i as a uint16 to the *Event context. -func (e *Event) Uint16(key string, i uint16) *Event { - if e == nil { - return e - } - e.buf = enc.AppendUint16(enc.AppendKey(e.buf, key), i) - return e -} - -// Uints16 adds the field key with i as a []int16 to the *Event context. -func (e *Event) Uints16(key string, i []uint16) *Event { - if e == nil { - return e - } - e.buf = enc.AppendUints16(enc.AppendKey(e.buf, key), i) - return e -} - -// Uint32 adds the field key with i as a uint32 to the *Event context. -func (e *Event) Uint32(key string, i uint32) *Event { - if e == nil { - return e - } - e.buf = enc.AppendUint32(enc.AppendKey(e.buf, key), i) - return e -} - -// Uints32 adds the field key with i as a []int32 to the *Event context. -func (e *Event) Uints32(key string, i []uint32) *Event { - if e == nil { - return e - } - e.buf = enc.AppendUints32(enc.AppendKey(e.buf, key), i) - return e -} - -// Uint64 adds the field key with i as a uint64 to the *Event context. -func (e *Event) Uint64(key string, i uint64) *Event { - if e == nil { - return e - } - e.buf = enc.AppendUint64(enc.AppendKey(e.buf, key), i) - return e -} - -// Uints64 adds the field key with i as a []int64 to the *Event context. -func (e *Event) Uints64(key string, i []uint64) *Event { - if e == nil { - return e - } - e.buf = enc.AppendUints64(enc.AppendKey(e.buf, key), i) - return e -} - -// Float32 adds the field key with f as a float32 to the *Event context. -func (e *Event) Float32(key string, f float32) *Event { - if e == nil { - return e - } - e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f) - return e -} - -// Floats32 adds the field key with f as a []float32 to the *Event context. -func (e *Event) Floats32(key string, f []float32) *Event { - if e == nil { - return e - } - e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f) - return e -} - -// Float64 adds the field key with f as a float64 to the *Event context. -func (e *Event) Float64(key string, f float64) *Event { - if e == nil { - return e - } - e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f) - return e -} - -// Floats64 adds the field key with f as a []float64 to the *Event context. -func (e *Event) Floats64(key string, f []float64) *Event { - if e == nil { - return e - } - e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f) - return e -} - -// Timestamp adds the current local time as UNIX timestamp to the *Event context with the "time" key. -// To customize the key name, change zerolog.TimestampFieldName. -// -// NOTE: It won't dedupe the "time" key if the *Event (or *Context) has one -// already. -func (e *Event) Timestamp() *Event { - if e == nil { - return e - } - e.buf = enc.AppendTime(enc.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat) - return e -} - -// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat. -func (e *Event) Time(key string, t time.Time) *Event { - if e == nil { - return e - } - e.buf = enc.AppendTime(enc.AppendKey(e.buf, key), t, TimeFieldFormat) - return e -} - -// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat. 
-func (e *Event) Times(key string, t []time.Time) *Event { - if e == nil { - return e - } - e.buf = enc.AppendTimes(enc.AppendKey(e.buf, key), t, TimeFieldFormat) - return e -} - -// Dur adds the field key with duration d stored as zerolog.DurationFieldUnit. -// If zerolog.DurationFieldInteger is true, durations are rendered as integer -// instead of float. -func (e *Event) Dur(key string, d time.Duration) *Event { - if e == nil { - return e - } - e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) - return e -} - -// Durs adds the field key with duration d stored as zerolog.DurationFieldUnit. -// If zerolog.DurationFieldInteger is true, durations are rendered as integer -// instead of float. -func (e *Event) Durs(key string, d []time.Duration) *Event { - if e == nil { - return e - } - e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) - return e -} - -// TimeDiff adds the field key with positive duration between time t and start. -// If time t is not greater than start, duration will be 0. -// Duration format follows the same principle as Dur(). -func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event { - if e == nil { - return e - } - var d time.Duration - if t.After(start) { - d = t.Sub(start) - } - e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) - return e -} - -// Any is a wrapper around Event.Interface. -func (e *Event) Any(key string, i interface{}) *Event { - return e.Interface(key, i) -} - -// Interface adds the field key with i marshaled using reflection. -func (e *Event) Interface(key string, i interface{}) *Event { - if e == nil { - return e - } - if obj, ok := i.(LogObjectMarshaler); ok { - return e.Object(key, obj) - } - e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), i) - return e -} - -// Type adds the field key with val's type using reflection. -func (e *Event) Type(key string, val interface{}) *Event { - if e == nil { - return e - } - e.buf = enc.AppendType(enc.AppendKey(e.buf, key), val) - return e -} - -// CallerSkipFrame instructs any future Caller calls to skip the specified number of frames. -// This includes those added via hooks from the context. -func (e *Event) CallerSkipFrame(skip int) *Event { - if e == nil { - return e - } - e.skipFrame += skip - return e -} - -// Caller adds the file:line of the caller with the zerolog.CallerFieldName key. 
-// The argument skip is the number of stack frames to ascend -// Skip If not passed, use the global variable CallerSkipFrameCount -func (e *Event) Caller(skip ...int) *Event { - sk := CallerSkipFrameCount - if len(skip) > 0 { - sk = skip[0] + CallerSkipFrameCount - } - return e.caller(sk) -} - -func (e *Event) caller(skip int) *Event { - if e == nil { - return e - } - pc, file, line, ok := runtime.Caller(skip + e.skipFrame) - if !ok { - return e - } - e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(pc, file, line)) - return e -} - -// IPAddr adds IPv4 or IPv6 Address to the event -func (e *Event) IPAddr(key string, ip net.IP) *Event { - if e == nil { - return e - } - e.buf = enc.AppendIPAddr(enc.AppendKey(e.buf, key), ip) - return e -} - -// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the event -func (e *Event) IPPrefix(key string, pfx net.IPNet) *Event { - if e == nil { - return e - } - e.buf = enc.AppendIPPrefix(enc.AppendKey(e.buf, key), pfx) - return e -} - -// MACAddr adds MAC address to the event -func (e *Event) MACAddr(key string, ha net.HardwareAddr) *Event { - if e == nil { - return e - } - e.buf = enc.AppendMACAddr(enc.AppendKey(e.buf, key), ha) - return e -} diff --git a/vendor/github.com/rs/zerolog/fields.go b/vendor/github.com/rs/zerolog/fields.go deleted file mode 100644 index c1eb5ce7..00000000 --- a/vendor/github.com/rs/zerolog/fields.go +++ /dev/null @@ -1,277 +0,0 @@ -package zerolog - -import ( - "encoding/json" - "net" - "sort" - "time" - "unsafe" -) - -func isNilValue(i interface{}) bool { - return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0 -} - -func appendFields(dst []byte, fields interface{}) []byte { - switch fields := fields.(type) { - case []interface{}: - if n := len(fields); n&0x1 == 1 { // odd number - fields = fields[:n-1] - } - dst = appendFieldList(dst, fields) - case map[string]interface{}: - keys := make([]string, 0, len(fields)) - for key := range fields { - keys = append(keys, key) - } - sort.Strings(keys) - kv := make([]interface{}, 2) - for _, key := range keys { - kv[0], kv[1] = key, fields[key] - dst = appendFieldList(dst, kv) - } - } - return dst -} - -func appendFieldList(dst []byte, kvList []interface{}) []byte { - for i, n := 0, len(kvList); i < n; i += 2 { - key, val := kvList[i], kvList[i+1] - if key, ok := key.(string); ok { - dst = enc.AppendKey(dst, key) - } else { - continue - } - if val, ok := val.(LogObjectMarshaler); ok { - e := newEvent(nil, 0) - e.buf = e.buf[:0] - e.appendObject(val) - dst = append(dst, e.buf...) - putEvent(e) - continue - } - switch val := val.(type) { - case string: - dst = enc.AppendString(dst, val) - case []byte: - dst = enc.AppendBytes(dst, val) - case error: - switch m := ErrorMarshalFunc(val).(type) { - case LogObjectMarshaler: - e := newEvent(nil, 0) - e.buf = e.buf[:0] - e.appendObject(m) - dst = append(dst, e.buf...) - putEvent(e) - case error: - if m == nil || isNilValue(m) { - dst = enc.AppendNil(dst) - } else { - dst = enc.AppendString(dst, m.Error()) - } - case string: - dst = enc.AppendString(dst, m) - default: - dst = enc.AppendInterface(dst, m) - } - case []error: - dst = enc.AppendArrayStart(dst) - for i, err := range val { - switch m := ErrorMarshalFunc(err).(type) { - case LogObjectMarshaler: - e := newEvent(nil, 0) - e.buf = e.buf[:0] - e.appendObject(m) - dst = append(dst, e.buf...) 
- putEvent(e) - case error: - if m == nil || isNilValue(m) { - dst = enc.AppendNil(dst) - } else { - dst = enc.AppendString(dst, m.Error()) - } - case string: - dst = enc.AppendString(dst, m) - default: - dst = enc.AppendInterface(dst, m) - } - - if i < (len(val) - 1) { - enc.AppendArrayDelim(dst) - } - } - dst = enc.AppendArrayEnd(dst) - case bool: - dst = enc.AppendBool(dst, val) - case int: - dst = enc.AppendInt(dst, val) - case int8: - dst = enc.AppendInt8(dst, val) - case int16: - dst = enc.AppendInt16(dst, val) - case int32: - dst = enc.AppendInt32(dst, val) - case int64: - dst = enc.AppendInt64(dst, val) - case uint: - dst = enc.AppendUint(dst, val) - case uint8: - dst = enc.AppendUint8(dst, val) - case uint16: - dst = enc.AppendUint16(dst, val) - case uint32: - dst = enc.AppendUint32(dst, val) - case uint64: - dst = enc.AppendUint64(dst, val) - case float32: - dst = enc.AppendFloat32(dst, val) - case float64: - dst = enc.AppendFloat64(dst, val) - case time.Time: - dst = enc.AppendTime(dst, val, TimeFieldFormat) - case time.Duration: - dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger) - case *string: - if val != nil { - dst = enc.AppendString(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *bool: - if val != nil { - dst = enc.AppendBool(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *int: - if val != nil { - dst = enc.AppendInt(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *int8: - if val != nil { - dst = enc.AppendInt8(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *int16: - if val != nil { - dst = enc.AppendInt16(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *int32: - if val != nil { - dst = enc.AppendInt32(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *int64: - if val != nil { - dst = enc.AppendInt64(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *uint: - if val != nil { - dst = enc.AppendUint(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *uint8: - if val != nil { - dst = enc.AppendUint8(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *uint16: - if val != nil { - dst = enc.AppendUint16(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *uint32: - if val != nil { - dst = enc.AppendUint32(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *uint64: - if val != nil { - dst = enc.AppendUint64(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *float32: - if val != nil { - dst = enc.AppendFloat32(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *float64: - if val != nil { - dst = enc.AppendFloat64(dst, *val) - } else { - dst = enc.AppendNil(dst) - } - case *time.Time: - if val != nil { - dst = enc.AppendTime(dst, *val, TimeFieldFormat) - } else { - dst = enc.AppendNil(dst) - } - case *time.Duration: - if val != nil { - dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger) - } else { - dst = enc.AppendNil(dst) - } - case []string: - dst = enc.AppendStrings(dst, val) - case []bool: - dst = enc.AppendBools(dst, val) - case []int: - dst = enc.AppendInts(dst, val) - case []int8: - dst = enc.AppendInts8(dst, val) - case []int16: - dst = enc.AppendInts16(dst, val) - case []int32: - dst = enc.AppendInts32(dst, val) - case []int64: - dst = enc.AppendInts64(dst, val) - case []uint: - dst = enc.AppendUints(dst, val) - // case []uint8: - // dst = enc.AppendUints8(dst, val) - case []uint16: - dst = enc.AppendUints16(dst, val) - case []uint32: - dst = enc.AppendUints32(dst, val) - case []uint64: - 
dst = enc.AppendUints64(dst, val) - case []float32: - dst = enc.AppendFloats32(dst, val) - case []float64: - dst = enc.AppendFloats64(dst, val) - case []time.Time: - dst = enc.AppendTimes(dst, val, TimeFieldFormat) - case []time.Duration: - dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger) - case nil: - dst = enc.AppendNil(dst) - case net.IP: - dst = enc.AppendIPAddr(dst, val) - case net.IPNet: - dst = enc.AppendIPPrefix(dst, val) - case net.HardwareAddr: - dst = enc.AppendMACAddr(dst, val) - case json.RawMessage: - dst = appendJSON(dst, val) - default: - dst = enc.AppendInterface(dst, val) - } - } - return dst -} diff --git a/vendor/github.com/rs/zerolog/globals.go b/vendor/github.com/rs/zerolog/globals.go deleted file mode 100644 index e1067deb..00000000 --- a/vendor/github.com/rs/zerolog/globals.go +++ /dev/null @@ -1,142 +0,0 @@ -package zerolog - -import ( - "encoding/json" - "strconv" - "sync/atomic" - "time" -) - -const ( - // TimeFormatUnix defines a time format that makes time fields to be - // serialized as Unix timestamp integers. - TimeFormatUnix = "" - - // TimeFormatUnixMs defines a time format that makes time fields to be - // serialized as Unix timestamp integers in milliseconds. - TimeFormatUnixMs = "UNIXMS" - - // TimeFormatUnixMicro defines a time format that makes time fields to be - // serialized as Unix timestamp integers in microseconds. - TimeFormatUnixMicro = "UNIXMICRO" - - // TimeFormatUnixNano defines a time format that makes time fields to be - // serialized as Unix timestamp integers in nanoseconds. - TimeFormatUnixNano = "UNIXNANO" -) - -var ( - // TimestampFieldName is the field name used for the timestamp field. - TimestampFieldName = "time" - - // LevelFieldName is the field name used for the level field. - LevelFieldName = "level" - - // LevelTraceValue is the value used for the trace level field. - LevelTraceValue = "trace" - // LevelDebugValue is the value used for the debug level field. - LevelDebugValue = "debug" - // LevelInfoValue is the value used for the info level field. - LevelInfoValue = "info" - // LevelWarnValue is the value used for the warn level field. - LevelWarnValue = "warn" - // LevelErrorValue is the value used for the error level field. - LevelErrorValue = "error" - // LevelFatalValue is the value used for the fatal level field. - LevelFatalValue = "fatal" - // LevelPanicValue is the value used for the panic level field. - LevelPanicValue = "panic" - - // LevelFieldMarshalFunc allows customization of global level field marshaling. - LevelFieldMarshalFunc = func(l Level) string { - return l.String() - } - - // MessageFieldName is the field name used for the message field. - MessageFieldName = "message" - - // ErrorFieldName is the field name used for error fields. - ErrorFieldName = "error" - - // CallerFieldName is the field name used for caller field. - CallerFieldName = "caller" - - // CallerSkipFrameCount is the number of stack frames to skip to find the caller. - CallerSkipFrameCount = 2 - - // CallerMarshalFunc allows customization of global caller marshaling - CallerMarshalFunc = func(pc uintptr, file string, line int) string { - return file + ":" + strconv.Itoa(line) - } - - // ErrorStackFieldName is the field name used for error stacks. - ErrorStackFieldName = "stack" - - // ErrorStackMarshaler extract the stack from err if any. 
- ErrorStackMarshaler func(err error) interface{} - - // ErrorMarshalFunc allows customization of global error marshaling - ErrorMarshalFunc = func(err error) interface{} { - return err - } - - // InterfaceMarshalFunc allows customization of interface marshaling. - // Default: "encoding/json.Marshal" - InterfaceMarshalFunc = json.Marshal - - // TimeFieldFormat defines the time format of the Time field type. If set to - // TimeFormatUnix, TimeFormatUnixMs, TimeFormatUnixMicro or TimeFormatUnixNano, the time is formatted as a UNIX - // timestamp as integer. - TimeFieldFormat = time.RFC3339 - - // TimestampFunc defines the function called to generate a timestamp. - TimestampFunc = time.Now - - // DurationFieldUnit defines the unit for time.Duration type fields added - // using the Dur method. - DurationFieldUnit = time.Millisecond - - // DurationFieldInteger renders Dur fields as integer instead of float if - // set to true. - DurationFieldInteger = false - - // ErrorHandler is called whenever zerolog fails to write an event on its - // output. If not set, an error is printed on the stderr. This handler must - // be thread safe and non-blocking. - ErrorHandler func(err error) - - // DefaultContextLogger is returned from Ctx() if there is no logger associated - // with the context. - DefaultContextLogger *Logger -) - -var ( - gLevel = new(int32) - disableSampling = new(int32) -) - -// SetGlobalLevel sets the global override for log level. If this -// values is raised, all Loggers will use at least this value. -// -// To globally disable logs, set GlobalLevel to Disabled. -func SetGlobalLevel(l Level) { - atomic.StoreInt32(gLevel, int32(l)) -} - -// GlobalLevel returns the current global log level -func GlobalLevel() Level { - return Level(atomic.LoadInt32(gLevel)) -} - -// DisableSampling will disable sampling in all Loggers if true. -func DisableSampling(v bool) { - var i int32 - if v { - i = 1 - } - atomic.StoreInt32(disableSampling, i) -} - -func samplingDisabled() bool { - return atomic.LoadInt32(disableSampling) == 1 -} diff --git a/vendor/github.com/rs/zerolog/go112.go b/vendor/github.com/rs/zerolog/go112.go deleted file mode 100644 index e7b5a1bd..00000000 --- a/vendor/github.com/rs/zerolog/go112.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build go1.12 - -package zerolog - -// Since go 1.12, some auto generated init functions are hidden from -// runtime.Caller. -const contextCallerSkipFrameCount = 2 diff --git a/vendor/github.com/rs/zerolog/hook.go b/vendor/github.com/rs/zerolog/hook.go deleted file mode 100644 index ec6effc1..00000000 --- a/vendor/github.com/rs/zerolog/hook.go +++ /dev/null @@ -1,64 +0,0 @@ -package zerolog - -// Hook defines an interface to a log hook. -type Hook interface { - // Run runs the hook with the event. - Run(e *Event, level Level, message string) -} - -// HookFunc is an adaptor to allow the use of an ordinary function -// as a Hook. -type HookFunc func(e *Event, level Level, message string) - -// Run implements the Hook interface. -func (h HookFunc) Run(e *Event, level Level, message string) { - h(e, level, message) -} - -// LevelHook applies a different hook for each level. -type LevelHook struct { - NoLevelHook, TraceHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook -} - -// Run implements the Hook interface. 
-func (h LevelHook) Run(e *Event, level Level, message string) { - switch level { - case TraceLevel: - if h.TraceHook != nil { - h.TraceHook.Run(e, level, message) - } - case DebugLevel: - if h.DebugHook != nil { - h.DebugHook.Run(e, level, message) - } - case InfoLevel: - if h.InfoHook != nil { - h.InfoHook.Run(e, level, message) - } - case WarnLevel: - if h.WarnHook != nil { - h.WarnHook.Run(e, level, message) - } - case ErrorLevel: - if h.ErrorHook != nil { - h.ErrorHook.Run(e, level, message) - } - case FatalLevel: - if h.FatalHook != nil { - h.FatalHook.Run(e, level, message) - } - case PanicLevel: - if h.PanicHook != nil { - h.PanicHook.Run(e, level, message) - } - case NoLevel: - if h.NoLevelHook != nil { - h.NoLevelHook.Run(e, level, message) - } - } -} - -// NewLevelHook returns a new LevelHook. -func NewLevelHook() LevelHook { - return LevelHook{} -} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/README.md b/vendor/github.com/rs/zerolog/internal/cbor/README.md deleted file mode 100644 index 92c2e8c7..00000000 --- a/vendor/github.com/rs/zerolog/internal/cbor/README.md +++ /dev/null @@ -1,56 +0,0 @@ -## Reference: - CBOR Encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049) - -## Comparison of JSON vs CBOR - -Two main areas of reduction are: - -1. CPU usage to write a log msg -2. Size (in bytes) of log messages. - - -CPU Usage savings are below: -``` -name JSON time/op CBOR time/op delta -Info-32 15.3ns ± 1% 11.7ns ± 3% -23.78% (p=0.000 n=9+10) -ContextFields-32 16.2ns ± 2% 12.3ns ± 3% -23.97% (p=0.000 n=9+9) -ContextAppend-32 6.70ns ± 0% 6.20ns ± 0% -7.44% (p=0.000 n=9+9) -LogFields-32 66.4ns ± 0% 24.6ns ± 2% -62.89% (p=0.000 n=10+9) -LogArrayObject-32 911ns ±11% 768ns ± 6% -15.64% (p=0.000 n=10+10) -LogFieldType/Floats-32 70.3ns ± 2% 29.5ns ± 1% -57.98% (p=0.000 n=10+10) -LogFieldType/Err-32 14.0ns ± 3% 12.1ns ± 8% -13.20% (p=0.000 n=8+10) -LogFieldType/Dur-32 17.2ns ± 2% 13.1ns ± 1% -24.27% (p=0.000 n=10+9) -LogFieldType/Object-32 54.3ns ±11% 52.3ns ± 7% ~ (p=0.239 n=10+10) -LogFieldType/Ints-32 20.3ns ± 2% 15.1ns ± 2% -25.50% (p=0.000 n=9+10) -LogFieldType/Interfaces-32 642ns ±11% 621ns ± 9% ~ (p=0.118 n=10+10) -LogFieldType/Interface(Objects)-32 635ns ±13% 632ns ± 9% ~ (p=0.592 n=10+10) -LogFieldType/Times-32 294ns ± 0% 27ns ± 1% -90.71% (p=0.000 n=10+9) -LogFieldType/Durs-32 121ns ± 0% 33ns ± 2% -72.44% (p=0.000 n=9+9) -LogFieldType/Interface(Object)-32 56.6ns ± 8% 52.3ns ± 8% -7.54% (p=0.007 n=10+10) -LogFieldType/Errs-32 17.8ns ± 3% 16.1ns ± 2% -9.71% (p=0.000 n=10+9) -LogFieldType/Time-32 40.5ns ± 1% 12.7ns ± 6% -68.66% (p=0.000 n=8+9) -LogFieldType/Bool-32 12.0ns ± 5% 10.2ns ± 2% -15.18% (p=0.000 n=10+8) -LogFieldType/Bools-32 17.2ns ± 2% 12.6ns ± 4% -26.63% (p=0.000 n=10+10) -LogFieldType/Int-32 12.3ns ± 2% 11.2ns ± 4% -9.27% (p=0.000 n=9+10) -LogFieldType/Float-32 16.7ns ± 1% 12.6ns ± 2% -24.42% (p=0.000 n=7+9) -LogFieldType/Str-32 12.7ns ± 7% 11.3ns ± 7% -10.88% (p=0.000 n=10+9) -LogFieldType/Strs-32 20.3ns ± 3% 18.2ns ± 3% -10.25% (p=0.000 n=9+10) -LogFieldType/Interface-32 183ns ±12% 175ns ± 9% ~ (p=0.078 n=10+10) -``` - -Log message size savings is greatly dependent on the number and type of fields in the log message. -Assuming this log message (with an Integer, timestamp and string, in addition to level). 
- -`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}` - -Two measurements were done for the log file sizes - one without any compression, second -using [compress/zlib](https://golang.org/pkg/compress/zlib/). - -Results for 10,000 log messages: - -| Log Format | Plain File Size (in KB) | Compressed File Size (in KB) | -| :--- | :---: | :---: | -| JSON | 920 | 28 | -| CBOR | 550 | 28 | - -The example used to calculate the above data is available in [Examples](examples). diff --git a/vendor/github.com/rs/zerolog/internal/cbor/base.go b/vendor/github.com/rs/zerolog/internal/cbor/base.go deleted file mode 100644 index 51fe86c9..00000000 --- a/vendor/github.com/rs/zerolog/internal/cbor/base.go +++ /dev/null @@ -1,19 +0,0 @@ -package cbor - -// JSONMarshalFunc is used to marshal interface to JSON encoded byte slice. -// Making it package level instead of embedded in Encoder brings -// some extra efforts at importing, but avoids value copy when the functions -// of Encoder being invoked. -// DO REMEMBER to set this variable at importing, or -// you might get a nil pointer dereference panic at runtime. -var JSONMarshalFunc func(v interface{}) ([]byte, error) - -type Encoder struct{} - -// AppendKey adds a key (string) to the binary encoded log message -func (e Encoder) AppendKey(dst []byte, key string) []byte { - if len(dst) < 1 { - dst = e.AppendBeginMarker(dst) - } - return e.AppendString(dst, key) -} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/cbor.go b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go deleted file mode 100644 index 1bf14438..00000000 --- a/vendor/github.com/rs/zerolog/internal/cbor/cbor.go +++ /dev/null @@ -1,102 +0,0 @@ -// Package cbor provides primitives for storing different data -// in the CBOR (binary) format. CBOR is defined in RFC7049. -package cbor - -import "time" - -const ( - majorOffset = 5 - additionalMax = 23 - - // Non Values. - additionalTypeBoolFalse byte = 20 - additionalTypeBoolTrue byte = 21 - additionalTypeNull byte = 22 - - // Integer (+ve and -ve) Sub-types. - additionalTypeIntUint8 byte = 24 - additionalTypeIntUint16 byte = 25 - additionalTypeIntUint32 byte = 26 - additionalTypeIntUint64 byte = 27 - - // Float Sub-types. - additionalTypeFloat16 byte = 25 - additionalTypeFloat32 byte = 26 - additionalTypeFloat64 byte = 27 - additionalTypeBreak byte = 31 - - // Tag Sub-types. - additionalTypeTimestamp byte = 01 - additionalTypeEmbeddedCBOR byte = 63 - - // Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml - additionalTypeTagNetworkAddr uint16 = 260 - additionalTypeTagNetworkPrefix uint16 = 261 - additionalTypeEmbeddedJSON uint16 = 262 - additionalTypeTagHexString uint16 = 263 - - // Unspecified number of elements. 
- additionalTypeInfiniteCount byte = 31 -) -const ( - majorTypeUnsignedInt byte = iota << majorOffset // Major type 0 - majorTypeNegativeInt // Major type 1 - majorTypeByteString // Major type 2 - majorTypeUtf8String // Major type 3 - majorTypeArray // Major type 4 - majorTypeMap // Major type 5 - majorTypeTags // Major type 6 - majorTypeSimpleAndFloat // Major type 7 -) - -const ( - maskOutAdditionalType byte = (7 << majorOffset) - maskOutMajorType byte = 31 -) - -const ( - float32Nan = "\xfa\x7f\xc0\x00\x00" - float32PosInfinity = "\xfa\x7f\x80\x00\x00" - float32NegInfinity = "\xfa\xff\x80\x00\x00" - float64Nan = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00" - float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00" - float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00" -) - -// IntegerTimeFieldFormat indicates the format of timestamp decoded -// from an integer (time in seconds). -var IntegerTimeFieldFormat = time.RFC3339 - -// NanoTimeFieldFormat indicates the format of timestamp decoded -// from a float value (time in seconds and nanoseconds). -var NanoTimeFieldFormat = time.RFC3339Nano - -func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte { - byteCount := 8 - var minor byte - switch { - case number < 256: - byteCount = 1 - minor = additionalTypeIntUint8 - - case number < 65536: - byteCount = 2 - minor = additionalTypeIntUint16 - - case number < 4294967296: - byteCount = 4 - minor = additionalTypeIntUint32 - - default: - byteCount = 8 - minor = additionalTypeIntUint64 - - } - - dst = append(dst, major|minor) - byteCount-- - for ; byteCount >= 0; byteCount-- { - dst = append(dst, byte(number>>(uint(byteCount)*8))) - } - return dst -} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go deleted file mode 100644 index 616bed65..00000000 --- a/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go +++ /dev/null @@ -1,654 +0,0 @@ -package cbor - -// This file contains code to decode a stream of CBOR Data into JSON. - -import ( - "bufio" - "bytes" - "encoding/base64" - "fmt" - "io" - "math" - "net" - "runtime" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -var decodeTimeZone *time.Location - -const hexTable = "0123456789abcdef" - -const isFloat32 = 4 -const isFloat64 = 8 - -func readNBytes(src *bufio.Reader, n int) []byte { - ret := make([]byte, n) - for i := 0; i < n; i++ { - ch, e := src.ReadByte() - if e != nil { - panic(fmt.Errorf("Tried to Read %d Bytes.. But hit end of file", n)) - } - ret[i] = ch - } - return ret -} - -func readByte(src *bufio.Reader) byte { - b, e := src.ReadByte() - if e != nil { - panic(fmt.Errorf("Tried to Read 1 Byte.. 
But hit end of file")) - } - return b -} - -func decodeIntAdditionalType(src *bufio.Reader, minor byte) int64 { - val := int64(0) - if minor <= 23 { - val = int64(minor) - } else { - bytesToRead := 0 - switch minor { - case additionalTypeIntUint8: - bytesToRead = 1 - case additionalTypeIntUint16: - bytesToRead = 2 - case additionalTypeIntUint32: - bytesToRead = 4 - case additionalTypeIntUint64: - bytesToRead = 8 - default: - panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor)) - } - pb := readNBytes(src, bytesToRead) - for i := 0; i < bytesToRead; i++ { - val = val * 256 - val += int64(pb[i]) - } - } - return val -} - -func decodeInteger(src *bufio.Reader) int64 { - pb := readByte(src) - major := pb & maskOutAdditionalType - minor := pb & maskOutMajorType - if major != majorTypeUnsignedInt && major != majorTypeNegativeInt { - panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major)) - } - val := decodeIntAdditionalType(src, minor) - if major == 0 { - return val - } - return (-1 - val) -} - -func decodeFloat(src *bufio.Reader) (float64, int) { - pb := readByte(src) - major := pb & maskOutAdditionalType - minor := pb & maskOutMajorType - if major != majorTypeSimpleAndFloat { - panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major)) - } - - switch minor { - case additionalTypeFloat16: - panic(fmt.Errorf("float16 is not suppported in decodeFloat")) - - case additionalTypeFloat32: - pb := readNBytes(src, 4) - switch string(pb) { - case float32Nan: - return math.NaN(), isFloat32 - case float32PosInfinity: - return math.Inf(0), isFloat32 - case float32NegInfinity: - return math.Inf(-1), isFloat32 - } - n := uint32(0) - for i := 0; i < 4; i++ { - n = n * 256 - n += uint32(pb[i]) - } - val := math.Float32frombits(n) - return float64(val), isFloat32 - case additionalTypeFloat64: - pb := readNBytes(src, 8) - switch string(pb) { - case float64Nan: - return math.NaN(), isFloat64 - case float64PosInfinity: - return math.Inf(0), isFloat64 - case float64NegInfinity: - return math.Inf(-1), isFloat64 - } - n := uint64(0) - for i := 0; i < 8; i++ { - n = n * 256 - n += uint64(pb[i]) - } - val := math.Float64frombits(n) - return val, isFloat64 - } - panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor)) -} - -func decodeStringComplex(dst []byte, s string, pos uint) []byte { - i := int(pos) - start := 0 - - for i < len(s) { - b := s[i] - if b >= utf8.RuneSelf { - r, size := utf8.DecodeRuneInString(s[i:]) - if r == utf8.RuneError && size == 1 { - // In case of error, first append previous simple characters to - // the byte slice if any and append a replacement character code - // in place of the invalid sequence. - if start < i { - dst = append(dst, s[start:i]...) - } - dst = append(dst, `\ufffd`...) - i += size - start = i - continue - } - i += size - continue - } - if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' { - i++ - continue - } - // We encountered a character that needs to be encoded. - // Let's append the previous simple characters to the byte slice - // and switch our operation to read and encode the remainder - // characters byte-by-byte. - if start < i { - dst = append(dst, s[start:i]...) 
- } - switch b { - case '"', '\\': - dst = append(dst, '\\', b) - case '\b': - dst = append(dst, '\\', 'b') - case '\f': - dst = append(dst, '\\', 'f') - case '\n': - dst = append(dst, '\\', 'n') - case '\r': - dst = append(dst, '\\', 'r') - case '\t': - dst = append(dst, '\\', 't') - default: - dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF]) - } - i++ - start = i - } - if start < len(s) { - dst = append(dst, s[start:]...) - } - return dst -} - -func decodeString(src *bufio.Reader, noQuotes bool) []byte { - pb := readByte(src) - major := pb & maskOutAdditionalType - minor := pb & maskOutMajorType - if major != majorTypeByteString { - panic(fmt.Errorf("Major type is: %d in decodeString", major)) - } - result := []byte{} - if !noQuotes { - result = append(result, '"') - } - length := decodeIntAdditionalType(src, minor) - len := int(length) - pbs := readNBytes(src, len) - result = append(result, pbs...) - if noQuotes { - return result - } - return append(result, '"') -} -func decodeStringToDataUrl(src *bufio.Reader, mimeType string) []byte { - pb := readByte(src) - major := pb & maskOutAdditionalType - minor := pb & maskOutMajorType - if major != majorTypeByteString { - panic(fmt.Errorf("Major type is: %d in decodeString", major)) - } - length := decodeIntAdditionalType(src, minor) - l := int(length) - enc := base64.StdEncoding - lEnc := enc.EncodedLen(l) - result := make([]byte, len("\"data:;base64,\"")+len(mimeType)+lEnc) - dest := result - u := copy(dest, "\"data:") - dest = dest[u:] - u = copy(dest, mimeType) - dest = dest[u:] - u = copy(dest, ";base64,") - dest = dest[u:] - pbs := readNBytes(src, l) - enc.Encode(dest, pbs) - dest = dest[lEnc:] - dest[0] = '"' - return result -} - -func decodeUTF8String(src *bufio.Reader) []byte { - pb := readByte(src) - major := pb & maskOutAdditionalType - minor := pb & maskOutMajorType - if major != majorTypeUtf8String { - panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major)) - } - result := []byte{'"'} - length := decodeIntAdditionalType(src, minor) - len := int(length) - pbs := readNBytes(src, len) - - for i := 0; i < len; i++ { - // Check if the character needs encoding. Control characters, slashes, - // and the double quote need json encoding. Bytes above the ascii - // boundary needs utf8 encoding. - if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' { - // We encountered a character that needs to be encoded. Switch - // to complex version of the algorithm. - dst := []byte{'"'} - dst = decodeStringComplex(dst, string(pbs), uint(i)) - return append(dst, '"') - } - } - // The string has no need for encoding and therefore is directly - // appended to the byte slice. - result = append(result, pbs...) 
- return append(result, '"') -} - -func array2Json(src *bufio.Reader, dst io.Writer) { - dst.Write([]byte{'['}) - pb := readByte(src) - major := pb & maskOutAdditionalType - minor := pb & maskOutMajorType - if major != majorTypeArray { - panic(fmt.Errorf("Major type is: %d in array2Json", major)) - } - len := 0 - unSpecifiedCount := false - if minor == additionalTypeInfiniteCount { - unSpecifiedCount = true - } else { - length := decodeIntAdditionalType(src, minor) - len = int(length) - } - for i := 0; unSpecifiedCount || i < len; i++ { - if unSpecifiedCount { - pb, e := src.Peek(1) - if e != nil { - panic(e) - } - if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { - readByte(src) - break - } - } - cbor2JsonOneObject(src, dst) - if unSpecifiedCount { - pb, e := src.Peek(1) - if e != nil { - panic(e) - } - if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { - readByte(src) - break - } - dst.Write([]byte{','}) - } else if i+1 < len { - dst.Write([]byte{','}) - } - } - dst.Write([]byte{']'}) -} - -func map2Json(src *bufio.Reader, dst io.Writer) { - pb := readByte(src) - major := pb & maskOutAdditionalType - minor := pb & maskOutMajorType - if major != majorTypeMap { - panic(fmt.Errorf("Major type is: %d in map2Json", major)) - } - len := 0 - unSpecifiedCount := false - if minor == additionalTypeInfiniteCount { - unSpecifiedCount = true - } else { - length := decodeIntAdditionalType(src, minor) - len = int(length) - } - dst.Write([]byte{'{'}) - for i := 0; unSpecifiedCount || i < len; i++ { - if unSpecifiedCount { - pb, e := src.Peek(1) - if e != nil { - panic(e) - } - if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { - readByte(src) - break - } - } - cbor2JsonOneObject(src, dst) - if i%2 == 0 { - // Even position values are keys. - dst.Write([]byte{':'}) - } else { - if unSpecifiedCount { - pb, e := src.Peek(1) - if e != nil { - panic(e) - } - if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { - readByte(src) - break - } - dst.Write([]byte{','}) - } else if i+1 < len { - dst.Write([]byte{','}) - } - } - } - dst.Write([]byte{'}'}) -} - -func decodeTagData(src *bufio.Reader) []byte { - pb := readByte(src) - major := pb & maskOutAdditionalType - minor := pb & maskOutMajorType - if major != majorTypeTags { - panic(fmt.Errorf("Major type is: %d in decodeTagData", major)) - } - switch minor { - case additionalTypeTimestamp: - return decodeTimeStamp(src) - case additionalTypeIntUint8: - val := decodeIntAdditionalType(src, minor) - switch byte(val) { - case additionalTypeEmbeddedCBOR: - pb := readByte(src) - dataMajor := pb & maskOutAdditionalType - if dataMajor != majorTypeByteString { - panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedCBOR", dataMajor)) - } - src.UnreadByte() - return decodeStringToDataUrl(src, "application/cbor") - default: - panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val)) - } - - // Tag value is larger than 256 (so uint16). - case additionalTypeIntUint16: - val := decodeIntAdditionalType(src, minor) - - switch uint16(val) { - case additionalTypeEmbeddedJSON: - pb := readByte(src) - dataMajor := pb & maskOutAdditionalType - if dataMajor != majorTypeByteString { - panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor)) - } - src.UnreadByte() - return decodeString(src, true) - - case additionalTypeTagNetworkAddr: - octets := decodeString(src, true) - ss := []byte{'"'} - switch len(octets) { - case 6: // MAC address. 
- ha := net.HardwareAddr(octets) - ss = append(append(ss, ha.String()...), '"') - case 4: // IPv4 address. - fallthrough - case 16: // IPv6 address. - ip := net.IP(octets) - ss = append(append(ss, ip.String()...), '"') - default: - panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4,6,16)", len(octets))) - } - return ss - - case additionalTypeTagNetworkPrefix: - pb := readByte(src) - if pb != majorTypeMap|0x1 { - panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected")) - } - octets := decodeString(src, true) - val := decodeInteger(src) - ip := net.IP(octets) - var mask net.IPMask - pfxLen := int(val) - if len(octets) == 4 { - mask = net.CIDRMask(pfxLen, 32) - } else { - mask = net.CIDRMask(pfxLen, 128) - } - ipPfx := net.IPNet{IP: ip, Mask: mask} - ss := []byte{'"'} - ss = append(append(ss, ipPfx.String()...), '"') - return ss - - case additionalTypeTagHexString: - octets := decodeString(src, true) - ss := []byte{'"'} - for _, v := range octets { - ss = append(ss, hexTable[v>>4], hexTable[v&0x0f]) - } - return append(ss, '"') - - default: - panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val)) - } - } - panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor)) -} - -func decodeTimeStamp(src *bufio.Reader) []byte { - pb := readByte(src) - src.UnreadByte() - tsMajor := pb & maskOutAdditionalType - if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt { - n := decodeInteger(src) - t := time.Unix(n, 0) - if decodeTimeZone != nil { - t = t.In(decodeTimeZone) - } else { - t = t.In(time.UTC) - } - tsb := []byte{} - tsb = append(tsb, '"') - tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat) - tsb = append(tsb, '"') - return tsb - } else if tsMajor == majorTypeSimpleAndFloat { - n, _ := decodeFloat(src) - secs := int64(n) - n -= float64(secs) - n *= float64(1e9) - t := time.Unix(secs, int64(n)) - if decodeTimeZone != nil { - t = t.In(decodeTimeZone) - } else { - t = t.In(time.UTC) - } - tsb := []byte{} - tsb = append(tsb, '"') - tsb = t.AppendFormat(tsb, NanoTimeFieldFormat) - tsb = append(tsb, '"') - return tsb - } - panic(fmt.Errorf("TS format is neigther int nor float: %d", tsMajor)) -} - -func decodeSimpleFloat(src *bufio.Reader) []byte { - pb := readByte(src) - major := pb & maskOutAdditionalType - minor := pb & maskOutMajorType - if major != majorTypeSimpleAndFloat { - panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major)) - } - switch minor { - case additionalTypeBoolTrue: - return []byte("true") - case additionalTypeBoolFalse: - return []byte("false") - case additionalTypeNull: - return []byte("null") - case additionalTypeFloat16: - fallthrough - case additionalTypeFloat32: - fallthrough - case additionalTypeFloat64: - src.UnreadByte() - v, bc := decodeFloat(src) - ba := []byte{} - switch { - case math.IsNaN(v): - return []byte("\"NaN\"") - case math.IsInf(v, 1): - return []byte("\"+Inf\"") - case math.IsInf(v, -1): - return []byte("\"-Inf\"") - } - if bc == isFloat32 { - ba = strconv.AppendFloat(ba, v, 'f', -1, 32) - } else if bc == isFloat64 { - ba = strconv.AppendFloat(ba, v, 'f', -1, 64) - } else { - panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc)) - } - return ba - default: - panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor)) - } -} - -func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) { - pb, e := src.Peek(1) - if e != nil { - panic(e) - } - major := (pb[0] & maskOutAdditionalType) - - switch major { - case majorTypeUnsignedInt: - 
fallthrough - case majorTypeNegativeInt: - n := decodeInteger(src) - dst.Write([]byte(strconv.Itoa(int(n)))) - - case majorTypeByteString: - s := decodeString(src, false) - dst.Write(s) - - case majorTypeUtf8String: - s := decodeUTF8String(src) - dst.Write(s) - - case majorTypeArray: - array2Json(src, dst) - - case majorTypeMap: - map2Json(src, dst) - - case majorTypeTags: - s := decodeTagData(src) - dst.Write(s) - - case majorTypeSimpleAndFloat: - s := decodeSimpleFloat(src) - dst.Write(s) - } -} - -func moreBytesToRead(src *bufio.Reader) bool { - _, e := src.ReadByte() - if e == nil { - src.UnreadByte() - return true - } - return false -} - -// Cbor2JsonManyObjects decodes all the CBOR Objects read from src -// reader. It keeps on decoding until reader returns EOF (error when reading). -// Decoded string is written to the dst. At the end of every CBOR Object -// newline is written to the output stream. -// -// Returns error (if any) that was encountered during decode. -// The child functions will generate a panic when error is encountered and -// this function will recover non-runtime Errors and return the reason as error. -func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - bufRdr := bufio.NewReader(src) - for moreBytesToRead(bufRdr) { - cbor2JsonOneObject(bufRdr, dst) - dst.Write([]byte("\n")) - } - return nil -} - -// Detect if the bytes to be printed is Binary or not. -func binaryFmt(p []byte) bool { - if len(p) > 0 && p[0] > 0x7F { - return true - } - return false -} - -func getReader(str string) *bufio.Reader { - return bufio.NewReader(strings.NewReader(str)) -} - -// DecodeIfBinaryToString converts a binary formatted log msg to a -// JSON formatted String Log message - suitable for printing to Console/Syslog. -func DecodeIfBinaryToString(in []byte) string { - if binaryFmt(in) { - var b bytes.Buffer - Cbor2JsonManyObjects(strings.NewReader(string(in)), &b) - return b.String() - } - return string(in) -} - -// DecodeObjectToStr checks if the input is a binary format, if so, -// it will decode a single Object and return the decoded string. -func DecodeObjectToStr(in []byte) string { - if binaryFmt(in) { - var b bytes.Buffer - cbor2JsonOneObject(getReader(string(in)), &b) - return b.String() - } - return string(in) -} - -// DecodeIfBinaryToBytes checks if the input is a binary format, if so, -// it will decode all Objects and return the decoded string as byte array. -func DecodeIfBinaryToBytes(in []byte) []byte { - if binaryFmt(in) { - var b bytes.Buffer - Cbor2JsonManyObjects(bytes.NewReader(in), &b) - return b.Bytes() - } - return in -} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/string.go b/vendor/github.com/rs/zerolog/internal/cbor/string.go deleted file mode 100644 index 9fc9a4f8..00000000 --- a/vendor/github.com/rs/zerolog/internal/cbor/string.go +++ /dev/null @@ -1,117 +0,0 @@ -package cbor - -import "fmt" - -// AppendStrings encodes and adds an array of strings to the dst byte array. -func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { - major := majorTypeArray - l := len(vals) - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendString(dst, v) - } - return dst -} - -// AppendString encodes and adds a string to the dst byte array. 
-func (Encoder) AppendString(dst []byte, s string) []byte { - major := majorTypeUtf8String - - l := len(s) - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l)) - } - return append(dst, s...) -} - -// AppendStringers encodes and adds an array of Stringer values -// to the dst byte array. -func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte { - if len(vals) == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - dst = e.AppendArrayStart(dst) - dst = e.AppendStringer(dst, vals[0]) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = e.AppendStringer(dst, val) - } - } - return e.AppendArrayEnd(dst) -} - -// AppendStringer encodes and adds the Stringer value to the dst -// byte array. -func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte { - if val == nil { - return e.AppendNil(dst) - } - return e.AppendString(dst, val.String()) -} - -// AppendBytes encodes and adds an array of bytes to the dst byte array. -func (Encoder) AppendBytes(dst, s []byte) []byte { - major := majorTypeByteString - - l := len(s) - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - return append(dst, s...) -} - -// AppendEmbeddedJSON adds a tag and embeds input JSON as such. -func AppendEmbeddedJSON(dst, s []byte) []byte { - major := majorTypeTags - minor := additionalTypeEmbeddedJSON - - // Append the TAG to indicate this is Embedded JSON. - dst = append(dst, major|additionalTypeIntUint16) - dst = append(dst, byte(minor>>8)) - dst = append(dst, byte(minor&0xff)) - - // Append the JSON Object as Byte String. - major = majorTypeByteString - - l := len(s) - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - return append(dst, s...) -} - -// AppendEmbeddedCBOR adds a tag and embeds input CBOR as such. -func AppendEmbeddedCBOR(dst, s []byte) []byte { - major := majorTypeTags - minor := additionalTypeEmbeddedCBOR - - // Append the TAG to indicate this is Embedded JSON. - dst = append(dst, major|additionalTypeIntUint8) - dst = append(dst, minor) - - // Append the CBOR Object as Byte String. - major = majorTypeByteString - - l := len(s) - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - return append(dst, s...) 
-} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/time.go b/vendor/github.com/rs/zerolog/internal/cbor/time.go deleted file mode 100644 index d81fb125..00000000 --- a/vendor/github.com/rs/zerolog/internal/cbor/time.go +++ /dev/null @@ -1,93 +0,0 @@ -package cbor - -import ( - "time" -) - -func appendIntegerTimestamp(dst []byte, t time.Time) []byte { - major := majorTypeTags - minor := additionalTypeTimestamp - dst = append(dst, major|minor) - secs := t.Unix() - var val uint64 - if secs < 0 { - major = majorTypeNegativeInt - val = uint64(-secs - 1) - } else { - major = majorTypeUnsignedInt - val = uint64(secs) - } - dst = appendCborTypePrefix(dst, major, val) - return dst -} - -func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte { - major := majorTypeTags - minor := additionalTypeTimestamp - dst = append(dst, major|minor) - secs := t.Unix() - nanos := t.Nanosecond() - var val float64 - val = float64(secs)*1.0 + float64(nanos)*1e-9 - return e.AppendFloat64(dst, val) -} - -// AppendTime encodes and adds a timestamp to the dst byte array. -func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte { - utc := t.UTC() - if utc.Nanosecond() == 0 { - return appendIntegerTimestamp(dst, utc) - } - return e.appendFloatTimestamp(dst, utc) -} - -// AppendTimes encodes and adds an array of timestamps to the dst byte array. -func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - - for _, t := range vals { - dst = e.AppendTime(dst, t, unused) - } - return dst -} - -// AppendDuration encodes and adds a duration to the dst byte array. -// useInt field indicates whether to store the duration as seconds (integer) or -// as seconds+nanoseconds (float). -func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { - if useInt { - return e.AppendInt64(dst, int64(d/unit)) - } - return e.AppendFloat64(dst, float64(d)/float64(unit)) -} - -// AppendDurations encodes and adds an array of durations to the dst byte array. -// useInt field indicates whether to store the duration as seconds (integer) or -// as seconds+nanoseconds (float). -func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, d := range vals { - dst = e.AppendDuration(dst, d, unit, useInt) - } - return dst -} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/types.go b/vendor/github.com/rs/zerolog/internal/cbor/types.go deleted file mode 100644 index 6f538328..00000000 --- a/vendor/github.com/rs/zerolog/internal/cbor/types.go +++ /dev/null @@ -1,486 +0,0 @@ -package cbor - -import ( - "fmt" - "math" - "net" - "reflect" -) - -// AppendNil inserts a 'Nil' object into the dst byte array. -func (Encoder) AppendNil(dst []byte) []byte { - return append(dst, majorTypeSimpleAndFloat|additionalTypeNull) -} - -// AppendBeginMarker inserts a map start into the dst byte array. 
-func (Encoder) AppendBeginMarker(dst []byte) []byte { - return append(dst, majorTypeMap|additionalTypeInfiniteCount) -} - -// AppendEndMarker inserts a map end into the dst byte array. -func (Encoder) AppendEndMarker(dst []byte) []byte { - return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak) -} - -// AppendObjectData takes an object in form of a byte array and appends to dst. -func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { - // BeginMarker is present in the dst, which - // should not be copied when appending to existing data. - return append(dst, o[1:]...) -} - -// AppendArrayStart adds markers to indicate the start of an array. -func (Encoder) AppendArrayStart(dst []byte) []byte { - return append(dst, majorTypeArray|additionalTypeInfiniteCount) -} - -// AppendArrayEnd adds markers to indicate the end of an array. -func (Encoder) AppendArrayEnd(dst []byte) []byte { - return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak) -} - -// AppendArrayDelim adds markers to indicate end of a particular array element. -func (Encoder) AppendArrayDelim(dst []byte) []byte { - //No delimiters needed in cbor - return dst -} - -// AppendLineBreak is a noop that keep API compat with json encoder. -func (Encoder) AppendLineBreak(dst []byte) []byte { - // No line breaks needed in binary format. - return dst -} - -// AppendBool encodes and inserts a boolean value into the dst byte array. -func (Encoder) AppendBool(dst []byte, val bool) []byte { - b := additionalTypeBoolFalse - if val { - b = additionalTypeBoolTrue - } - return append(dst, majorTypeSimpleAndFloat|b) -} - -// AppendBools encodes and inserts an array of boolean values into the dst byte array. -func (e Encoder) AppendBools(dst []byte, vals []bool) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendBool(dst, v) - } - return dst -} - -// AppendInt encodes and inserts an integer value into the dst byte array. -func (Encoder) AppendInt(dst []byte, val int) []byte { - major := majorTypeUnsignedInt - contentVal := val - if val < 0 { - major = majorTypeNegativeInt - contentVal = -val - 1 - } - if contentVal <= additionalMax { - lb := byte(contentVal) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(contentVal)) - } - return dst -} - -// AppendInts encodes and inserts an array of integer values into the dst byte array. -func (e Encoder) AppendInts(dst []byte, vals []int) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendInt(dst, v) - } - return dst -} - -// AppendInt8 encodes and inserts an int8 value into the dst byte array. -func (e Encoder) AppendInt8(dst []byte, val int8) []byte { - return e.AppendInt(dst, int(val)) -} - -// AppendInts8 encodes and inserts an array of integer values into the dst byte array. 
-func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendInt(dst, int(v)) - } - return dst -} - -// AppendInt16 encodes and inserts a int16 value into the dst byte array. -func (e Encoder) AppendInt16(dst []byte, val int16) []byte { - return e.AppendInt(dst, int(val)) -} - -// AppendInts16 encodes and inserts an array of int16 values into the dst byte array. -func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendInt(dst, int(v)) - } - return dst -} - -// AppendInt32 encodes and inserts a int32 value into the dst byte array. -func (e Encoder) AppendInt32(dst []byte, val int32) []byte { - return e.AppendInt(dst, int(val)) -} - -// AppendInts32 encodes and inserts an array of int32 values into the dst byte array. -func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendInt(dst, int(v)) - } - return dst -} - -// AppendInt64 encodes and inserts a int64 value into the dst byte array. -func (Encoder) AppendInt64(dst []byte, val int64) []byte { - major := majorTypeUnsignedInt - contentVal := val - if val < 0 { - major = majorTypeNegativeInt - contentVal = -val - 1 - } - if contentVal <= additionalMax { - lb := byte(contentVal) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(contentVal)) - } - return dst -} - -// AppendInts64 encodes and inserts an array of int64 values into the dst byte array. -func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendInt64(dst, v) - } - return dst -} - -// AppendUint encodes and inserts an unsigned integer value into the dst byte array. -func (e Encoder) AppendUint(dst []byte, val uint) []byte { - return e.AppendInt64(dst, int64(val)) -} - -// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array. -func (e Encoder) AppendUints(dst []byte, vals []uint) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendUint(dst, v) - } - return dst -} - -// AppendUint8 encodes and inserts a unsigned int8 value into the dst byte array. 
-func (e Encoder) AppendUint8(dst []byte, val uint8) []byte { - return e.AppendUint(dst, uint(val)) -} - -// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array. -func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendUint8(dst, v) - } - return dst -} - -// AppendUint16 encodes and inserts a uint16 value into the dst byte array. -func (e Encoder) AppendUint16(dst []byte, val uint16) []byte { - return e.AppendUint(dst, uint(val)) -} - -// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array. -func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendUint16(dst, v) - } - return dst -} - -// AppendUint32 encodes and inserts a uint32 value into the dst byte array. -func (e Encoder) AppendUint32(dst []byte, val uint32) []byte { - return e.AppendUint(dst, uint(val)) -} - -// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array. -func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendUint32(dst, v) - } - return dst -} - -// AppendUint64 encodes and inserts a uint64 value into the dst byte array. -func (Encoder) AppendUint64(dst []byte, val uint64) []byte { - major := majorTypeUnsignedInt - contentVal := val - if contentVal <= additionalMax { - lb := byte(contentVal) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, contentVal) - } - return dst -} - -// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array. -func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendUint64(dst, v) - } - return dst -} - -// AppendFloat32 encodes and inserts a single precision float value into the dst byte array. -func (Encoder) AppendFloat32(dst []byte, val float32) []byte { - switch { - case math.IsNaN(float64(val)): - return append(dst, "\xfa\x7f\xc0\x00\x00"...) - case math.IsInf(float64(val), 1): - return append(dst, "\xfa\x7f\x80\x00\x00"...) - case math.IsInf(float64(val), -1): - return append(dst, "\xfa\xff\x80\x00\x00"...) 
- } - major := majorTypeSimpleAndFloat - subType := additionalTypeFloat32 - n := math.Float32bits(val) - var buf [4]byte - for i := uint(0); i < 4; i++ { - buf[i] = byte(n >> ((3 - i) * 8)) - } - return append(append(dst, major|subType), buf[0], buf[1], buf[2], buf[3]) -} - -// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array. -func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendFloat32(dst, v) - } - return dst -} - -// AppendFloat64 encodes and inserts a double precision float value into the dst byte array. -func (Encoder) AppendFloat64(dst []byte, val float64) []byte { - switch { - case math.IsNaN(val): - return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...) - case math.IsInf(val, 1): - return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...) - case math.IsInf(val, -1): - return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...) - } - major := majorTypeSimpleAndFloat - subType := additionalTypeFloat64 - n := math.Float64bits(val) - dst = append(dst, major|subType) - for i := uint(1); i <= 8; i++ { - b := byte(n >> ((8 - i) * 8)) - dst = append(dst, b) - } - return dst -} - -// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array. -func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte { - major := majorTypeArray - l := len(vals) - if l == 0 { - return e.AppendArrayEnd(e.AppendArrayStart(dst)) - } - if l <= additionalMax { - lb := byte(l) - dst = append(dst, major|lb) - } else { - dst = appendCborTypePrefix(dst, major, uint64(l)) - } - for _, v := range vals { - dst = e.AppendFloat64(dst, v) - } - return dst -} - -// AppendInterface takes an arbitrary object and converts it to JSON and embeds it dst. -func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { - marshaled, err := JSONMarshalFunc(i) - if err != nil { - return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) - } - return AppendEmbeddedJSON(dst, marshaled) -} - -// AppendType appends the parameter type (as a string) to the input byte slice. -func (e Encoder) AppendType(dst []byte, i interface{}) []byte { - if i == nil { - return e.AppendString(dst, "") - } - return e.AppendString(dst, reflect.TypeOf(i).String()) -} - -// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6). -func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { - dst = append(dst, majorTypeTags|additionalTypeIntUint16) - dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) - dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) - return e.AppendBytes(dst, ip) -} - -// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length). -func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { - dst = append(dst, majorTypeTags|additionalTypeIntUint16) - dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8)) - dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff)) - - // Prefix is a tuple (aka MAP of 1 pair of elements) - - // first element is prefix, second is mask length. 
- dst = append(dst, majorTypeMap|0x1) - dst = e.AppendBytes(dst, pfx.IP) - maskLen, _ := pfx.Mask.Size() - return e.AppendUint8(dst, uint8(maskLen)) -} - -// AppendMACAddr encodes and inserts a Hardware (MAC) address. -func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { - dst = append(dst, majorTypeTags|additionalTypeIntUint16) - dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) - dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) - return e.AppendBytes(dst, ha) -} - -// AppendHex adds a TAG and inserts a hex bytes as a string. -func (e Encoder) AppendHex(dst []byte, val []byte) []byte { - dst = append(dst, majorTypeTags|additionalTypeIntUint16) - dst = append(dst, byte(additionalTypeTagHexString>>8)) - dst = append(dst, byte(additionalTypeTagHexString&0xff)) - return e.AppendBytes(dst, val) -} diff --git a/vendor/github.com/rs/zerolog/internal/json/base.go b/vendor/github.com/rs/zerolog/internal/json/base.go deleted file mode 100644 index 09ec59f4..00000000 --- a/vendor/github.com/rs/zerolog/internal/json/base.go +++ /dev/null @@ -1,19 +0,0 @@ -package json - -// JSONMarshalFunc is used to marshal interface to JSON encoded byte slice. -// Making it package level instead of embedded in Encoder brings -// some extra efforts at importing, but avoids value copy when the functions -// of Encoder being invoked. -// DO REMEMBER to set this variable at importing, or -// you might get a nil pointer dereference panic at runtime. -var JSONMarshalFunc func(v interface{}) ([]byte, error) - -type Encoder struct{} - -// AppendKey appends a new key to the output JSON. -func (e Encoder) AppendKey(dst []byte, key string) []byte { - if dst[len(dst)-1] != '{' { - dst = append(dst, ',') - } - return append(e.AppendString(dst, key), ':') -} diff --git a/vendor/github.com/rs/zerolog/internal/json/bytes.go b/vendor/github.com/rs/zerolog/internal/json/bytes.go deleted file mode 100644 index de64120d..00000000 --- a/vendor/github.com/rs/zerolog/internal/json/bytes.go +++ /dev/null @@ -1,85 +0,0 @@ -package json - -import "unicode/utf8" - -// AppendBytes is a mirror of appendString with []byte arg -func (Encoder) AppendBytes(dst, s []byte) []byte { - dst = append(dst, '"') - for i := 0; i < len(s); i++ { - if !noEscapeTable[s[i]] { - dst = appendBytesComplex(dst, s, i) - return append(dst, '"') - } - } - dst = append(dst, s...) - return append(dst, '"') -} - -// AppendHex encodes the input bytes to a hex string and appends -// the encoded string to the input byte slice. -// -// The operation loops though each byte and encodes it as hex using -// the hex lookup table. -func (Encoder) AppendHex(dst, s []byte) []byte { - dst = append(dst, '"') - for _, v := range s { - dst = append(dst, hex[v>>4], hex[v&0x0f]) - } - return append(dst, '"') -} - -// appendBytesComplex is a mirror of the appendStringComplex -// with []byte arg -func appendBytesComplex(dst, s []byte, i int) []byte { - start := 0 - for i < len(s) { - b := s[i] - if b >= utf8.RuneSelf { - r, size := utf8.DecodeRune(s[i:]) - if r == utf8.RuneError && size == 1 { - if start < i { - dst = append(dst, s[start:i]...) - } - dst = append(dst, `\ufffd`...) - i += size - start = i - continue - } - i += size - continue - } - if noEscapeTable[b] { - i++ - continue - } - // We encountered a character that needs to be encoded. - // Let's append the previous simple characters to the byte slice - // and switch our operation to read and encode the remainder - // characters byte-by-byte. 
- if start < i { - dst = append(dst, s[start:i]...) - } - switch b { - case '"', '\\': - dst = append(dst, '\\', b) - case '\b': - dst = append(dst, '\\', 'b') - case '\f': - dst = append(dst, '\\', 'f') - case '\n': - dst = append(dst, '\\', 'n') - case '\r': - dst = append(dst, '\\', 'r') - case '\t': - dst = append(dst, '\\', 't') - default: - dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) - } - i++ - start = i - } - if start < len(s) { - dst = append(dst, s[start:]...) - } - return dst -} diff --git a/vendor/github.com/rs/zerolog/internal/json/string.go b/vendor/github.com/rs/zerolog/internal/json/string.go deleted file mode 100644 index fd7770f2..00000000 --- a/vendor/github.com/rs/zerolog/internal/json/string.go +++ /dev/null @@ -1,149 +0,0 @@ -package json - -import ( - "fmt" - "unicode/utf8" -) - -const hex = "0123456789abcdef" - -var noEscapeTable = [256]bool{} - -func init() { - for i := 0; i <= 0x7e; i++ { - noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"' - } -} - -// AppendStrings encodes the input strings to json and -// appends the encoded string list to the input byte slice. -func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = e.AppendString(dst, vals[0]) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = e.AppendString(append(dst, ','), val) - } - } - dst = append(dst, ']') - return dst -} - -// AppendString encodes the input string to json and appends -// the encoded string to the input byte slice. -// -// The operation loops though each byte in the string looking -// for characters that need json or utf8 encoding. If the string -// does not need encoding, then the string is appended in its -// entirety to the byte slice. -// If we encounter a byte that does need encoding, switch up -// the operation and perform a byte-by-byte read-encode-append. -func (Encoder) AppendString(dst []byte, s string) []byte { - // Start with a double quote. - dst = append(dst, '"') - // Loop through each character in the string. - for i := 0; i < len(s); i++ { - // Check if the character needs encoding. Control characters, slashes, - // and the double quote need json encoding. Bytes above the ascii - // boundary needs utf8 encoding. - if !noEscapeTable[s[i]] { - // We encountered a character that needs to be encoded. Switch - // to complex version of the algorithm. - dst = appendStringComplex(dst, s, i) - return append(dst, '"') - } - } - // The string has no need for encoding and therefore is directly - // appended to the byte slice. - dst = append(dst, s...) - // End with a double quote - return append(dst, '"') -} - -// AppendStringers encodes the provided Stringer list to json and -// appends the encoded Stringer list to the input byte slice. -func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = e.AppendStringer(dst, vals[0]) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = e.AppendStringer(append(dst, ','), val) - } - } - return append(dst, ']') -} - -// AppendStringer encodes the input Stringer to json and appends the -// encoded Stringer value to the input byte slice. 
-func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte { - if val == nil { - return e.AppendInterface(dst, nil) - } - return e.AppendString(dst, val.String()) -} - -//// appendStringComplex is used by appendString to take over an in -// progress JSON string encoding that encountered a character that needs -// to be encoded. -func appendStringComplex(dst []byte, s string, i int) []byte { - start := 0 - for i < len(s) { - b := s[i] - if b >= utf8.RuneSelf { - r, size := utf8.DecodeRuneInString(s[i:]) - if r == utf8.RuneError && size == 1 { - // In case of error, first append previous simple characters to - // the byte slice if any and append a replacement character code - // in place of the invalid sequence. - if start < i { - dst = append(dst, s[start:i]...) - } - dst = append(dst, `\ufffd`...) - i += size - start = i - continue - } - i += size - continue - } - if noEscapeTable[b] { - i++ - continue - } - // We encountered a character that needs to be encoded. - // Let's append the previous simple characters to the byte slice - // and switch our operation to read and encode the remainder - // characters byte-by-byte. - if start < i { - dst = append(dst, s[start:i]...) - } - switch b { - case '"', '\\': - dst = append(dst, '\\', b) - case '\b': - dst = append(dst, '\\', 'b') - case '\f': - dst = append(dst, '\\', 'f') - case '\n': - dst = append(dst, '\\', 'n') - case '\r': - dst = append(dst, '\\', 'r') - case '\t': - dst = append(dst, '\\', 't') - default: - dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) - } - i++ - start = i - } - if start < len(s) { - dst = append(dst, s[start:]...) - } - return dst -} diff --git a/vendor/github.com/rs/zerolog/internal/json/time.go b/vendor/github.com/rs/zerolog/internal/json/time.go deleted file mode 100644 index 6a8dc912..00000000 --- a/vendor/github.com/rs/zerolog/internal/json/time.go +++ /dev/null @@ -1,113 +0,0 @@ -package json - -import ( - "strconv" - "time" -) - -const ( - // Import from zerolog/global.go - timeFormatUnix = "" - timeFormatUnixMs = "UNIXMS" - timeFormatUnixMicro = "UNIXMICRO" - timeFormatUnixNano = "UNIXNANO" -) - -// AppendTime formats the input time with the given format -// and appends the encoded string to the input byte slice. -func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte { - switch format { - case timeFormatUnix: - return e.AppendInt64(dst, t.Unix()) - case timeFormatUnixMs: - return e.AppendInt64(dst, t.UnixNano()/1000000) - case timeFormatUnixMicro: - return e.AppendInt64(dst, t.UnixNano()/1000) - case timeFormatUnixNano: - return e.AppendInt64(dst, t.UnixNano()) - } - return append(t.AppendFormat(append(dst, '"'), format), '"') -} - -// AppendTimes converts the input times with the given format -// and appends the encoded string list to the input byte slice. 
-func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte { - switch format { - case timeFormatUnix: - return appendUnixTimes(dst, vals) - case timeFormatUnixMs: - return appendUnixNanoTimes(dst, vals, 1000000) - case timeFormatUnixMicro: - return appendUnixNanoTimes(dst, vals, 1000) - case timeFormatUnixNano: - return appendUnixNanoTimes(dst, vals, 1) - } - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"') - if len(vals) > 1 { - for _, t := range vals[1:] { - dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"') - } - } - dst = append(dst, ']') - return dst -} - -func appendUnixTimes(dst []byte, vals []time.Time) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendInt(dst, vals[0].Unix(), 10) - if len(vals) > 1 { - for _, t := range vals[1:] { - dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10) - } - } - dst = append(dst, ']') - return dst -} - -func appendUnixNanoTimes(dst []byte, vals []time.Time, div int64) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendInt(dst, vals[0].UnixNano()/div, 10) - if len(vals) > 1 { - for _, t := range vals[1:] { - dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/div, 10) - } - } - dst = append(dst, ']') - return dst -} - -// AppendDuration formats the input duration with the given unit & format -// and appends the encoded string to the input byte slice. -func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { - if useInt { - return strconv.AppendInt(dst, int64(d/unit), 10) - } - return e.AppendFloat64(dst, float64(d)/float64(unit)) -} - -// AppendDurations formats the input durations with the given unit & format -// and appends the encoded string list to the input byte slice. -func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = e.AppendDuration(dst, vals[0], unit, useInt) - if len(vals) > 1 { - for _, d := range vals[1:] { - dst = e.AppendDuration(append(dst, ','), d, unit, useInt) - } - } - dst = append(dst, ']') - return dst -} diff --git a/vendor/github.com/rs/zerolog/internal/json/types.go b/vendor/github.com/rs/zerolog/internal/json/types.go deleted file mode 100644 index ef3a2a7a..00000000 --- a/vendor/github.com/rs/zerolog/internal/json/types.go +++ /dev/null @@ -1,414 +0,0 @@ -package json - -import ( - "fmt" - "math" - "net" - "reflect" - "strconv" -) - -// AppendNil inserts a 'Nil' object into the dst byte array. -func (Encoder) AppendNil(dst []byte) []byte { - return append(dst, "null"...) -} - -// AppendBeginMarker inserts a map start into the dst byte array. -func (Encoder) AppendBeginMarker(dst []byte) []byte { - return append(dst, '{') -} - -// AppendEndMarker inserts a map end into the dst byte array. -func (Encoder) AppendEndMarker(dst []byte) []byte { - return append(dst, '}') -} - -// AppendLineBreak appends a line break. -func (Encoder) AppendLineBreak(dst []byte) []byte { - return append(dst, '\n') -} - -// AppendArrayStart adds markers to indicate the start of an array. -func (Encoder) AppendArrayStart(dst []byte) []byte { - return append(dst, '[') -} - -// AppendArrayEnd adds markers to indicate the end of an array. 
-func (Encoder) AppendArrayEnd(dst []byte) []byte { - return append(dst, ']') -} - -// AppendArrayDelim adds markers to indicate end of a particular array element. -func (Encoder) AppendArrayDelim(dst []byte) []byte { - if len(dst) > 0 { - return append(dst, ',') - } - return dst -} - -// AppendBool converts the input bool to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendBool(dst []byte, val bool) []byte { - return strconv.AppendBool(dst, val) -} - -// AppendBools encodes the input bools to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendBools(dst []byte, vals []bool) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendBool(dst, vals[0]) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendBool(append(dst, ','), val) - } - } - dst = append(dst, ']') - return dst -} - -// AppendInt converts the input int to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendInt(dst []byte, val int) []byte { - return strconv.AppendInt(dst, int64(val), 10) -} - -// AppendInts encodes the input ints to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendInts(dst []byte, vals []int) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendInt(dst, int64(vals[0]), 10) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendInt(append(dst, ','), int64(val), 10) - } - } - dst = append(dst, ']') - return dst -} - -// AppendInt8 converts the input []int8 to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendInt8(dst []byte, val int8) []byte { - return strconv.AppendInt(dst, int64(val), 10) -} - -// AppendInts8 encodes the input int8s to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendInts8(dst []byte, vals []int8) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendInt(dst, int64(vals[0]), 10) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendInt(append(dst, ','), int64(val), 10) - } - } - dst = append(dst, ']') - return dst -} - -// AppendInt16 converts the input int16 to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendInt16(dst []byte, val int16) []byte { - return strconv.AppendInt(dst, int64(val), 10) -} - -// AppendInts16 encodes the input int16s to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendInts16(dst []byte, vals []int16) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendInt(dst, int64(vals[0]), 10) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendInt(append(dst, ','), int64(val), 10) - } - } - dst = append(dst, ']') - return dst -} - -// AppendInt32 converts the input int32 to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendInt32(dst []byte, val int32) []byte { - return strconv.AppendInt(dst, int64(val), 10) -} - -// AppendInts32 encodes the input int32s to json and -// appends the encoded string list to the input byte slice. 
-func (Encoder) AppendInts32(dst []byte, vals []int32) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendInt(dst, int64(vals[0]), 10) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendInt(append(dst, ','), int64(val), 10) - } - } - dst = append(dst, ']') - return dst -} - -// AppendInt64 converts the input int64 to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendInt64(dst []byte, val int64) []byte { - return strconv.AppendInt(dst, val, 10) -} - -// AppendInts64 encodes the input int64s to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendInts64(dst []byte, vals []int64) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendInt(dst, vals[0], 10) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendInt(append(dst, ','), val, 10) - } - } - dst = append(dst, ']') - return dst -} - -// AppendUint converts the input uint to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendUint(dst []byte, val uint) []byte { - return strconv.AppendUint(dst, uint64(val), 10) -} - -// AppendUints encodes the input uints to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendUints(dst []byte, vals []uint) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendUint(dst, uint64(vals[0]), 10) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) - } - } - dst = append(dst, ']') - return dst -} - -// AppendUint8 converts the input uint8 to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendUint8(dst []byte, val uint8) []byte { - return strconv.AppendUint(dst, uint64(val), 10) -} - -// AppendUints8 encodes the input uint8s to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendUint(dst, uint64(vals[0]), 10) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) - } - } - dst = append(dst, ']') - return dst -} - -// AppendUint16 converts the input uint16 to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendUint16(dst []byte, val uint16) []byte { - return strconv.AppendUint(dst, uint64(val), 10) -} - -// AppendUints16 encodes the input uint16s to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendUint(dst, uint64(vals[0]), 10) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) - } - } - dst = append(dst, ']') - return dst -} - -// AppendUint32 converts the input uint32 to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendUint32(dst []byte, val uint32) []byte { - return strconv.AppendUint(dst, uint64(val), 10) -} - -// AppendUints32 encodes the input uint32s to json and -// appends the encoded string list to the input byte slice. 
-func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendUint(dst, uint64(vals[0]), 10) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) - } - } - dst = append(dst, ']') - return dst -} - -// AppendUint64 converts the input uint64 to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendUint64(dst []byte, val uint64) []byte { - return strconv.AppendUint(dst, val, 10) -} - -// AppendUints64 encodes the input uint64s to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = strconv.AppendUint(dst, vals[0], 10) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = strconv.AppendUint(append(dst, ','), val, 10) - } - } - dst = append(dst, ']') - return dst -} - -func appendFloat(dst []byte, val float64, bitSize int) []byte { - // JSON does not permit NaN or Infinity. A typical JSON encoder would fail - // with an error, but a logging library wants the data to get through so we - // make a tradeoff and store those types as string. - switch { - case math.IsNaN(val): - return append(dst, `"NaN"`...) - case math.IsInf(val, 1): - return append(dst, `"+Inf"`...) - case math.IsInf(val, -1): - return append(dst, `"-Inf"`...) - } - return strconv.AppendFloat(dst, val, 'f', -1, bitSize) -} - -// AppendFloat32 converts the input float32 to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendFloat32(dst []byte, val float32) []byte { - return appendFloat(dst, float64(val), 32) -} - -// AppendFloats32 encodes the input float32s to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = appendFloat(dst, float64(vals[0]), 32) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = appendFloat(append(dst, ','), float64(val), 32) - } - } - dst = append(dst, ']') - return dst -} - -// AppendFloat64 converts the input float64 to a string and -// appends the encoded string to the input byte slice. -func (Encoder) AppendFloat64(dst []byte, val float64) []byte { - return appendFloat(dst, val, 64) -} - -// AppendFloats64 encodes the input float64s to json and -// appends the encoded string list to the input byte slice. -func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte { - if len(vals) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - dst = appendFloat(dst, vals[0], 64) - if len(vals) > 1 { - for _, val := range vals[1:] { - dst = appendFloat(append(dst, ','), val, 64) - } - } - dst = append(dst, ']') - return dst -} - -// AppendInterface marshals the input interface to a string and -// appends the encoded string to the input byte slice. -func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { - marshaled, err := JSONMarshalFunc(i) - if err != nil { - return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) - } - return append(dst, marshaled...) -} - -// AppendType appends the parameter type (as a string) to the input byte slice. 
-func (e Encoder) AppendType(dst []byte, i interface{}) []byte { - if i == nil { - return e.AppendString(dst, "") - } - return e.AppendString(dst, reflect.TypeOf(i).String()) -} - -// AppendObjectData takes in an object that is already in a byte array -// and adds it to the dst. -func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { - // Three conditions apply here: - // 1. new content starts with '{' - which should be dropped OR - // 2. new content starts with '{' - which should be replaced with ',' - // to separate with existing content OR - // 3. existing content has already other fields - if o[0] == '{' { - if len(dst) > 1 { - dst = append(dst, ',') - } - o = o[1:] - } else if len(dst) > 1 { - dst = append(dst, ',') - } - return append(dst, o...) -} - -// AppendIPAddr adds IPv4 or IPv6 address to dst. -func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { - return e.AppendString(dst, ip.String()) -} - -// AppendIPPrefix adds IPv4 or IPv6 Prefix (address & mask) to dst. -func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { - return e.AppendString(dst, pfx.String()) - -} - -// AppendMACAddr adds MAC address to dst. -func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { - return e.AppendString(dst, ha.String()) -} diff --git a/vendor/github.com/rs/zerolog/log.go b/vendor/github.com/rs/zerolog/log.go deleted file mode 100644 index e7b5126e..00000000 --- a/vendor/github.com/rs/zerolog/log.go +++ /dev/null @@ -1,498 +0,0 @@ -// Package zerolog provides a lightweight logging library dedicated to JSON logging. -// -// A global Logger can be use for simple logging: -// -// import "github.com/rs/zerolog/log" -// -// log.Info().Msg("hello world") -// // Output: {"time":1494567715,"level":"info","message":"hello world"} -// -// NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log". -// -// Fields can be added to log messages: -// -// log.Info().Str("foo", "bar").Msg("hello world") -// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} -// -// Create logger instance to manage different outputs: -// -// logger := zerolog.New(os.Stderr).With().Timestamp().Logger() -// logger.Info(). -// Str("foo", "bar"). -// Msg("hello world") -// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} -// -// Sub-loggers let you chain loggers with additional context: -// -// sublogger := log.With().Str("component": "foo").Logger() -// sublogger.Info().Msg("hello world") -// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"} -// -// Level logging -// -// zerolog.SetGlobalLevel(zerolog.InfoLevel) -// -// log.Debug().Msg("filtered out message") -// log.Info().Msg("routed message") -// -// if e := log.Debug(); e.Enabled() { -// // Compute log output only if enabled. 
-// value := compute() -// e.Str("foo": value).Msg("some debug message") -// } -// // Output: {"level":"info","time":1494567715,"routed message"} -// -// Customize automatic field names: -// -// log.TimestampFieldName = "t" -// log.LevelFieldName = "p" -// log.MessageFieldName = "m" -// -// log.Info().Msg("hello world") -// // Output: {"t":1494567715,"p":"info","m":"hello world"} -// -// Log with no level and message: -// -// log.Log().Str("foo","bar").Msg("") -// // Output: {"time":1494567715,"foo":"bar"} -// -// Add contextual fields to global Logger: -// -// log.Logger = log.With().Str("foo", "bar").Logger() -// -// Sample logs: -// -// sampled := log.Sample(&zerolog.BasicSampler{N: 10}) -// sampled.Info().Msg("will be logged every 10 messages") -// -// Log with contextual hooks: -// -// // Create the hook: -// type SeverityHook struct{} -// -// func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { -// if level != zerolog.NoLevel { -// e.Str("severity", level.String()) -// } -// } -// -// // And use it: -// var h SeverityHook -// log := zerolog.New(os.Stdout).Hook(h) -// log.Warn().Msg("") -// // Output: {"level":"warn","severity":"warn"} -// -// # Caveats -// -// Field duplication: -// -// There is no fields deduplication out-of-the-box. -// Using the same key multiple times creates new key in final JSON each time. -// -// logger := zerolog.New(os.Stderr).With().Timestamp().Logger() -// logger.Info(). -// Timestamp(). -// Msg("dup") -// // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"} -// -// In this case, many consumers will take the last value, -// but this is not guaranteed; check yours if in doubt. -// -// Concurrency safety: -// -// Be careful when calling UpdateContext. It is not concurrency safe. Use the With method to create a child logger: -// -// func handler(w http.ResponseWriter, r *http.Request) { -// // Create a child logger for concurrency safety -// logger := log.Logger.With().Logger() -// -// // Add context fields, for example User-Agent from HTTP headers -// logger.UpdateContext(func(c zerolog.Context) zerolog.Context { -// ... -// }) -// } -package zerolog - -import ( - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strconv" - "strings" -) - -// Level defines log levels. -type Level int8 - -const ( - // DebugLevel defines debug log level. - DebugLevel Level = iota - // InfoLevel defines info log level. - InfoLevel - // WarnLevel defines warn log level. - WarnLevel - // ErrorLevel defines error log level. - ErrorLevel - // FatalLevel defines fatal log level. - FatalLevel - // PanicLevel defines panic log level. - PanicLevel - // NoLevel defines an absent log level. - NoLevel - // Disabled disables the logger. - Disabled - - // TraceLevel defines trace log level. - TraceLevel Level = -1 - // Values less than TraceLevel are handled as numbers. -) - -func (l Level) String() string { - switch l { - case TraceLevel: - return LevelTraceValue - case DebugLevel: - return LevelDebugValue - case InfoLevel: - return LevelInfoValue - case WarnLevel: - return LevelWarnValue - case ErrorLevel: - return LevelErrorValue - case FatalLevel: - return LevelFatalValue - case PanicLevel: - return LevelPanicValue - case Disabled: - return "disabled" - case NoLevel: - return "" - } - return strconv.Itoa(int(l)) -} - -// ParseLevel converts a level string into a zerolog Level value. -// returns an error if the input string does not match known values. 
-func ParseLevel(levelStr string) (Level, error) { - switch { - case strings.EqualFold(levelStr, LevelFieldMarshalFunc(TraceLevel)): - return TraceLevel, nil - case strings.EqualFold(levelStr, LevelFieldMarshalFunc(DebugLevel)): - return DebugLevel, nil - case strings.EqualFold(levelStr, LevelFieldMarshalFunc(InfoLevel)): - return InfoLevel, nil - case strings.EqualFold(levelStr, LevelFieldMarshalFunc(WarnLevel)): - return WarnLevel, nil - case strings.EqualFold(levelStr, LevelFieldMarshalFunc(ErrorLevel)): - return ErrorLevel, nil - case strings.EqualFold(levelStr, LevelFieldMarshalFunc(FatalLevel)): - return FatalLevel, nil - case strings.EqualFold(levelStr, LevelFieldMarshalFunc(PanicLevel)): - return PanicLevel, nil - case strings.EqualFold(levelStr, LevelFieldMarshalFunc(Disabled)): - return Disabled, nil - case strings.EqualFold(levelStr, LevelFieldMarshalFunc(NoLevel)): - return NoLevel, nil - } - i, err := strconv.Atoi(levelStr) - if err != nil { - return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr) - } - if i > 127 || i < -128 { - return NoLevel, fmt.Errorf("Out-Of-Bounds Level: '%d', defaulting to NoLevel", i) - } - return Level(i), nil -} - -// UnmarshalText implements encoding.TextUnmarshaler to allow for easy reading from toml/yaml/json formats -func (l *Level) UnmarshalText(text []byte) error { - if l == nil { - return errors.New("can't unmarshal a nil *Level") - } - var err error - *l, err = ParseLevel(string(text)) - return err -} - -// MarshalText implements encoding.TextMarshaler to allow for easy writing into toml/yaml/json formats -func (l Level) MarshalText() ([]byte, error) { - return []byte(LevelFieldMarshalFunc(l)), nil -} - -// A Logger represents an active logging object that generates lines -// of JSON output to an io.Writer. Each logging operation makes a single -// call to the Writer's Write method. There is no guarantee on access -// serialization to the Writer. If your Writer is not thread safe, -// you may consider a sync wrapper. -type Logger struct { - w LevelWriter - level Level - sampler Sampler - context []byte - hooks []Hook - stack bool - ctx context.Context -} - -// New creates a root logger with given output writer. If the output writer implements -// the LevelWriter interface, the WriteLevel method will be called instead of the Write -// one. -// -// Each logging operation makes a single call to the Writer's Write method. There is no -// guarantee on access serialization to the Writer. If your Writer is not thread safe, -// you may consider using sync wrapper. -func New(w io.Writer) Logger { - if w == nil { - w = ioutil.Discard - } - lw, ok := w.(LevelWriter) - if !ok { - lw = levelWriterAdapter{w} - } - return Logger{w: lw, level: TraceLevel} -} - -// Nop returns a disabled logger for which all operation are no-op. -func Nop() Logger { - return New(nil).Level(Disabled) -} - -// Output duplicates the current logger and sets w as its output. -func (l Logger) Output(w io.Writer) Logger { - l2 := New(w) - l2.level = l.level - l2.sampler = l.sampler - l2.stack = l.stack - if len(l.hooks) > 0 { - l2.hooks = append(l2.hooks, l.hooks...) - } - if l.context != nil { - l2.context = make([]byte, len(l.context), cap(l.context)) - copy(l2.context, l.context) - } - return l2 -} - -// With creates a child logger with the field added to its context. -func (l Logger) With() Context { - context := l.context - l.context = make([]byte, 0, 500) - if context != nil { - l.context = append(l.context, context...) 
- } else { - // This is needed for AppendKey to not check len of input - // thus making it inlinable - l.context = enc.AppendBeginMarker(l.context) - } - return Context{l} -} - -// UpdateContext updates the internal logger's context. -// -// Caution: This method is not concurrency safe. -// Use the With method to create a child logger before modifying the context from concurrent goroutines. -func (l *Logger) UpdateContext(update func(c Context) Context) { - if l == disabledLogger { - return - } - if cap(l.context) == 0 { - l.context = make([]byte, 0, 500) - } - if len(l.context) == 0 { - l.context = enc.AppendBeginMarker(l.context) - } - c := update(Context{*l}) - l.context = c.l.context -} - -// Level creates a child logger with the minimum accepted level set to level. -func (l Logger) Level(lvl Level) Logger { - l.level = lvl - return l -} - -// GetLevel returns the current Level of l. -func (l Logger) GetLevel() Level { - return l.level -} - -// Sample returns a logger with the s sampler. -func (l Logger) Sample(s Sampler) Logger { - l.sampler = s - return l -} - -// Hook returns a logger with the h Hook. -func (l Logger) Hook(h Hook) Logger { - newHooks := make([]Hook, len(l.hooks), len(l.hooks)+1) - copy(newHooks, l.hooks) - l.hooks = append(newHooks, h) - return l -} - -// Trace starts a new message with trace level. -// -// You must call Msg on the returned event in order to send the event. -func (l *Logger) Trace() *Event { - return l.newEvent(TraceLevel, nil) -} - -// Debug starts a new message with debug level. -// -// You must call Msg on the returned event in order to send the event. -func (l *Logger) Debug() *Event { - return l.newEvent(DebugLevel, nil) -} - -// Info starts a new message with info level. -// -// You must call Msg on the returned event in order to send the event. -func (l *Logger) Info() *Event { - return l.newEvent(InfoLevel, nil) -} - -// Warn starts a new message with warn level. -// -// You must call Msg on the returned event in order to send the event. -func (l *Logger) Warn() *Event { - return l.newEvent(WarnLevel, nil) -} - -// Error starts a new message with error level. -// -// You must call Msg on the returned event in order to send the event. -func (l *Logger) Error() *Event { - return l.newEvent(ErrorLevel, nil) -} - -// Err starts a new message with error level with err as a field if not nil or -// with info level if err is nil. -// -// You must call Msg on the returned event in order to send the event. -func (l *Logger) Err(err error) *Event { - if err != nil { - return l.Error().Err(err) - } - - return l.Info() -} - -// Fatal starts a new message with fatal level. The os.Exit(1) function -// is called by the Msg method, which terminates the program immediately. -// -// You must call Msg on the returned event in order to send the event. -func (l *Logger) Fatal() *Event { - return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) }) -} - -// Panic starts a new message with panic level. The panic() function -// is called by the Msg method, which stops the ordinary flow of a goroutine. -// -// You must call Msg on the returned event in order to send the event. -func (l *Logger) Panic() *Event { - return l.newEvent(PanicLevel, func(msg string) { panic(msg) }) -} - -// WithLevel starts a new message with level. Unlike Fatal and Panic -// methods, WithLevel does not terminate the program or stop the ordinary -// flow of a goroutine when used with their respective levels. -// -// You must call Msg on the returned event in order to send the event. 
-func (l *Logger) WithLevel(level Level) *Event { - switch level { - case TraceLevel: - return l.Trace() - case DebugLevel: - return l.Debug() - case InfoLevel: - return l.Info() - case WarnLevel: - return l.Warn() - case ErrorLevel: - return l.Error() - case FatalLevel: - return l.newEvent(FatalLevel, nil) - case PanicLevel: - return l.newEvent(PanicLevel, nil) - case NoLevel: - return l.Log() - case Disabled: - return nil - default: - return l.newEvent(level, nil) - } -} - -// Log starts a new message with no level. Setting GlobalLevel to Disabled -// will still disable events produced by this method. -// -// You must call Msg on the returned event in order to send the event. -func (l *Logger) Log() *Event { - return l.newEvent(NoLevel, nil) -} - -// Print sends a log event using debug level and no extra field. -// Arguments are handled in the manner of fmt.Print. -func (l *Logger) Print(v ...interface{}) { - if e := l.Debug(); e.Enabled() { - e.CallerSkipFrame(1).Msg(fmt.Sprint(v...)) - } -} - -// Printf sends a log event using debug level and no extra field. -// Arguments are handled in the manner of fmt.Printf. -func (l *Logger) Printf(format string, v ...interface{}) { - if e := l.Debug(); e.Enabled() { - e.CallerSkipFrame(1).Msg(fmt.Sprintf(format, v...)) - } -} - -// Write implements the io.Writer interface. This is useful to set as a writer -// for the standard library log. -func (l Logger) Write(p []byte) (n int, err error) { - n = len(p) - if n > 0 && p[n-1] == '\n' { - // Trim CR added by stdlog. - p = p[0 : n-1] - } - l.Log().CallerSkipFrame(1).Msg(string(p)) - return -} - -func (l *Logger) newEvent(level Level, done func(string)) *Event { - enabled := l.should(level) - if !enabled { - if done != nil { - done("") - } - return nil - } - e := newEvent(l.w, level) - e.done = done - e.ch = l.hooks - e.ctx = l.ctx - if level != NoLevel && LevelFieldName != "" { - e.Str(LevelFieldName, LevelFieldMarshalFunc(level)) - } - if l.context != nil && len(l.context) > 1 { - e.buf = enc.AppendObjectData(e.buf, l.context) - } - if l.stack { - e.Stack() - } - return e -} - -// should returns true if the log event should be logged. -func (l *Logger) should(lvl Level) bool { - if lvl < l.level || lvl < GlobalLevel() { - return false - } - if l.sampler != nil && !samplingDisabled() { - return l.sampler.Sample(lvl) - } - return true -} diff --git a/vendor/github.com/rs/zerolog/not_go112.go b/vendor/github.com/rs/zerolog/not_go112.go deleted file mode 100644 index 4c43c9e7..00000000 --- a/vendor/github.com/rs/zerolog/not_go112.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !go1.12 - -package zerolog - -const contextCallerSkipFrameCount = 3 diff --git a/vendor/github.com/rs/zerolog/pretty.png b/vendor/github.com/rs/zerolog/pretty.png deleted file mode 100644 index 24203368..00000000 Binary files a/vendor/github.com/rs/zerolog/pretty.png and /dev/null differ diff --git a/vendor/github.com/rs/zerolog/sampler.go b/vendor/github.com/rs/zerolog/sampler.go deleted file mode 100644 index 1be98c4f..00000000 --- a/vendor/github.com/rs/zerolog/sampler.go +++ /dev/null @@ -1,134 +0,0 @@ -package zerolog - -import ( - "math/rand" - "sync/atomic" - "time" -) - -var ( - // Often samples log every ~ 10 events. - Often = RandomSampler(10) - // Sometimes samples log every ~ 100 events. - Sometimes = RandomSampler(100) - // Rarely samples log every ~ 1000 events. - Rarely = RandomSampler(1000) -) - -// Sampler defines an interface to a log sampler. 
-type Sampler interface { - // Sample returns true if the event should be part of the sample, false if - // the event should be dropped. - Sample(lvl Level) bool -} - -// RandomSampler use a PRNG to randomly sample an event out of N events, -// regardless of their level. -type RandomSampler uint32 - -// Sample implements the Sampler interface. -func (s RandomSampler) Sample(lvl Level) bool { - if s <= 0 { - return false - } - if rand.Intn(int(s)) != 0 { - return false - } - return true -} - -// BasicSampler is a sampler that will send every Nth events, regardless of -// their level. -type BasicSampler struct { - N uint32 - counter uint32 -} - -// Sample implements the Sampler interface. -func (s *BasicSampler) Sample(lvl Level) bool { - n := s.N - if n == 1 { - return true - } - c := atomic.AddUint32(&s.counter, 1) - return c%n == 1 -} - -// BurstSampler lets Burst events pass per Period then pass the decision to -// NextSampler. If Sampler is not set, all subsequent events are rejected. -type BurstSampler struct { - // Burst is the maximum number of event per period allowed before calling - // NextSampler. - Burst uint32 - // Period defines the burst period. If 0, NextSampler is always called. - Period time.Duration - // NextSampler is the sampler used after the burst is reached. If nil, - // events are always rejected after the burst. - NextSampler Sampler - - counter uint32 - resetAt int64 -} - -// Sample implements the Sampler interface. -func (s *BurstSampler) Sample(lvl Level) bool { - if s.Burst > 0 && s.Period > 0 { - if s.inc() <= s.Burst { - return true - } - } - if s.NextSampler == nil { - return false - } - return s.NextSampler.Sample(lvl) -} - -func (s *BurstSampler) inc() uint32 { - now := time.Now().UnixNano() - resetAt := atomic.LoadInt64(&s.resetAt) - var c uint32 - if now > resetAt { - c = 1 - atomic.StoreUint32(&s.counter, c) - newResetAt := now + s.Period.Nanoseconds() - reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt) - if !reset { - // Lost the race with another goroutine trying to reset. - c = atomic.AddUint32(&s.counter, 1) - } - } else { - c = atomic.AddUint32(&s.counter, 1) - } - return c -} - -// LevelSampler applies a different sampler for each level. -type LevelSampler struct { - TraceSampler, DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler -} - -func (s LevelSampler) Sample(lvl Level) bool { - switch lvl { - case TraceLevel: - if s.TraceSampler != nil { - return s.TraceSampler.Sample(lvl) - } - case DebugLevel: - if s.DebugSampler != nil { - return s.DebugSampler.Sample(lvl) - } - case InfoLevel: - if s.InfoSampler != nil { - return s.InfoSampler.Sample(lvl) - } - case WarnLevel: - if s.WarnSampler != nil { - return s.WarnSampler.Sample(lvl) - } - case ErrorLevel: - if s.ErrorSampler != nil { - return s.ErrorSampler.Sample(lvl) - } - } - return true -} diff --git a/vendor/github.com/rs/zerolog/syslog.go b/vendor/github.com/rs/zerolog/syslog.go deleted file mode 100644 index c4082830..00000000 --- a/vendor/github.com/rs/zerolog/syslog.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build !windows -// +build !binary_log - -package zerolog - -import ( - "io" -) - -// See http://cee.mitre.org/language/1.0-beta1/clt.html#syslog -// or https://www.rsyslog.com/json-elasticsearch/ -const ceePrefix = "@cee:" - -// SyslogWriter is an interface matching a syslog.Writer struct. 
-type SyslogWriter interface { - io.Writer - Debug(m string) error - Info(m string) error - Warning(m string) error - Err(m string) error - Emerg(m string) error - Crit(m string) error -} - -type syslogWriter struct { - w SyslogWriter - prefix string -} - -// SyslogLevelWriter wraps a SyslogWriter and call the right syslog level -// method matching the zerolog level. -func SyslogLevelWriter(w SyslogWriter) LevelWriter { - return syslogWriter{w, ""} -} - -// SyslogCEEWriter wraps a SyslogWriter with a SyslogLevelWriter that adds a -// MITRE CEE prefix for JSON syslog entries, compatible with rsyslog -// and syslog-ng JSON logging support. -// See https://www.rsyslog.com/json-elasticsearch/ -func SyslogCEEWriter(w SyslogWriter) LevelWriter { - return syslogWriter{w, ceePrefix} -} - -func (sw syslogWriter) Write(p []byte) (n int, err error) { - var pn int - if sw.prefix != "" { - pn, err = sw.w.Write([]byte(sw.prefix)) - if err != nil { - return pn, err - } - } - n, err = sw.w.Write(p) - return pn + n, err -} - -// WriteLevel implements LevelWriter interface. -func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) { - switch level { - case TraceLevel: - case DebugLevel: - err = sw.w.Debug(sw.prefix + string(p)) - case InfoLevel: - err = sw.w.Info(sw.prefix + string(p)) - case WarnLevel: - err = sw.w.Warning(sw.prefix + string(p)) - case ErrorLevel: - err = sw.w.Err(sw.prefix + string(p)) - case FatalLevel: - err = sw.w.Emerg(sw.prefix + string(p)) - case PanicLevel: - err = sw.w.Crit(sw.prefix + string(p)) - case NoLevel: - err = sw.w.Info(sw.prefix + string(p)) - default: - panic("invalid level") - } - // Any CEE prefix is not part of the message, so we don't include its length - n = len(p) - return -} diff --git a/vendor/github.com/rs/zerolog/writer.go b/vendor/github.com/rs/zerolog/writer.go deleted file mode 100644 index 26f5e632..00000000 --- a/vendor/github.com/rs/zerolog/writer.go +++ /dev/null @@ -1,154 +0,0 @@ -package zerolog - -import ( - "bytes" - "io" - "path" - "runtime" - "strconv" - "strings" - "sync" -) - -// LevelWriter defines as interface a writer may implement in order -// to receive level information with payload. -type LevelWriter interface { - io.Writer - WriteLevel(level Level, p []byte) (n int, err error) -} - -type levelWriterAdapter struct { - io.Writer -} - -func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) { - return lw.Write(p) -} - -type syncWriter struct { - mu sync.Mutex - lw LevelWriter -} - -// SyncWriter wraps w so that each call to Write is synchronized with a mutex. -// This syncer can be used to wrap the call to writer's Write method if it is -// not thread safe. Note that you do not need this wrapper for os.File Write -// operations on POSIX and Windows systems as they are already thread-safe. -func SyncWriter(w io.Writer) io.Writer { - if lw, ok := w.(LevelWriter); ok { - return &syncWriter{lw: lw} - } - return &syncWriter{lw: levelWriterAdapter{w}} -} - -// Write implements the io.Writer interface. -func (s *syncWriter) Write(p []byte) (n int, err error) { - s.mu.Lock() - defer s.mu.Unlock() - return s.lw.Write(p) -} - -// WriteLevel implements the LevelWriter interface. 
-func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) { - s.mu.Lock() - defer s.mu.Unlock() - return s.lw.WriteLevel(l, p) -} - -type multiLevelWriter struct { - writers []LevelWriter -} - -func (t multiLevelWriter) Write(p []byte) (n int, err error) { - for _, w := range t.writers { - if _n, _err := w.Write(p); err == nil { - n = _n - if _err != nil { - err = _err - } else if _n != len(p) { - err = io.ErrShortWrite - } - } - } - return n, err -} - -func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) { - for _, w := range t.writers { - if _n, _err := w.WriteLevel(l, p); err == nil { - n = _n - if _err != nil { - err = _err - } else if _n != len(p) { - err = io.ErrShortWrite - } - } - } - return n, err -} - -// MultiLevelWriter creates a writer that duplicates its writes to all the -// provided writers, similar to the Unix tee(1) command. If some writers -// implement LevelWriter, their WriteLevel method will be used instead of Write. -func MultiLevelWriter(writers ...io.Writer) LevelWriter { - lwriters := make([]LevelWriter, 0, len(writers)) - for _, w := range writers { - if lw, ok := w.(LevelWriter); ok { - lwriters = append(lwriters, lw) - } else { - lwriters = append(lwriters, levelWriterAdapter{w}) - } - } - return multiLevelWriter{lwriters} -} - -// TestingLog is the logging interface of testing.TB. -type TestingLog interface { - Log(args ...interface{}) - Logf(format string, args ...interface{}) - Helper() -} - -// TestWriter is a writer that writes to testing.TB. -type TestWriter struct { - T TestingLog - - // Frame skips caller frames to capture the original file and line numbers. - Frame int -} - -// NewTestWriter creates a writer that logs to the testing.TB. -func NewTestWriter(t TestingLog) TestWriter { - return TestWriter{T: t} -} - -// Write to testing.TB. -func (t TestWriter) Write(p []byte) (n int, err error) { - t.T.Helper() - - n = len(p) - - // Strip trailing newline because t.Log always adds one. - p = bytes.TrimRight(p, "\n") - - // Try to correct the log file and line number to the caller. - if t.Frame > 0 { - _, origFile, origLine, _ := runtime.Caller(1) - _, frameFile, frameLine, ok := runtime.Caller(1 + t.Frame) - if ok { - erase := strings.Repeat("\b", len(path.Base(origFile))+len(strconv.Itoa(origLine))+3) - t.T.Logf("%s%s:%d: %s", erase, path.Base(frameFile), frameLine, p) - return n, err - } - } - t.T.Log(string(p)) - - return n, err -} - -// ConsoleTestWriter creates an option that correctly sets the file frame depth for testing.TB log. 
-func ConsoleTestWriter(t TestingLog) func(w *ConsoleWriter) { - return func(w *ConsoleWriter) { - w.Out = TestWriter{T: t, Frame: 6} - } -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 32ad8e6c..c027a0cb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -24,10 +24,10 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy -# github.com/google/uuid v1.3.0 +# github.com/google/uuid v1.3.1 ## explicit github.com/google/uuid -# github.com/gregdel/pushover v1.2.0 +# github.com/gregdel/pushover v1.2.1 ## explicit; go 1.14 github.com/gregdel/pushover # github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 @@ -61,12 +61,6 @@ github.com/jackc/pgx/v5/stdlib github.com/jellydator/ttlcache/v3 # github.com/kr/text v0.2.0 ## explicit -# github.com/mattn/go-colorable v0.1.13 -## explicit; go 1.15 -github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.19 -## explicit; go 1.15 -github.com/mattn/go-isatty # github.com/mennanov/fmutils v0.2.0 ## explicit; go 1.15 github.com/mennanov/fmutils @@ -79,7 +73,7 @@ github.com/ownmfa/api/go/api # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/redis/go-redis/v9 v9.0.5 +# github.com/redis/go-redis/v9 v9.1.0 ## explicit; go 1.18 github.com/redis/go-redis/v9 github.com/redis/go-redis/v9/internal @@ -91,11 +85,6 @@ github.com/redis/go-redis/v9/internal/rand github.com/redis/go-redis/v9/internal/util # github.com/rogpeppe/go-internal v1.11.0 ## explicit; go 1.19 -# github.com/rs/zerolog v1.30.0 -## explicit; go 1.15 -github.com/rs/zerolog -github.com/rs/zerolog/internal/cbor -github.com/rs/zerolog/internal/json # github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e ## explicit; go 1.13 github.com/skip2/go-qrcode @@ -145,15 +134,15 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# google.golang.org/genproto v0.0.0-20230814215434-ca7cfce7776a +# google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93 ## explicit; go 1.19 google.golang.org/genproto/internal -# google.golang.org/genproto/googleapis/api v0.0.0-20230814215434-ca7cfce7776a +# google.golang.org/genproto/googleapis/api v0.0.0-20230821184602-ccc8af3d0e93 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230814215434-ca7cfce7776a +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230821184602-ccc8af3d0e93 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.57.0