diff --git a/client/kafka/async_producer.go b/client/kafka/async_producer.go index 64093447f..694400ab4 100644 --- a/client/kafka/async_producer.go +++ b/client/kafka/async_producer.go @@ -5,7 +5,7 @@ import ( "context" "fmt" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" patronerrors "github.com/beatlabs/patron/errors" "github.com/beatlabs/patron/trace" "github.com/opentracing/opentracing-go" diff --git a/client/kafka/integration_test.go b/client/kafka/integration_test.go index 39f37d409..8e8e33b3c 100644 --- a/client/kafka/integration_test.go +++ b/client/kafka/integration_test.go @@ -6,7 +6,7 @@ import ( "context" "testing" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" "github.com/opentracing/opentracing-go/mocktracer" diff --git a/client/kafka/kafka.go b/client/kafka/kafka.go index 15a7a011f..d319b2b49 100644 --- a/client/kafka/kafka.go +++ b/client/kafka/kafka.go @@ -6,7 +6,7 @@ import ( "fmt" "os" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/correlation" patronerrors "github.com/beatlabs/patron/errors" "github.com/beatlabs/patron/internal/validation" diff --git a/client/kafka/kafka_test.go b/client/kafka/kafka_test.go index dad83e5c2..a2cc74508 100644 --- a/client/kafka/kafka_test.go +++ b/client/kafka/kafka_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/correlation" "github.com/beatlabs/patron/trace" "github.com/opentracing/opentracing-go" diff --git a/client/kafka/sync_producer.go b/client/kafka/sync_producer.go index 612653ed0..39aa81e16 100644 --- a/client/kafka/sync_producer.go +++ b/client/kafka/sync_producer.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" patronerrors "github.com/beatlabs/patron/errors" "github.com/beatlabs/patron/trace" "github.com/opentracing/opentracing-go" diff --git a/component/async/kafka/group/group.go b/component/async/kafka/group/group.go index bd8ff2fc8..ffe1f2087 100644 --- a/component/async/kafka/group/group.go +++ b/component/async/kafka/group/group.go @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/component/async/kafka" "github.com/beatlabs/patron/internal/validation" diff --git a/component/async/kafka/group/group_test.go b/component/async/kafka/group/group_test.go index 671d98515..226a5af38 100644 --- a/component/async/kafka/group/group_test.go +++ b/component/async/kafka/group/group_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/component/async/kafka" kafkacmp "github.com/beatlabs/patron/component/kafka" diff --git a/component/async/kafka/group/integration_test.go b/component/async/kafka/group/integration_test.go index 19841219e..c12bc0787 100644 --- a/component/async/kafka/group/integration_test.go +++ b/component/async/kafka/group/integration_test.go @@ -13,7 +13,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/component/async/kafka" diff --git a/component/async/kafka/kafka.go b/component/async/kafka/kafka.go index 19498c22f..78fe5ca54 100644 --- 
a/component/async/kafka/kafka.go +++ b/component/async/kafka/kafka.go @@ -8,7 +8,7 @@ import ( "strconv" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/correlation" "github.com/beatlabs/patron/encoding" diff --git a/component/async/kafka/kafka_test.go b/component/async/kafka/kafka_test.go index 010afbd24..16864d602 100644 --- a/component/async/kafka/kafka_test.go +++ b/component/async/kafka/kafka_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/correlation" "github.com/beatlabs/patron/encoding" diff --git a/component/async/kafka/option.go b/component/async/kafka/option.go index cd6af5a07..ea8ddaf43 100644 --- a/component/async/kafka/option.go +++ b/component/async/kafka/option.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/encoding" "github.com/beatlabs/patron/encoding/json" ) diff --git a/component/async/kafka/option_test.go b/component/async/kafka/option_test.go index 81481e86c..dd4a21c83 100644 --- a/component/async/kafka/option_test.go +++ b/component/async/kafka/option_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/encoding" "github.com/beatlabs/patron/encoding/json" "github.com/stretchr/testify/assert" diff --git a/component/async/kafka/simple/duration_client_test.go b/component/async/kafka/simple/duration_client_test.go index e761358f6..6155adbde 100644 --- a/component/async/kafka/simple/duration_client_test.go +++ b/component/async/kafka/simple/duration_client_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/component/async/kafka/simple/duration_kafka.go b/component/async/kafka/simple/duration_kafka.go index ff8e0489e..bcfa115a6 100644 --- a/component/async/kafka/simple/duration_kafka.go +++ b/component/async/kafka/simple/duration_kafka.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" ) type outOfRangeOffsetError struct { diff --git a/component/async/kafka/simple/integration_test.go b/component/async/kafka/simple/integration_test.go index 28380c036..3fb5507e6 100644 --- a/component/async/kafka/simple/integration_test.go +++ b/component/async/kafka/simple/integration_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async/kafka" kafkacmp "github.com/beatlabs/patron/component/kafka" testkafka "github.com/beatlabs/patron/test/kafka" diff --git a/component/async/kafka/simple/simple.go b/component/async/kafka/simple/simple.go index 07b18e97d..8728d55fd 100644 --- a/component/async/kafka/simple/simple.go +++ b/component/async/kafka/simple/simple.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/component/async/kafka" "github.com/beatlabs/patron/internal/validation" diff --git a/component/async/kafka/simple/simple_test.go b/component/async/kafka/simple/simple_test.go index d2f4d8819..d4ba592bc 100644 --- a/component/async/kafka/simple/simple_test.go +++ b/component/async/kafka/simple/simple_test.go @@ -7,7 
+7,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async/kafka" kafkacmp "github.com/beatlabs/patron/component/kafka" "github.com/stretchr/testify/assert" diff --git a/component/kafka/component.go b/component/kafka/component.go index 9d20c8924..1e8d64448 100644 --- a/component/kafka/component.go +++ b/component/kafka/component.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/correlation" patronErrors "github.com/beatlabs/patron/errors" "github.com/beatlabs/patron/internal/validation" diff --git a/component/kafka/component_test.go b/component/kafka/component_test.go index 35d7e6ac8..4089b3d62 100644 --- a/component/kafka/component_test.go +++ b/component/kafka/component_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/correlation" "github.com/beatlabs/patron/encoding" "github.com/beatlabs/patron/encoding/json" diff --git a/component/kafka/integration_test.go b/component/kafka/integration_test.go index 84a6b8bd6..3b2a49f02 100644 --- a/component/kafka/integration_test.go +++ b/component/kafka/integration_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" kafkaclient "github.com/beatlabs/patron/client/kafka" "github.com/beatlabs/patron/correlation" testkafka "github.com/beatlabs/patron/test/kafka" diff --git a/component/kafka/kafka.go b/component/kafka/kafka.go index 14e774737..ce18001d3 100644 --- a/component/kafka/kafka.go +++ b/component/kafka/kafka.go @@ -7,7 +7,7 @@ import ( "fmt" "os" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/opentracing/opentracing-go" ) diff --git a/component/kafka/kafka_test.go b/component/kafka/kafka_test.go index d534b5171..14d6e0279 100644 --- a/component/kafka/kafka_test.go +++ b/component/kafka/kafka_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/correlation" "github.com/opentracing/opentracing-go/mocktracer" "github.com/stretchr/testify/assert" diff --git a/component/kafka/option.go b/component/kafka/option.go index f845d2975..4857de33c 100644 --- a/component/kafka/option.go +++ b/component/kafka/option.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "golang.org/x/exp/slog" ) diff --git a/component/kafka/option_test.go b/component/kafka/option_test.go index 3cbeeac80..6989e308e 100644 --- a/component/kafka/option_test.go +++ b/component/kafka/option_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/stretchr/testify/assert" ) diff --git a/examples/client/main.go b/examples/client/main.go index 658e57fb0..1420d3776 100644 --- a/examples/client/main.go +++ b/examples/client/main.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/sqs" patronamqp "github.com/beatlabs/patron/client/amqp" diff --git a/examples/service/kafka.go b/examples/service/kafka.go index e9c5bdd83..371cf90e8 100644 --- a/examples/service/kafka.go +++ b/examples/service/kafka.go @@ -3,7 +3,7 @@ package main import ( "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron" "github.com/beatlabs/patron/component/kafka" 
"github.com/beatlabs/patron/examples" diff --git a/go.mod b/go.mod index ca1f114b5..811039ae7 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/beatlabs/patron go 1.20 require ( - github.com/Shopify/sarama v1.38.1 + github.com/IBM/sarama v1.40.1 github.com/aws/aws-sdk-go-v2 v1.23.0 github.com/aws/aws-sdk-go-v2/config v1.25.3 github.com/aws/aws-sdk-go-v2/credentials v1.16.2 @@ -63,7 +63,7 @@ require ( github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect - github.com/klauspost/compress v1.15.14 // indirect + github.com/klauspost/compress v1.16.6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/montanaflynn/stats v0.6.6 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect diff --git a/go.sum b/go.sum index d5d94aab9..419ad6169 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A= -github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g= +github.com/IBM/sarama v1.40.1 h1:lL01NNg/iBeigUbT+wpPysuTYW6roHo6kc1QrffRf0k= +github.com/IBM/sarama v1.40.1/go.mod h1:+5OFwA5Du9I6QrznhaMHsuwWdWZNMjaBSIxEWEgKOYE= github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/aws/aws-sdk-go-v2 v1.23.0 h1:PiHAzmiQQr6JULBUdvR8fKlA+UPKLT/8KbiqpFBWiAo= @@ -127,8 +127,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.14 h1:i7WCKDToww0wA+9qrUZ1xOjp218vfFo3nTU6UHp+gOc= -github.com/klauspost/compress v1.15.14/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk= +github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= diff --git a/service.go b/service.go index 4d8864891..1b949e1a6 100644 --- a/service.go +++ b/service.go @@ -179,7 +179,7 @@ func defaultLogAttrs(name, version string) []slog.Attr { } func setupLogging(lc logConfig) { - ho := slog.HandlerOptions{ + opts := &slog.HandlerOptions{ AddSource: true, Level: getLogLevel(), } @@ -187,9 +187,9 @@ func setupLogging(lc logConfig) { var hnd slog.Handler if lc.json { - hnd = ho.NewJSONHandler(os.Stderr) + hnd = slog.NewJSONHandler(os.Stderr, opts) } else { - hnd = ho.NewTextHandler(os.Stderr) + hnd = slog.NewTextHandler(os.Stderr, opts) } 
slog.New(hnd.WithAttrs(lc.attrs)) diff --git a/test/kafka/kafka.go b/test/kafka/kafka.go index 8fa02470c..2169c836a 100644 --- a/test/kafka/kafka.go +++ b/test/kafka/kafka.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" ) diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/IBM/sarama/.gitignore similarity index 100% rename from vendor/github.com/Shopify/sarama/.gitignore rename to vendor/github.com/IBM/sarama/.gitignore diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml similarity index 86% rename from vendor/github.com/Shopify/sarama/.golangci.yml rename to vendor/github.com/IBM/sarama/.golangci.yml index 0b419abbf..3d87645c1 100644 --- a/vendor/github.com/Shopify/sarama/.golangci.yml +++ b/vendor/github.com/IBM/sarama/.golangci.yml @@ -19,7 +19,7 @@ linters-settings: misspell: locale: US goimports: - local-prefixes: github.com/Shopify/sarama + local-prefixes: github.com/IBM/sarama gocritic: enabled-tags: - diagnostic @@ -39,11 +39,18 @@ linters-settings: lines: 300 statements: 300 + depguard: + rules: + main: + deny: + - pkg: "io/ioutil" + desc: Use the "io" and "os" packages instead. + linters: disable-all: true enable: - bodyclose - - deadcode + # - deadcode - depguard - exportloopref - dogsled @@ -68,12 +75,12 @@ linters: # - paralleltest # - scopelint - staticcheck - - structcheck + # - structcheck # - stylecheck - typecheck - unconvert - unused - - varcheck + # - varcheck - whitespace issues: diff --git a/vendor/github.com/IBM/sarama/CHANGELOG.md b/vendor/github.com/IBM/sarama/CHANGELOG.md new file mode 100644 index 000000000..3737fc327 --- /dev/null +++ b/vendor/github.com/IBM/sarama/CHANGELOG.md @@ -0,0 +1,1511 @@ +# Changelog + +## Version 1.40.0 (2023-07-17) + +## What's Changed + +Note: this is the first release after the transition of Sarama ownership from Shopify to IBM in https://github.com/IBM/sarama/issues/2461 + +### :rotating_light: Breaking Changes + +- chore: migrate module to github.com/IBM/sarama by @dnwe in https://github.com/IBM/sarama/pull/2492 +- fix: restore (\*OffsetCommitRequest) AddBlock func by @dnwe in https://github.com/IBM/sarama/pull/2494 + +### :bug: Fixes + +- fix(consumer): don't retry FindCoordinator forever by @dnwe in https://github.com/IBM/sarama/pull/2427 +- fix(metrics): fix race condition when calling Broker.Open() twice by @vincentbernat in https://github.com/IBM/sarama/pull/2428 +- fix: use version 4 of DescribeGroupsRequest only if kafka broker version is >= 2.4 by @faillefer in https://github.com/IBM/sarama/pull/2451 +- Fix HighWaterMarkOffset of mocks partition consumer by @gr8web in https://github.com/IBM/sarama/pull/2447 +- fix: prevent data race in balance strategy by @napallday in https://github.com/IBM/sarama/pull/2453 + +### :package: Dependency updates + +- chore(deps): bump golang.org/x/net from 0.5.0 to 0.7.0 by @dependabot in https://github.com/IBM/sarama/pull/2452 + +### :wrench: Maintenance + +- chore: add kafka 3.3.2 by @dnwe in https://github.com/IBM/sarama/pull/2434 +- chore(ci): remove Shopify/shopify-cla-action by @dnwe in https://github.com/IBM/sarama/pull/2489 +- chore: bytes.Equal instead bytes.Compare by @testwill in https://github.com/IBM/sarama/pull/2485 + +## New Contributors + +- @dependabot made their first contribution in https://github.com/IBM/sarama/pull/2452 +- @gr8web made their first contribution in
https://github.com/IBM/sarama/pull/2447 +- @testwill made their first contribution in https://github.com/IBM/sarama/pull/2485 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.1...v1.40.0 + +## Version 1.38.1 (2023-01-22) + +## What's Changed +### :bug: Fixes +* fix(example): correct `records-number` param in txn producer readme by @diallo-han in https://github.com/IBM/sarama/pull/2420 +* fix: use newConsumer method in newConsumerGroup method by @Lumotheninja in https://github.com/IBM/sarama/pull/2424 +### :package: Dependency updates +* chore(deps): bump module github.com/klauspost/compress to v1.15.14 by @dnwe in https://github.com/IBM/sarama/pull/2410 +* chore(deps): bump module golang.org/x/net to v0.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2413 +* chore(deps): bump module github.com/stretchr/testify to v1.8.1 by @dnwe in https://github.com/IBM/sarama/pull/2411 +* chore(deps): bump module github.com/xdg-go/scram to v1.1.2 by @dnwe in https://github.com/IBM/sarama/pull/2412 +* chore(deps): bump module golang.org/x/sync to v0.1.0 by @dnwe in https://github.com/IBM/sarama/pull/2414 +* chore(deps): bump github.com/eapache/go-xerial-snappy digest to bf00bc1 by @dnwe in https://github.com/IBM/sarama/pull/2418 + +## New Contributors +* @diallo-han made their first contribution in https://github.com/IBM/sarama/pull/2420 +* @Lumotheninja made their first contribution in https://github.com/IBM/sarama/pull/2424 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.0...v1.38.1 + +## Version 1.38.0 (2023-01-08) + +## What's Changed +### :tada: New Features / Improvements +* feat(producer): improve memory usage of zstd encoder by using our own pool management by @rtreffer in https://github.com/IBM/sarama/pull/2375 +* feat(proto): implement and use MetadataRequest v7 by @dnwe in https://github.com/IBM/sarama/pull/2388 +* feat(metrics): add protocol-requests-rate metric by @auntan in https://github.com/IBM/sarama/pull/2373 +### :bug: Fixes +* fix(proto): track and supply leader epoch to FetchRequest by @dnwe in https://github.com/IBM/sarama/pull/2389 +* fix(example): improve arg name used for tls skip verify by @michaeljmarshall in https://github.com/IBM/sarama/pull/2385 +* fix(zstd): default back to GOMAXPROCS concurrency by @bgreenlee in https://github.com/IBM/sarama/pull/2404 +* fix(producer): add nil check while producer is retrying by @hsweif in https://github.com/IBM/sarama/pull/2387 +* fix(producer): return errors for every message in retryBatch to avoid producer hang forever by @cch123 in https://github.com/IBM/sarama/pull/2378 +* fix(metrics): fix race when accessing metric registry by @vincentbernat in https://github.com/IBM/sarama/pull/2409 +### :package: Dependency updates +* chore(deps): bump golang.org/x/net to v0.4.0 by @dnwe in https://github.com/IBM/sarama/pull/2403 +### :wrench: Maintenance +* chore(ci): replace set-output command in GH Action by @dnwe in https://github.com/IBM/sarama/pull/2390 +* chore(ci): include kafka 3.3.1 in testing matrix by @dnwe in https://github.com/IBM/sarama/pull/2406 + +## New Contributors +* @michaeljmarshall made their first contribution in https://github.com/IBM/sarama/pull/2385 +* @bgreenlee made their first contribution in https://github.com/IBM/sarama/pull/2404 +* @hsweif made their first contribution in https://github.com/IBM/sarama/pull/2387 +* @cch123 made their first contribution in https://github.com/IBM/sarama/pull/2378 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.2...v1.38.0 + +## 
Version 1.37.2 (2022-10-04) + +## What's Changed +### :bug: Fixes +* fix: ensure updateMetaDataMs is 64-bit aligned by @dnwe in https://github.com/IBM/sarama/pull/2356 +### :heavy_plus_sign: Other Changes +* fix: bump go.mod specification to go 1.17 by @dnwe in https://github.com/IBM/sarama/pull/2357 + + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.1...v1.37.2 + +## Version 1.37.1 (2022-10-04) + +## What's Changed +### :bug: Fixes +* fix: support existing deprecated Rebalance.Strategy field usage by @spongecaptain in https://github.com/IBM/sarama/pull/2352 +* fix(test): consumer group rebalance strategy compatibility by @Jacob-bzx in https://github.com/IBM/sarama/pull/2353 +* fix(producer): replace time.After with time.Timer to avoid high memory usage by @Jacob-bzx in https://github.com/IBM/sarama/pull/2355 + +## New Contributors +* @spongecaptain made their first contribution in https://github.com/IBM/sarama/pull/2352 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.0...v1.37.1 + +## Version 1.37.0 (2022-09-28) + +## What's Changed + +### :rotating_light: Breaking Changes +* Due to a change in [github.com/klauspost/compress v1.15.10](https://github.com/klauspost/compress/releases/tag/v1.15.10), Sarama v1.37.0 requires Go 1.17 going forward; unfortunately, due to an oversight, this wasn't reflected in the go.mod declaration at the time of release. + +### :tada: New Features / Improvements +* feat(consumer): support multiple balance strategies by @Jacob-bzx in https://github.com/IBM/sarama/pull/2339 +* feat(producer): transactional API by @ryarnyah in https://github.com/IBM/sarama/pull/2295 +* feat(mocks): support key in MockFetchResponse. by @Skandalik in https://github.com/IBM/sarama/pull/2328 +### :bug: Fixes +* fix: avoid panic when Metadata.RefreshFrequency is 0 by @Jacob-bzx in https://github.com/IBM/sarama/pull/2329 +* fix(consumer): avoid pushing unrelated responses to paused children by @pkoutsovasilis in https://github.com/IBM/sarama/pull/2317 +* fix: prevent metrics leak with cleanup by @auntan in https://github.com/IBM/sarama/pull/2340 +* fix: race condition(may panic) when closing consumer group by @Jacob-bzx in https://github.com/IBM/sarama/pull/2331 +* fix(consumer): default ResetInvalidOffsets to true by @dnwe in https://github.com/IBM/sarama/pull/2345 +* Validate the `Config` when creating a mock producer/consumer by @joewreschnig in https://github.com/IBM/sarama/pull/2327 +### :package: Dependency updates +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.16 by @dnwe in https://github.com/IBM/sarama/pull/2335 +* chore(deps): bump golang.org/x/net digest to bea034e by @dnwe in https://github.com/IBM/sarama/pull/2333 +* chore(deps): bump golang.org/x/sync digest to 7f9b162 by @dnwe in https://github.com/IBM/sarama/pull/2334 +* chore(deps): bump golang.org/x/net digest to f486391 by @dnwe in https://github.com/IBM/sarama/pull/2348 +* chore(deps): bump module github.com/shopify/toxiproxy/v2 to v2.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2336 +* chore(deps): bump module github.com/klauspost/compress to v1.15.11 by @dnwe in https://github.com/IBM/sarama/pull/2349 +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.17 by @dnwe in https://github.com/IBM/sarama/pull/2350 +### :wrench: Maintenance +* chore(ci): bump kafka-versions to latest by @dnwe in https://github.com/IBM/sarama/pull/2346 +* chore(ci): bump go-versions to N and N-1 by @dnwe in https://github.com/IBM/sarama/pull/2347 + +## New Contributors +* 
@Jacob-bzx made their first contribution in https://github.com/IBM/sarama/pull/2329 +* @pkoutsovasilis made their first contribution in https://github.com/IBM/sarama/pull/2317 +* @Skandalik made their first contribution in https://github.com/IBM/sarama/pull/2328 +* @auntan made their first contribution in https://github.com/IBM/sarama/pull/2340 +* @ryarnyah made their first contribution in https://github.com/IBM/sarama/pull/2295 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.36.0...v1.37.0 + +## Version 1.36.0 (2022-08-11) + +## What's Changed +### :tada: New Features / Improvements +* feat: add option to propagate OffsetOutOfRange error by @dkolistratova in https://github.com/IBM/sarama/pull/2252 +* feat(producer): expose ProducerMessage.byteSize() function by @k8scat in https://github.com/IBM/sarama/pull/2315 +* feat(metrics): track consumer fetch request rates by @dnwe in https://github.com/IBM/sarama/pull/2299 +### :bug: Fixes +* fix(consumer): avoid submitting empty fetch requests when paused by @raulnegreiros in https://github.com/IBM/sarama/pull/2143 +### :package: Dependency updates +* chore(deps): bump module github.com/klauspost/compress to v1.15.9 by @dnwe in https://github.com/IBM/sarama/pull/2304 +* chore(deps): bump golang.org/x/net digest to c7608f3 by @dnwe in https://github.com/IBM/sarama/pull/2301 +* chore(deps): bump golangci/golangci-lint-action action to v3 by @dnwe in https://github.com/IBM/sarama/pull/2311 +* chore(deps): bump golang.org/x/net digest to 07c6da5 by @dnwe in https://github.com/IBM/sarama/pull/2307 +* chore(deps): bump github actions versions (major) by @dnwe in https://github.com/IBM/sarama/pull/2313 +* chore(deps): bump module github.com/jcmturner/gofork to v1.7.6 by @dnwe in https://github.com/IBM/sarama/pull/2305 +* chore(deps): bump golang.org/x/sync digest to 886fb93 by @dnwe in https://github.com/IBM/sarama/pull/2302 +* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.3 by @dnwe in https://github.com/IBM/sarama/pull/2303 +### :wrench: Maintenance +* chore: add kafka 3.1.1 to the version matrix by @dnwe in https://github.com/IBM/sarama/pull/2300 +### :heavy_plus_sign: Other Changes +* Migrate off probot-CLA to new GitHub Action by @cursedcoder in https://github.com/IBM/sarama/pull/2294 +* Forgot to remove cla probot by @cursedcoder in https://github.com/IBM/sarama/pull/2297 +* chore(lint): re-enable a small amount of go-critic by @dnwe in https://github.com/IBM/sarama/pull/2312 + +## New Contributors +* @cursedcoder made their first contribution in https://github.com/IBM/sarama/pull/2294 +* @dkolistratova made their first contribution in https://github.com/IBM/sarama/pull/2252 +* @k8scat made their first contribution in https://github.com/IBM/sarama/pull/2315 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.35.0...v1.36.0 + +## Version 1.35.0 (2022-07-22) + +## What's Changed +### :bug: Fixes +* fix: fix metadata retry backoff invalid when get metadata failed by @Stephan14 in https://github.com/IBM/sarama/pull/2256 +* fix(balance): sort and de-duplicate memberIDs by @dnwe in https://github.com/IBM/sarama/pull/2285 +* fix: prevent DescribeLogDirs hang in admin client by @zerowidth in https://github.com/IBM/sarama/pull/2269 +* fix: include assignment-less members in SyncGroup by @dnwe in https://github.com/IBM/sarama/pull/2292 +### :package: Dependency updates +* chore(deps): bump module github.com/stretchr/testify to v1.8.0 by @dnwe in https://github.com/IBM/sarama/pull/2284 +* chore(deps): bump module 
github.com/eapache/go-resiliency to v1.3.0 by @dnwe in https://github.com/IBM/sarama/pull/2283 +* chore(deps): bump golang.org/x/net digest to 1185a90 by @dnwe in https://github.com/IBM/sarama/pull/2279 +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.15 by @dnwe in https://github.com/IBM/sarama/pull/2281 +* chore(deps): bump module github.com/klauspost/compress to v1.15.8 by @dnwe in https://github.com/IBM/sarama/pull/2280 +### :wrench: Maintenance +* chore: rename `any` func to avoid identifier by @dnwe in https://github.com/IBM/sarama/pull/2272 +* chore: add and test against kafka 3.2.0 by @dnwe in https://github.com/IBM/sarama/pull/2288 +* chore: document Fetch protocol fields by @dnwe in https://github.com/IBM/sarama/pull/2289 +### :heavy_plus_sign: Other Changes +* chore(ci): fix redirect with GITHUB_STEP_SUMMARY by @dnwe in https://github.com/IBM/sarama/pull/2286 +* fix(test): permit ECONNRESET in TestInitProducerID by @dnwe in https://github.com/IBM/sarama/pull/2287 +* fix: ensure empty or devel version valid by @dnwe in https://github.com/IBM/sarama/pull/2291 + +## New Contributors +* @zerowidth made their first contribution in https://github.com/IBM/sarama/pull/2269 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.1...v1.35.0 + +## Version 1.34.1 (2022-06-07) + +## What's Changed +### :bug: Fixes +* fix(examples): check session.Context().Done() in examples/consumergroup by @zxc111 in https://github.com/IBM/sarama/pull/2240 +* fix(protocol): move AuthorizedOperations into GroupDescription of DescribeGroupsResponse by @aiquestion in https://github.com/IBM/sarama/pull/2247 +* fix(protocol): tidyup DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2248 +* fix(consumer): range balance strategy not like reference by @njhartwell in https://github.com/IBM/sarama/pull/2245 +### :wrench: Maintenance +* chore(ci): experiment with using tparse by @dnwe in https://github.com/IBM/sarama/pull/2236 +* chore(deps): bump thirdparty dependencies to latest releases by @dnwe in https://github.com/IBM/sarama/pull/2242 + +## New Contributors +* @zxc111 made their first contribution in https://github.com/IBM/sarama/pull/2240 +* @njhartwell made their first contribution in https://github.com/IBM/sarama/pull/2245 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.0...v1.34.1 + +## Version 1.34.0 (2022-05-30) + +## What's Changed +### :tada: New Features / Improvements +* KIP-345: support static membership by @aiquestion in https://github.com/IBM/sarama/pull/2230 +### :bug: Fixes +* fix: KIP-368 use receiver goroutine to process all sasl v1 responses by @k-wall in https://github.com/IBM/sarama/pull/2234 +### :wrench: Maintenance +* chore(deps): bump module github.com/pierrec/lz4 to v4 by @dnwe in https://github.com/IBM/sarama/pull/2231 +* chore(deps): bump golang.org/x/net digest to 2e3eb7b by @dnwe in https://github.com/IBM/sarama/pull/2232 + +## New Contributors +* @aiquestion made their first contribution in https://github.com/IBM/sarama/pull/2230 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.33.0...v1.34.0 + +## Version 1.33.0 (2022-05-11) + +## What's Changed +### :rotating_light: Breaking Changes + +**Note: with this change, the user of Sarama is required to use Go 1.13's errors.Is etc (rather than ==) when forming conditionals returned by this library.** +* feat: make `ErrOutOfBrokers` wrap the underlying error that prevented connections to the brokers by @k-wall in https://github.com/IBM/sarama/pull/2131 + + +### 
:tada: New Features / Improvements +* feat(message): add UnmarshalText method to CompressionCodec by @vincentbernat in https://github.com/IBM/sarama/pull/2172 +* KIP-368 : Allow SASL Connections to Periodically Re-Authenticate by @k-wall in https://github.com/IBM/sarama/pull/2197 +* feat: add batched CreateACLs func to ClusterAdmin by @nkostoulas in https://github.com/IBM/sarama/pull/2191 +### :bug: Fixes +* fix: TestRecordBatchDecoding failing sporadically by @k-wall in https://github.com/IBM/sarama/pull/2154 +* feat(test): add an fvt for broker deadlock by @dnwe in https://github.com/IBM/sarama/pull/2144 +* fix: avoid starvation in subscriptionManager by @dnwe in https://github.com/IBM/sarama/pull/2109 +* fix: remove "Is your cluster reachable?" from msg by @dnwe in https://github.com/IBM/sarama/pull/2165 +* fix: remove trailing fullstop from error strings by @dnwe in https://github.com/IBM/sarama/pull/2166 +* fix: return underlying sasl error message by @dnwe in https://github.com/IBM/sarama/pull/2164 +* fix: potential data race on a global variable by @pior in https://github.com/IBM/sarama/pull/2171 +* fix: AdminClient | CreateACLs | check for error in response, return error if needed by @omris94 in https://github.com/IBM/sarama/pull/2185 +* producer: ensure that the management message (fin) is never "leaked" by @niamster in https://github.com/IBM/sarama/pull/2182 +* fix: prevent RefreshBrokers leaking old brokers by @k-wall in https://github.com/IBM/sarama/pull/2203 +* fix: prevent RefreshController leaking controller by @k-wall in https://github.com/IBM/sarama/pull/2204 +* fix: prevent AsyncProducer retryBatch from leaking by @k-wall in https://github.com/IBM/sarama/pull/2208 +* fix: prevent metrics leak when authenticate fails by @Stephan14 in https://github.com/IBM/sarama/pull/2205 +* fix: prevent deadlock between subscription manager and consumer goroutines by @niamster in https://github.com/IBM/sarama/pull/2194 +* fix: prevent idempotent producer epoch exhaustion by @ladislavmacoun in https://github.com/IBM/sarama/pull/2178 +* fix(test): mockbroker offsetResponse vers behavior by @dnwe in https://github.com/IBM/sarama/pull/2213 +* fix: cope with OffsetsLoadInProgress on Join+Sync by @dnwe in https://github.com/IBM/sarama/pull/2214 +* fix: make default MaxWaitTime 500ms by @dnwe in https://github.com/IBM/sarama/pull/2227 +### :package: Dependency updates +* chore(deps): bump xdg-go/scram and klauspost/compress by @dnwe in https://github.com/IBM/sarama/pull/2170 +### :wrench: Maintenance +* fix(test): skip TestReadOnlyAndAllCommittedMessages by @dnwe in https://github.com/IBM/sarama/pull/2161 +* fix(test): remove t.Parallel() by @dnwe in https://github.com/IBM/sarama/pull/2162 +* chore(ci): bump along to Go 1.17+1.18 and bump golangci-lint by @dnwe in https://github.com/IBM/sarama/pull/2183 +* chore: switch to multi-arch compatible docker images by @dnwe in https://github.com/IBM/sarama/pull/2210 +### :heavy_plus_sign: Other Changes +* Remediate a number of goroutine leaks (mainly test issues) by @k-wall in https://github.com/IBM/sarama/pull/2198 +* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199 +* chore: bump functional test timeout to 12m by @dnwe in https://github.com/IBM/sarama/pull/2200 +* fix(admin): make DeleteRecords err consistent by @dnwe in https://github.com/IBM/sarama/pull/2226 + +## New Contributors +* @k-wall made their first contribution in https://github.com/IBM/sarama/pull/2154 +* @pior made their first contribution in 
https://github.com/IBM/sarama/pull/2171 +* @omris94 made their first contribution in https://github.com/IBM/sarama/pull/2185 +* @vincentbernat made their first contribution in https://github.com/IBM/sarama/pull/2172 +* @niamster made their first contribution in https://github.com/IBM/sarama/pull/2182 +* @ladislavmacoun made their first contribution in https://github.com/IBM/sarama/pull/2178 +* @nkostoulas made their first contribution in https://github.com/IBM/sarama/pull/2191 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.32.0...v1.33.0 + +## Version 1.32.0 (2022-02-24) + +### ⚠️ This release has been superseded by v1.33.0 and should _not_ be used. + +* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199 + +--- + +## What's Changed +### :bug: Fixes +* Fix deadlock when closing Broker in brokerProducer by @slaunay in https://github.com/IBM/sarama/pull/2133 +### :package: Dependency updates +* chore: refresh dependencies to latest by @dnwe in https://github.com/IBM/sarama/pull/2159 +### :wrench: Maintenance +* fix: rework RebalancingMultiplePartitions test by @dnwe in https://github.com/IBM/sarama/pull/2130 +* fix(test): use Sarama transactional producer by @dnwe in https://github.com/IBM/sarama/pull/1939 +* chore: enable t.Parallel() wherever possible by @dnwe in https://github.com/IBM/sarama/pull/2138 +### :heavy_plus_sign: Other Changes +* chore: restrict to 1 testbinary at once by @dnwe in https://github.com/IBM/sarama/pull/2145 +* chore: restrict to 1 parallel test at once by @dnwe in https://github.com/IBM/sarama/pull/2146 +* Remove myself from codeowners by @bai in https://github.com/IBM/sarama/pull/2147 +* chore: add retractions for known bad versions by @dnwe in https://github.com/IBM/sarama/pull/2160 + + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.31.1...v1.32.0 + +## Version 1.31.1 (2022-02-01) + +- #2126 - @bai - Populate missing kafka versions +- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image +- #2123 - @bai - Update klauspost/compress to 0.14 +- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy +- #2119 - @bai - Add Kafka 3.1.0 version number +- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption +- #2051 - @seveas - Expose the TLS connection state of a broker connection +- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys +- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup +- #2113 - @mosceo - Fix typo + +## Version 1.31.0 (2022-01-18) + +## What's Changed +### :tada: New Features / Improvements +* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/IBM/sarama/pull/2088 +* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/IBM/sarama/pull/1686 +* Support request pipelining in AsyncProducer by @slaunay in https://github.com/IBM/sarama/pull/2094 +### :bug: Fixes +* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/IBM/sarama/pull/2080 +* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/IBM/sarama/pull/2081 +* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/IBM/sarama/pull/2082 +* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/IBM/sarama/pull/2096 +* fix: skip 
over KIP-482 tagged fields by @dnwe in https://github.com/IBM/sarama/pull/2107 +* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/IBM/sarama/pull/2108 +* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/IBM/sarama/pull/2078 +* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2111 +### :wrench: Maintenance +* chore: bump runtime and test dependencies by @dnwe in https://github.com/IBM/sarama/pull/2100 +### :memo: Documentation +* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2099 +### :heavy_plus_sign: Other Changes +* Fix typo by @mosceo in https://github.com/IBM/sarama/pull/2084 + +## New Contributors +* @grongor made their first contribution in https://github.com/IBM/sarama/pull/2080 +* @fengyinqiao made their first contribution in https://github.com/IBM/sarama/pull/2088 +* @xujianhai666 made their first contribution in https://github.com/IBM/sarama/pull/1686 +* @mosceo made their first contribution in https://github.com/IBM/sarama/pull/2084 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.1...v1.31.0 + +## Version 1.30.1 (2021-12-04) + +## What's Changed +### :tada: New Features / Improvements +* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/IBM/sarama/pull/2045 +### :bug: Fixes +* fix: set min-go-version to 1.16 by @troyanov in https://github.com/IBM/sarama/pull/2048 +* logger: fix debug logs' formatting directives by @utrack in https://github.com/IBM/sarama/pull/2054 +* fix: stuck on the batch with zero records length by @pachmu in https://github.com/IBM/sarama/pull/2057 +* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/IBM/sarama/pull/2076 +### :wrench: Maintenance +* chore: add release notes configuration by @dnwe in https://github.com/IBM/sarama/pull/2046 +* chore: confluent platform version bump by @lizthegrey in https://github.com/IBM/sarama/pull/2070 + +## Notes +* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x + +## New Contributors +* @troyanov made their first contribution in https://github.com/IBM/sarama/pull/2048 +* @lizthegrey made their first contribution in https://github.com/IBM/sarama/pull/2045 +* @utrack made their first contribution in https://github.com/IBM/sarama/pull/2054 +* @pachmu made their first contribution in https://github.com/IBM/sarama/pull/2057 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.0...v1.30.1 + +## Version 1.30.0 (2021-09-29) + +⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
+ +**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 + +--- + +ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** + +--- + +# New Features / Improvements + +- #1983 - @zifengyu - allow configuring the AllowAutoTopicCreation argument in metadata refresh +- #2000 - @matzew - Using xdg-go module for SCRAM +- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures +- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM +- #2006 - @faillefer - Add support for DeleteOffsets operation +- #1909 - @agriffaut - KIP-546 Client quota APIs +- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state +- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger +- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log +- #2019 - @dnwe - feat: add logging & a metric for producer throttle +- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface +- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol +- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open +- #2034 - @bai - Add support for kafka 3.0.0 + +# Fixes + +- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest +- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation +- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls +- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true +- #2007 - @bai - Add support for Go 1.17 +- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks +- #2010 - @dnwe - chore: enable exportloopref and misspell linters +- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements +- #2015 - @bai - Change default branch to main +- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() +- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 +- #2016 - @dnwe - chore: replace deprecated Go calls +- #2017 - @dnwe - chore: delete legacy vagrant script +- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test +- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5 +- #2033 - @bai - Update dependencies +- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method +- #2035 - @dnwe - chore: populate the missing kafka versions +- #2038 - @dnwe - feat: add a fuzzing workflow to github actions + +## New Contributors +* @zifengyu made their first contribution in https://github.com/IBM/sarama/pull/1983 +* @doxsch made their first contribution in https://github.com/IBM/sarama/pull/1990 +* @LubergAlexander made their first contribution in https://github.com/IBM/sarama/pull/1988 +* @HurSungYun made their first contribution in https://github.com/IBM/sarama/pull/2001 +* @gdm85 made their first contribution in https://github.com/IBM/sarama/pull/2003 +* @qiangmzsx made their first contribution in https://github.com/IBM/sarama/pull/1973 +* @zhaomoran made their first contribution in https://github.com/IBM/sarama/pull/1992 +* @faillefer made their first contribution in https://github.com/IBM/sarama/pull/2006 +* @crivera-fastly made their first contribution in https://github.com/IBM/sarama/pull/1718 +* @null-sleep made their first contribution in https://github.com/IBM/sarama/pull/1984 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.29.1...v1.30.0 + +## Version 
1.29.1 (2021-06-24) + +# New Features / Improvements + +- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API +- #1964 - @ajanikow - Add DelegationToken ResourceType + +# Fixes + +- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire +- #1971 - @KerryJava - fix kafka-producer-performance throughput panic +- #1968 - @dnwe - chore: bump golang.org/x versions +- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers +- #1963 - @dnwe - fix: ensure backoff timer is re-used +- #1949 - @dnwe - fix: explicitly use uint64 for payload length + +## Version 1.29.0 (2021-05-07) + +### New Features / Improvements + +- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API +- #1869 - @wyndhblb - zstd: encode+decode performance improvements +- #1541 - @izolight - add String, (Un)MarshalText for acl types. +- #1921 - @bai - Add support for Kafka 2.8.0 + +### Fixes +- #1936 - @dnwe - fix(consumer): follow preferred broker +- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication +- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) +- #1926 - @dnwe - fix: correct initial CodeQL findings +- #1925 - @bai - Test out CodeQL +- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos +- #1922 - @bai - Update go dependencies +- #1898 - @mmaslankaprv - Parsing only known control batches value +- #1887 - @withshubh - Fix: issues affecting code quality + +## Version 1.28.0 (2021-02-15) + +**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.** + +- #1870 - @kvch - Update Kerberos library to latest major +- #1876 - @bai - Update docs, reference pkg.go.dev +- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close +- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages +- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies +- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy +- #1862 - @bai - Fix CI setenv permissions issues +- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev +- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica + +## Version 1.27.2 (2020-10-21) + +### Improvements + +#1750 - @krantideep95 Adds missing mock responses for mocking consumer group + +## Fixes + +#1817 - reverts #1785 - Add private method to Client interface to prevent implementation + +## Version 1.27.1 (2020-10-07) + +### Improvements + +#1775 - @d1egoaz - Adds a Producer Interceptor example +#1781 - @justin-chen - Refresh brokers given list of seed brokers +#1784 - @justin-chen - Add randomize seed broker method +#1790 - @d1egoaz - remove example binary +#1798 - @bai - Test against Go 1.15 +#1785 - @justin-chen - Add private method to Client interface to prevent implementation +#1802 - @uvw - Support Go 1.13 error unwrapping + +## Fixes + +#1791 - @stanislavkozlovski - bump default version to 1.0.0 + +## Version 1.27.0 (2020-08-11) + +### Improvements + +#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration +#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests +#1699 - @wclaeys - Consumer group support for manually committing offsets +#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0 +#1726 - @d1egoaz - Include zstd on the 
functional tests +#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors +#1738 - @varun06 - fixed variable names that are named the same as some std lib package names +#1741 - @varun06 - updated zstd dependency to latest v1.10.10 +#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base +#1763 - @alrs - remove deprecated tls options from test +#1769 - @bai - Add support for Kafka 2.6.0 + +## Fixes + +#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication +#1744 - @alrs - Fix isBalanced Function Signature + +## Version 1.26.4 (2020-05-19) + +## Fixes + +- #1701 - @d1egoaz - Set server name only for the current broker +- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka + +## Version 1.26.3 (2020-05-07) + +## Fixes + +- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config + +## Version 1.26.2 (2020-05-06) + +## ⚠️ Known Issues + +This release has been marked as not ready for production and may be unstable; please use v1.26.4. + +### Improvements + +- #1560 - @iyacontrol - add sync pool for gzip 1-9 +- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID +- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs +- #1632 - @bai - Add support for Go 1.14 +- #1640 - @random-dwi - Feature/fix list partition reassignments +- #1646 - @mimaison - Add DescribeLogDirs to admin client +- #1667 - @bai - Add support for kafka 2.5.0 + +## Fixes + +- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 +- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine +- #1602 - @d1egoaz - adds a note about consumer groups Consume method +- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly +- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented +- #1614 - @alrs - produce_response.go: Remove Unused Functions +- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables +- #1639 - @agriffaut - Handle errors with no message but error code +- #1643 - @kzinglzy - fix `config.net.keepalive` +- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs +- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata +- #1650 - @lavoiesl - Return the response error in heartbeatLoop +- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die +- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. 
+ +## Version 1.26.1 (2020-02-04) + +Improvements: +- Add requests-in-flight metric ([1539](https://github.com/IBM/sarama/pull/1539)) +- Fix misleading example for cluster admin ([1595](https://github.com/IBM/sarama/pull/1595)) +- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/IBM/sarama/pull/1573)) +- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/IBM/sarama/pull/1592)) + +Bug Fixes: +- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/IBM/sarama/pull/1590)) +- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/IBM/sarama/pull/1589)) + +## Version 1.26.0 (2020-01-24) + +New Features: +- Enable zstd compression + ([1574](https://github.com/IBM/sarama/pull/1574), + [1582](https://github.com/IBM/sarama/pull/1582)) +- Support headers in tools kafka-console-producer + ([1549](https://github.com/IBM/sarama/pull/1549)) + +Improvements: +- Add SASL AuthIdentity to SASL frames (authzid) + ([1585](https://github.com/IBM/sarama/pull/1585)). + +Bug Fixes: +- Sending messages with ZStd compression enabled fails in multiple ways + ([1252](https://github.com/IBM/sarama/issues/1252)). +- Use the broker for any admin on BrokerConfig + ([1571](https://github.com/IBM/sarama/pull/1571)). +- Set DescribeConfigRequest Version field + ([1576](https://github.com/IBM/sarama/pull/1576)). +- ConsumerGroup flooding logs with client/metadata update req + ([1578](https://github.com/IBM/sarama/pull/1578)). +- MetadataRequest version in DescribeCluster + ([1580](https://github.com/IBM/sarama/pull/1580)). +- Fix deadlock in consumer group handleError + ([1581](https://github.com/IBM/sarama/pull/1581)) +- Fill in the Fetch{Request,Response} protocol + ([1582](https://github.com/IBM/sarama/pull/1582)). +- Retry topic request on ControllerNotAvailable + ([1586](https://github.com/IBM/sarama/pull/1586)). + +## Version 1.25.0 (2020-01-13) + +New Features: +- Support TLS protocol in kafka-producer-performance + ([1538](https://github.com/IBM/sarama/pull/1538)). +- Add support for kafka 2.4.0 + ([1552](https://github.com/IBM/sarama/pull/1552)). + +Improvements: +- Allow the Consumer to disable auto-commit offsets + ([1164](https://github.com/IBM/sarama/pull/1164)). +- Produce records with consistent timestamps + ([1455](https://github.com/IBM/sarama/pull/1455)). + +Bug Fixes: +- Fix incorrect SetTopicMetadata name mentions + ([1534](https://github.com/IBM/sarama/pull/1534)). +- Fix client.tryRefreshMetadata Println + ([1535](https://github.com/IBM/sarama/pull/1535)). +- Fix panic on calling updateMetadata on closed client + ([1531](https://github.com/IBM/sarama/pull/1531)). +- Fix possible faulty metrics in TestFuncProducing + ([1545](https://github.com/IBM/sarama/pull/1545)). + +## Version 1.24.1 (2019-10-31) + +New Features: +- Add DescribeLogDirs Request/Response pair + ([1520](https://github.com/IBM/sarama/pull/1520)). + +Bug Fixes: +- Fix ClusterAdmin returning invalid controller ID on DescribeCluster + ([1518](https://github.com/IBM/sarama/pull/1518)). +- Fix issue with consumergroup not rebalancing when new partition is added + ([1525](https://github.com/IBM/sarama/pull/1525)). +- Ensure consistent use of read/write deadlines + ([1529](https://github.com/IBM/sarama/pull/1529)). + +## Version 1.24.0 (2019-10-09) + +New Features: +- Add sticky partition assignor + ([1416](https://github.com/IBM/sarama/pull/1416)). 
+- Switch from cgo zstd package to pure Go implementation + ([1477](https://github.com/IBM/sarama/pull/1477)). + +Improvements: +- Allow creating ClusterAdmin from client + ([1415](https://github.com/IBM/sarama/pull/1415)). +- Set KafkaVersion in ListAcls method + ([1452](https://github.com/IBM/sarama/pull/1452)). +- Set request version in CreateACL ClusterAdmin method + ([1458](https://github.com/IBM/sarama/pull/1458)). +- Set request version in DeleteACL ClusterAdmin method + ([1461](https://github.com/IBM/sarama/pull/1461)). +- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest + ([1464](https://github.com/IBM/sarama/pull/1464)). +- Remove direct usage of gofork + ([1465](https://github.com/IBM/sarama/pull/1465)). +- Add support for Go 1.13 + ([1478](https://github.com/IBM/sarama/pull/1478)). +- Improve behavior of NewMockListAclsResponse + ([1481](https://github.com/IBM/sarama/pull/1481)). + +Bug Fixes: +- Fix race condition in consumergroup example + ([1434](https://github.com/IBM/sarama/pull/1434)). +- Fix brokerProducer goroutine leak + ([1442](https://github.com/IBM/sarama/pull/1442)). +- Use released version of lz4 library + ([1469](https://github.com/IBM/sarama/pull/1469)). +- Set correct version in MockDeleteTopicsResponse + ([1484](https://github.com/IBM/sarama/pull/1484)). +- Fix CLI help message typo + ([1494](https://github.com/IBM/sarama/pull/1494)). + +Known Issues: +- Please **don't** use Zstd, as it doesn't work right now. + See https://github.com/IBM/sarama/issues/1252 + +## Version 1.23.1 (2019-07-22) + +Bug Fixes: +- Fix fetch delete record bug + ([1425](https://github.com/IBM/sarama/pull/1425)). +- Handle SASL/OAUTHBEARER token rejection + ([1428](https://github.com/IBM/sarama/pull/1428)). + +## Version 1.23.0 (2019-07-02) + +New Features: +- Add support for Kafka 2.3.0 + ([1418](https://github.com/IBM/sarama/pull/1418)). +- Add support for ListConsumerGroupOffsets v2 + ([1374](https://github.com/IBM/sarama/pull/1374)). +- Add support for DeleteConsumerGroup + ([1417](https://github.com/IBM/sarama/pull/1417)). +- Add support for SASLVersion configuration + ([1410](https://github.com/IBM/sarama/pull/1410)). +- Add kerberos support + ([1366](https://github.com/IBM/sarama/pull/1366)). + +Improvements: +- Improve sasl_scram_client example + ([1406](https://github.com/IBM/sarama/pull/1406)). +- Fix shutdown and race-condition in consumer-group example + ([1404](https://github.com/IBM/sarama/pull/1404)). +- Add support for error codes 77—81 + ([1397](https://github.com/IBM/sarama/pull/1397)). +- Pool internal objects allocated per message + ([1385](https://github.com/IBM/sarama/pull/1385)). +- Reduce packet decoder allocations + ([1373](https://github.com/IBM/sarama/pull/1373)). +- Support timeout when fetching metadata + ([1359](https://github.com/IBM/sarama/pull/1359)). + +Bug Fixes: +- Fix fetch size integer overflow + ([1376](https://github.com/IBM/sarama/pull/1376)). +- Handle and log throttled FetchResponses + ([1383](https://github.com/IBM/sarama/pull/1383)). +- Refactor misspelled word Resouce to Resource + ([1368](https://github.com/IBM/sarama/pull/1368)). + +## Version 1.22.1 (2019-04-29) + +Improvements: +- Use zstd 1.3.8 + ([1350](https://github.com/IBM/sarama/pull/1350)). +- Add support for SaslHandshakeRequest v1 + ([1354](https://github.com/IBM/sarama/pull/1354)). + +Bug Fixes: +- Fix V5 MetadataRequest nullable topics array + ([1353](https://github.com/IBM/sarama/pull/1353)). 
+- Use a different SCRAM client for each broker connection + ([1349](https://github.com/IBM/sarama/pull/1349)). +- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 + ([1344](https://github.com/IBM/sarama/pull/1344)). + +## Version 1.22.0 (2019-04-09) + +New Features: +- Add Offline Replicas Operation to Client + ([1318](https://github.com/IBM/sarama/pull/1318)). +- Allow using proxy when connecting to broker + ([1326](https://github.com/IBM/sarama/pull/1326)). +- Implement ReadCommitted + ([1307](https://github.com/IBM/sarama/pull/1307)). +- Add support for Kafka 2.2.0 + ([1331](https://github.com/IBM/sarama/pull/1331)). +- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms + ([1295](https://github.com/IBM/sarama/pull/1295)). + +Improvements: +- Unregister all broker metrics on broker stop + ([1232](https://github.com/IBM/sarama/pull/1232)). +- Add SCRAM authentication example + ([1303](https://github.com/IBM/sarama/pull/1303)). +- Add consumergroup examples + ([1304](https://github.com/IBM/sarama/pull/1304)). +- Expose consumer batch size metric + ([1296](https://github.com/IBM/sarama/pull/1296)). +- Add TLS options to console producer and consumer + ([1300](https://github.com/IBM/sarama/pull/1300)). +- Reduce client close bookkeeping + ([1297](https://github.com/IBM/sarama/pull/1297)). +- Satisfy error interface in create responses + ([1154](https://github.com/IBM/sarama/pull/1154)). +- Please lint gods + ([1346](https://github.com/IBM/sarama/pull/1346)). + +Bug Fixes: +- Fix multi consumer group instance crash + ([1338](https://github.com/IBM/sarama/pull/1338)). +- Update lz4 to latest version + ([1347](https://github.com/IBM/sarama/pull/1347)). +- Retry ErrNotCoordinatorForConsumer in new consumergroup session + ([1231](https://github.com/IBM/sarama/pull/1231)). +- Fix cleanup error handler + ([1332](https://github.com/IBM/sarama/pull/1332)). +- Fix race condition in PartitionConsumer + ([1156](https://github.com/IBM/sarama/pull/1156)). + +## Version 1.21.0 (2019-02-24) + +New Features: +- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest + ([1236](https://github.com/IBM/sarama/pull/1236)). +- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests + ([1178](https://github.com/IBM/sarama/pull/1178)). +- Implement SASL/OAUTHBEARER + ([1240](https://github.com/IBM/sarama/pull/1240)). + +Improvements: +- Add Go mod support + ([1282](https://github.com/IBM/sarama/pull/1282)). +- Add error codes 73-76 + ([1239](https://github.com/IBM/sarama/pull/1239)). +- Add retry backoff function + ([1160](https://github.com/IBM/sarama/pull/1160)). +- Maintain metadata in the producer even when retries are disabled + ([1189](https://github.com/IBM/sarama/pull/1189)). +- Include ReplicaAssignment in ListTopics + ([1274](https://github.com/IBM/sarama/pull/1274)). +- Add producer performance tool + ([1222](https://github.com/IBM/sarama/pull/1222)). +- Add support for LogAppend timestamps + ([1258](https://github.com/IBM/sarama/pull/1258)). + +Bug Fixes: +- Fix potential deadlock when a heartbeat request fails + ([1286](https://github.com/IBM/sarama/pull/1286)). +- Fix consuming compacted topic + ([1227](https://github.com/IBM/sarama/pull/1227)). +- Set correct Kafka version for DescribeConfigsRequest v1 + ([1277](https://github.com/IBM/sarama/pull/1277)). +- Update kafka test version + ([1273](https://github.com/IBM/sarama/pull/1273)).
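To make the SASL/OAUTHBEARER support added in 1.21.0 concrete: it is configured through `Config.Net.SASL` together with a token provider. Below is a minimal sketch, assuming a static token; the `staticTokenProvider` type, the token value, and the surrounding package are illustrative assumptions, not part of the library or this release.

```go
package saslexample

import "github.com/IBM/sarama"

// staticTokenProvider is a hypothetical sarama.AccessTokenProvider that
// always hands back the same bearer token; production code would fetch
// and refresh tokens from an OAuth2 identity provider instead.
type staticTokenProvider struct{ token string }

func (s *staticTokenProvider) Token() (*sarama.AccessToken, error) {
	return &sarama.AccessToken{Token: s.token}, nil
}

// newOAuthConfig returns a config with SASL/OAUTHBEARER enabled.
func newOAuthConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeOAuth // "OAUTHBEARER"
	cfg.Net.SASL.TokenProvider = &staticTokenProvider{token: "example-token"}
	return cfg
}
```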
+ +## Version 1.20.1 (2019-01-10) + +New Features: +- Add optional replica id in offset request + ([1100](https://github.com/IBM/sarama/pull/1100)). + +Improvements: +- Implement DescribeConfigs Request + Response v1 & v2 + ([1230](https://github.com/IBM/sarama/pull/1230)). +- Reuse compression objects + ([1185](https://github.com/IBM/sarama/pull/1185)). +- Switch from png to svg for GoDoc link in README + ([1243](https://github.com/IBM/sarama/pull/1243)). +- Fix typo in deprecation notice for FetchResponseBlock.Records + ([1242](https://github.com/IBM/sarama/pull/1242)). +- Fix typos in consumer metadata response file + ([1244](https://github.com/IBM/sarama/pull/1244)). + +Bug Fixes: +- Revert to individual msg retries for non-idempotent + ([1203](https://github.com/IBM/sarama/pull/1203)). +- Respect MaxMessageBytes limit for uncompressed messages + ([1141](https://github.com/IBM/sarama/pull/1141)). + +## Version 1.20.0 (2018-12-10) + +New Features: + - Add support for zstd compression + ([#1170](https://github.com/IBM/sarama/pull/1170)). + - Add support for Idempotent Producer + ([#1152](https://github.com/IBM/sarama/pull/1152)). + - Add support for Kafka 2.1.0 + ([#1229](https://github.com/IBM/sarama/pull/1229)). + - Add support for OffsetCommit request/response pairs versions v1 to v5 + ([#1201](https://github.com/IBM/sarama/pull/1201)). + - Add support for OffsetFetch request/response pair up to version v5 + ([#1198](https://github.com/IBM/sarama/pull/1198)). + +Improvements: + - Export broker's Rack setting + ([#1173](https://github.com/IBM/sarama/pull/1173)). + - Always use latest patch version of Go on CI + ([#1202](https://github.com/IBM/sarama/pull/1202)). + - Add error codes 61 to 72 + ([#1195](https://github.com/IBM/sarama/pull/1195)). + +Bug Fixes: + - Fix build without cgo + ([#1182](https://github.com/IBM/sarama/pull/1182)). + - Fix go vet suggestion in consumer group file + ([#1209](https://github.com/IBM/sarama/pull/1209)). + - Fix typos in code and comments + ([#1228](https://github.com/IBM/sarama/pull/1228)). + +## Version 1.19.0 (2018-09-27) + +New Features: + - Implement a higher-level consumer group + ([#1099](https://github.com/IBM/sarama/pull/1099)). + +Improvements: + - Add support for Go 1.11 + ([#1176](https://github.com/IBM/sarama/pull/1176)). + +Bug Fixes: + - Fix encoding of `MetadataResponse` with version 2 and higher + ([#1174](https://github.com/IBM/sarama/pull/1174)). + - Fix race condition in mock async producer + ([#1174](https://github.com/IBM/sarama/pull/1174)). + +## Version 1.18.0 (2018-09-07) + +New Features: + - Make `Partitioner.RequiresConsistency` vary per-message + ([#1112](https://github.com/IBM/sarama/pull/1112)). + - Add customizable partitioner + ([#1118](https://github.com/IBM/sarama/pull/1118)). + - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`, + `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL` + ([#1055](https://github.com/IBM/sarama/pull/1055)). + +Improvements: + - Add support for Kafka 2.0.0 + ([#1149](https://github.com/IBM/sarama/pull/1149)). + - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts + ([#1123](https://github.com/IBM/sarama/pull/1123)). + - Simpler offset management + ([#1127](https://github.com/IBM/sarama/pull/1127)). + +Bug Fixes: + - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka + ([#1110](https://github.com/IBM/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the + expected topic/partition blocks + ([#1086](https://github.com/IBM/sarama/pull/1086)). + - Fix consumer block when response contains only control messages + ([#1115](https://github.com/IBM/sarama/pull/1115)). + - Add timeout config for ClusterAdmin requests + ([#1142](https://github.com/IBM/sarama/pull/1142)). + - Add version check when producing message with headers + ([#1117](https://github.com/IBM/sarama/pull/1117)). + - Fix `MetadataRequest` for empty list of topics + ([#1132](https://github.com/IBM/sarama/pull/1132)). + - Fix producer topic metadata on-demand fetch when topic error happens in metadata response + ([#1125](https://github.com/IBM/sarama/pull/1125)). + +## Version 1.17.0 (2018-05-30) + +New Features: + - Add support for gzip compression levels + ([#1044](https://github.com/IBM/sarama/pull/1044)). + - Add support for Metadata request/response pairs versions v1 to v5 + ([#1047](https://github.com/IBM/sarama/pull/1047), + [#1069](https://github.com/IBM/sarama/pull/1069)). + - Add versioning to JoinGroup request/response pairs + ([#1098](https://github.com/IBM/sarama/pull/1098)). + - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs + ([#1065](https://github.com/IBM/sarama/pull/1065), + [#1096](https://github.com/IBM/sarama/pull/1096), + [#1027](https://github.com/IBM/sarama/pull/1027)). + - Add `Controller()` method to Client interface + ([#1063](https://github.com/IBM/sarama/pull/1063)). + +Improvements: + - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp + ([#1010](https://github.com/IBM/sarama/pull/1010)). + - Expose missing protocol parts: `msgSet` and `recordBatch` + ([#1049](https://github.com/IBM/sarama/pull/1049)). + - Add support for v1 DeleteTopics Request + ([#1052](https://github.com/IBM/sarama/pull/1052)). + - Add support for Go 1.10 + ([#1064](https://github.com/IBM/sarama/pull/1064)). + - Claim support for Kafka 1.1.0 + ([#1073](https://github.com/IBM/sarama/pull/1073)). + +Bug Fixes: + - Fix FindCoordinatorResponse.encode to allow nil Coordinator + ([#1050](https://github.com/IBM/sarama/pull/1050), + [#1051](https://github.com/IBM/sarama/pull/1051)). + - Clear all metadata when we have the latest topic info + ([#1033](https://github.com/IBM/sarama/pull/1033)). + - Make `PartitionConsumer.Close` idempotent + ([#1092](https://github.com/IBM/sarama/pull/1092)).
+ - Assign relative offsets to messages in the producer to save the brokers a + recompression pass + ([#1002](https://github.com/IBM/sarama/pull/1002), + [#1015](https://github.com/IBM/sarama/pull/1015)). + +Bug Fixes: + - Fix producing uncompressed batches with the new protocol format + ([#1032](https://github.com/IBM/sarama/issues/1032)). + - Fix consuming compacted topics with the new protocol format + ([#1005](https://github.com/IBM/sarama/issues/1005)). + - Fix consuming topics with a mix of protocol formats + ([#1021](https://github.com/IBM/sarama/issues/1021)). + - Fix consuming when the broker includes multiple batches in a single response + ([#1022](https://github.com/IBM/sarama/issues/1022)). + - Fix detection of `PartialTrailingMessage` when the partial message was + truncated before the magic value indicating its version + ([#1030](https://github.com/IBM/sarama/pull/1030)). + - Fix expectation-checking in the mock of `SyncProducer.SendMessages` + ([#1035](https://github.com/IBM/sarama/pull/1035)). + +## Version 1.15.0 (2017-12-08) + +New Features: + - Claim official support for Kafka 1.0, though it did already work + ([#984](https://github.com/IBM/sarama/pull/984)). + - Helper methods for Kafka version numbers to/from strings + ([#989](https://github.com/IBM/sarama/pull/989)). + - Implement CreatePartitions request/response + ([#985](https://github.com/IBM/sarama/pull/985)). + +Improvements: + - Add error codes 45-60 + ([#986](https://github.com/IBM/sarama/issues/986)). + +Bug Fixes: + - Fix slow consuming for certain Kafka 0.11/1.0 configurations + ([#982](https://github.com/IBM/sarama/pull/982)). + - Correctly determine when a FetchResponse contains the new message format + ([#990](https://github.com/IBM/sarama/pull/990)). + - Fix producing with multiple headers + ([#996](https://github.com/IBM/sarama/pull/996)). + - Fix handling of truncated record batches + ([#998](https://github.com/IBM/sarama/pull/998)). + - Fix leaking metrics when closing brokers + ([#991](https://github.com/IBM/sarama/pull/991)). + +## Version 1.14.0 (2017-11-13) + +New Features: + - Add support for the new Kafka 0.11 record-batch format, including the wire + protocol and the necessary behavioural changes in the producer and consumer. + Transactions and idempotency are not yet supported, but producing and + consuming should work with all the existing bells and whistles (batching, + compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta + of Arista Networks for this work. Part of + ([#901](https://github.com/IBM/sarama/issues/901)). + +Bug Fixes: + - Fix encoding of ProduceResponse versions in test + ([#970](https://github.com/IBM/sarama/pull/970)). + - Return partial replicas list when we have it + ([#975](https://github.com/IBM/sarama/pull/975)). + +## Version 1.13.0 (2017-10-04) + +New Features: + - Support for FetchRequest version 3 + ([#905](https://github.com/IBM/sarama/pull/905)). + - Permit setting version on mock FetchResponses + ([#939](https://github.com/IBM/sarama/pull/939)). + - Add a configuration option to support storing only minimal metadata for + extremely large clusters + ([#937](https://github.com/IBM/sarama/pull/937)). + - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets + ([#932](https://github.com/IBM/sarama/pull/932)). + +Improvements: + - Provide the block-level timestamp when consuming compressed messages + ([#885](https://github.com/IBM/sarama/issues/885)). 
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned + by the broker, which can be meaningful + ([#930](https://github.com/IBM/sarama/pull/930)). + - Use a `Ticker` to reduce consumer timer overhead at the cost of higher + variance in the actual timeout + ([#933](https://github.com/IBM/sarama/pull/933)). + +Bug Fixes: + - Gracefully handle messages with negative timestamps + ([#907](https://github.com/IBM/sarama/pull/907)). + - Raise a proper error when encountering an unknown message version + ([#940](https://github.com/IBM/sarama/pull/940)). + +## Version 1.12.0 (2017-05-08) + +New Features: + - Added support for the `ApiVersions` request and response pair, and Kafka + version 0.10.2 ([#867](https://github.com/IBM/sarama/pull/867)). Note + that you still need to specify the Kafka version in the Sarama configuration + for the time being. + - Added a `Brokers` method to the Client which returns the complete set of + active brokers ([#813](https://github.com/IBM/sarama/pull/813)). + - Added an `InSyncReplicas` method to the Client which returns the set of all + in-sync broker IDs for the given partition, now that the Kafka versions for + which this was misleading are no longer in our supported set + ([#872](https://github.com/IBM/sarama/pull/872)). + - Added a `NewCustomHashPartitioner` method which allows constructing a hash + partitioner with a custom hash method in case the default (FNV-1a) is not + suitable + ([#837](https://github.com/IBM/sarama/pull/837), + [#841](https://github.com/IBM/sarama/pull/841)). + +Improvements: + - Recognize more Kafka error codes + ([#859](https://github.com/IBM/sarama/pull/859)). + +Bug Fixes: + - Fix an issue where decoding a malformed FetchRequest would not return the + correct error ([#818](https://github.com/IBM/sarama/pull/818)). + - Respect ordering of group protocols in JoinGroupRequests. This fix is + transparent if you're using the `AddGroupProtocol` or + `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from + the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` + ([#812](https://github.com/IBM/sarama/issues/812)). + - Fix an alignment-related issue with atomics on 32-bit architectures + ([#859](https://github.com/IBM/sarama/pull/859)). + +## Version 1.11.0 (2016-12-20) + +_Important:_ As of Sarama 1.11 it is necessary to set the config value of +`Producer.Return.Successes` to true in order to use the SyncProducer. Previous +versions would silently override this value when instantiating a SyncProducer +which led to unexpected values and data races. + +New Features: + - Metrics! Thanks to Sébastien Launay for all his work on this feature + ([#701](https://github.com/IBM/sarama/pull/701), + [#746](https://github.com/IBM/sarama/pull/746), + [#766](https://github.com/IBM/sarama/pull/766)). + - Add support for LZ4 compression + ([#786](https://github.com/IBM/sarama/pull/786)). + - Add support for ListOffsetRequest v1 and Kafka 0.10.1 + ([#775](https://github.com/IBM/sarama/pull/775)). + - Added a `HighWaterMarks` method to the Consumer which aggregates the + `HighWaterMarkOffset` values of its child topic/partitions + ([#769](https://github.com/IBM/sarama/pull/769)). + +Bug Fixes: + - Fixed producing when using timestamps, compression and Kafka 0.10 + ([#759](https://github.com/IBM/sarama/pull/759)). + - Added missing decoder methods to DescribeGroups response + ([#756](https://github.com/IBM/sarama/pull/756)). 
+ - Fix producer shutdown when `Return.Errors` is disabled + ([#787](https://github.com/IBM/sarama/pull/787)). + - Don't mutate configuration in SyncProducer + ([#790](https://github.com/IBM/sarama/pull/790)). + - Fix crash on SASL initialization failure + ([#795](https://github.com/IBM/sarama/pull/795)). + +## Version 1.10.1 (2016-08-30) + +Bug Fixes: + - Fix the documentation for `HashPartitioner` which was incorrect + ([#717](https://github.com/IBM/sarama/pull/717)). + - Permit client creation even when it is limited by ACLs + ([#722](https://github.com/IBM/sarama/pull/722)). + - Several fixes to the consumer timer optimization code, regressions introduced + in v1.10.0. Go's timers are finicky + ([#730](https://github.com/IBM/sarama/pull/730), + [#733](https://github.com/IBM/sarama/pull/733), + [#734](https://github.com/IBM/sarama/pull/734)). + - Handle consuming compressed relative offsets with Kafka 0.10 + ([#735](https://github.com/IBM/sarama/pull/735)). + +## Version 1.10.0 (2016-08-02) + +_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of +Kafka you are running against (via the `config.Version` value) in order to use +features that may not be compatible with old Kafka versions. If you don't +specify this value it will default to 0.8.2 (the minimum supported), and trying +to use more recent features (like the offset manager) will fail with an error. + +_Also:_ The offset-manager's behaviour has been changed to match the upstream +Java consumer (see [#705](https://github.com/IBM/sarama/pull/705) and +[#713](https://github.com/IBM/sarama/pull/713)). If you use the +offset-manager, please ensure that you are committing one *greater* than the +last consumed message offset or else you may end up consuming duplicate +messages (a short configuration sketch follows at the end of this section). + +New Features: + - Support for Kafka 0.10 + ([#672](https://github.com/IBM/sarama/pull/672), + [#678](https://github.com/IBM/sarama/pull/678), + [#681](https://github.com/IBM/sarama/pull/681), and others). + - Support for configuring the target Kafka version + ([#676](https://github.com/IBM/sarama/pull/676)). + - Batch producing support in the SyncProducer + ([#677](https://github.com/IBM/sarama/pull/677)). + - Extend producer mock to allow setting expectations on message contents + ([#667](https://github.com/IBM/sarama/pull/667)). + +Improvements: + - Support `nil` compressed messages for deleting in compacted topics + ([#634](https://github.com/IBM/sarama/pull/634)). + - Pre-allocate decoding errors, greatly reducing heap usage and GC time against + misbehaving brokers ([#690](https://github.com/IBM/sarama/pull/690)). + - Re-use consumer expiry timers, removing one allocation per consumed message + ([#707](https://github.com/IBM/sarama/pull/707)). + +Bug Fixes: + - Actually default the client ID to "sarama" like we say we do + ([#664](https://github.com/IBM/sarama/pull/664)). + - Fix a rare issue where `Client.Leader` could return the wrong error + ([#685](https://github.com/IBM/sarama/pull/685)). + - Fix a possible tight loop in the consumer + ([#693](https://github.com/IBM/sarama/pull/693)). + - Match upstream's offset-tracking behaviour + ([#705](https://github.com/IBM/sarama/pull/705)). + - Report UnknownTopicOrPartition errors from the offset manager + ([#706](https://github.com/IBM/sarama/pull/706)). + - Fix possible negative partition value from the HashPartitioner + ([#709](https://github.com/IBM/sarama/pull/709)).
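Taken together, the 1.11 note (`Producer.Return.Successes` must be true for the SyncProducer) and the 1.10 note (`config.Version` must be set) reduce to a couple of configuration lines. A minimal sketch, assuming a local broker and a placeholder topic name:

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// As of 1.10, tell Sarama which Kafka version it is talking to;
	// otherwise it assumes 0.8.2 and newer features fail with an error.
	cfg.Version = sarama.V0_10_0_0
	// As of 1.11, the SyncProducer requires successes to be returned
	// (it consumes them internally to provide its blocking API).
	cfg.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}
```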
+ +## Version 1.9.0 (2016-05-16) + +New Features: + - Add support for custom offset manager retention durations + ([#602](https://github.com/IBM/sarama/pull/602)). + - Publish low-level mocks to enable testing of third-party producer/consumer + implementations ([#570](https://github.com/IBM/sarama/pull/570)). + - Declare support for Golang 1.6 + ([#611](https://github.com/IBM/sarama/pull/611)). + - Support for SASL plain-text auth + ([#648](https://github.com/IBM/sarama/pull/648)). + +Improvements: + - Simplified broker locking scheme slightly + ([#604](https://github.com/IBM/sarama/pull/604)). + - Documentation cleanup + ([#605](https://github.com/IBM/sarama/pull/605), + [#621](https://github.com/IBM/sarama/pull/621), + [#654](https://github.com/IBM/sarama/pull/654)). + +Bug Fixes: + - Fix race condition shutting down the OffsetManager + ([#658](https://github.com/IBM/sarama/pull/658)). + +## Version 1.8.0 (2016-02-01) + +New Features: + - Full support for Kafka 0.9: + - All protocol messages and fields + ([#586](https://github.com/IBM/sarama/pull/586), + [#588](https://github.com/IBM/sarama/pull/588), + [#590](https://github.com/IBM/sarama/pull/590)). + - Verified that TLS support works + ([#581](https://github.com/IBM/sarama/pull/581)). + - Fixed the OffsetManager compatibility + ([#585](https://github.com/IBM/sarama/pull/585)). + +Improvements: + - Optimize for fewer system calls when reading from the network + ([#584](https://github.com/IBM/sarama/pull/584)). + - Automatically retry `InvalidMessage` errors to match upstream behaviour + ([#589](https://github.com/IBM/sarama/pull/589)). + +## Version 1.7.0 (2015-12-11) + +New Features: + - Preliminary support for Kafka 0.9 + ([#572](https://github.com/IBM/sarama/pull/572)). This comes with several + caveats: + - Protocol-layer support is mostly in place + ([#577](https://github.com/IBM/sarama/pull/577)), however Kafka 0.9 + renamed some messages and fields, which we did not rename in order to preserve API + compatibility. + - The producer and consumer work against 0.9, but the offset manager does + not ([#573](https://github.com/IBM/sarama/pull/573)). + - TLS support may or may not work + ([#581](https://github.com/IBM/sarama/pull/581)). + +Improvements: + - Don't wait for request timeouts on dead brokers, greatly speeding recovery + when the TCP connection is left hanging + ([#548](https://github.com/IBM/sarama/pull/548)). + - Refactored part of the producer. The new version provides a much more elegant + solution to [#449](https://github.com/IBM/sarama/pull/449). It is also + slightly more efficient, and much more precise in calculating batch sizes + when compression is used + ([#549](https://github.com/IBM/sarama/pull/549), + [#550](https://github.com/IBM/sarama/pull/550), + [#551](https://github.com/IBM/sarama/pull/551)). + +Bug Fixes: + - Fix race condition in consumer test mock + ([#553](https://github.com/IBM/sarama/pull/553)). + +## Version 1.6.1 (2015-09-25) + +Bug Fixes: + - Fix panic that could occur if a user-supplied message value failed to encode + ([#449](https://github.com/IBM/sarama/pull/449)). + +## Version 1.6.0 (2015-09-04) + +New Features: + - Implementation of a consumer offset manager using the APIs introduced in + Kafka 0.8.2. The API is designed mainly for integration into a future + high-level consumer, not for direct use, although it is *possible* to use it + directly; see the sketch below. + ([#461](https://github.com/IBM/sarama/pull/461)).
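As a rough illustration of that low-level API, here is a hedged sketch of driving the offset manager directly; the group name, topic, partition, and offsets are made-up placeholders, not taken from the release notes.

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	om, err := sarama.NewOffsetManagerFromClient("example-group", client)
	if err != nil {
		log.Fatal(err)
	}
	defer om.Close()

	pom, err := om.ManagePartition("example-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer pom.Close()

	next, metadata := pom.NextOffset() // offset to resume from, plus stored metadata
	log.Printf("resuming at offset %d (metadata %q)", next, metadata)

	// After processing a message at offset N, commit N+1: one greater than
	// the last consumed offset, as the 1.10.0 notes above stress.
	var processed int64 = 41
	pom.MarkOffset(processed+1, "")
}
```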
+ +Improvements: + - CRC32 calculation is much faster on machines with SSE4.2 instructions, + removing a major hotspot from most profiles + ([#255](https://github.com/IBM/sarama/pull/255)). + +Bug Fixes: + - Make protocol decoding more robust against some malformed packets generated + by go-fuzz ([#523](https://github.com/IBM/sarama/pull/523), + [#525](https://github.com/IBM/sarama/pull/525)) or found in other ways + ([#528](https://github.com/IBM/sarama/pull/528)). + - Fix a potential race condition panic in the consumer on shutdown + ([#529](https://github.com/IBM/sarama/pull/529)). + +## Version 1.5.0 (2015-08-17) + +New Features: + - TLS-encrypted network connections are now supported. This feature is subject + to change when Kafka releases built-in TLS support, but for now this is + enough to work with TLS-terminating proxies + ([#154](https://github.com/IBM/sarama/pull/154)). + +Improvements: + - The consumer will not block if a single partition is not drained by the user; + all other partitions will continue to consume normally + ([#485](https://github.com/IBM/sarama/pull/485)). + - Formatting of error strings has been much improved + ([#495](https://github.com/IBM/sarama/pull/495)). + - Internal refactoring of the producer for code cleanliness and to enable + future work ([#300](https://github.com/IBM/sarama/pull/300)). + +Bug Fixes: + - Fix a potential deadlock in the consumer on shutdown + ([#475](https://github.com/IBM/sarama/pull/475)). + +## Version 1.4.3 (2015-07-21) + +Bug Fixes: + - Don't include the partitioner in the producer's "fetch partitions" + circuit-breaker ([#466](https://github.com/IBM/sarama/pull/466)). + - Don't retry messages until the broker is closed when abandoning a broker in + the producer ([#468](https://github.com/IBM/sarama/pull/468)). + - Update the import path for snappy-go, it has moved again and the API has + changed slightly ([#486](https://github.com/IBM/sarama/pull/486)). + +## Version 1.4.2 (2015-05-27) + +Bug Fixes: + - Update the import path for snappy-go, it has moved from google code to github + ([#456](https://github.com/IBM/sarama/pull/456)). + +## Version 1.4.1 (2015-05-25) + +Improvements: + - Optimizations when decoding snappy messages, thanks to John Potocny + ([#446](https://github.com/IBM/sarama/pull/446)). + +Bug Fixes: + - Fix hypothetical race conditions on producer shutdown + ([#450](https://github.com/IBM/sarama/pull/450), + [#451](https://github.com/IBM/sarama/pull/451)). + +## Version 1.4.0 (2015-05-01) + +New Features: + - The consumer now implements `Topics()` and `Partitions()` methods to enable + users to dynamically choose what topics/partitions to consume without + instantiating a full client + ([#431](https://github.com/IBM/sarama/pull/431)). + - The partition-consumer now exposes the high water mark offset value returned + by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/IBM/sarama/pull/339)). + - Added a `kafka-console-consumer` tool capable of handling multiple + partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` + ([#439](https://github.com/IBM/sarama/pull/439), + [#442](https://github.com/IBM/sarama/pull/442)). + +Improvements: + - The producer's logging during retry scenarios is more consistent, more + useful, and slightly less verbose + ([#429](https://github.com/IBM/sarama/pull/429)). 
+ - The client now shuffles its initial list of seed brokers in order to prevent + thundering herd on the first broker in the list + ([#441](https://github.com/IBM/sarama/pull/441)). + +Bug Fixes: + - The producer now correctly manages its state if retries occur when it is + shutting down, fixing several instances of confusing behaviour and at least + one potential deadlock ([#419](https://github.com/IBM/sarama/pull/419)). + - The consumer now handles messages for different partitions asynchronously, + making it much more resilient to specific user code ordering + ([#325](https://github.com/IBM/sarama/pull/325)). + +## Version 1.3.0 (2015-04-16) + +New Features: + - The client now tracks consumer group coordinators using + ConsumerMetadataRequests similar to how it tracks partition leadership using + regular MetadataRequests ([#411](https://github.com/IBM/sarama/pull/411)). + This adds two methods to the client API: + - `Coordinator(consumerGroup string) (*Broker, error)` + - `RefreshCoordinator(consumerGroup string) error` + +Improvements: + - ConsumerMetadataResponses now automatically create a Broker object out of the + ID/address/port combination for the Coordinator; accessing the fields + individually has been deprecated + ([#413](https://github.com/IBM/sarama/pull/413)). + - Much improved handling of `OffsetOutOfRange` errors in the consumer. + Consumers will fail to start if the provided offset is out of range + ([#418](https://github.com/IBM/sarama/pull/418)) + and they will automatically shut down if the offset falls out of range + ([#424](https://github.com/IBM/sarama/pull/424)). + - Small performance improvement in encoding and decoding protocol messages + ([#427](https://github.com/IBM/sarama/pull/427)). + +Bug Fixes: + - Fix a rare race condition in the client's background metadata refresher if + it happens to be activated while the client is being closed + ([#422](https://github.com/IBM/sarama/pull/422)). + +## Version 1.2.0 (2015-04-07) + +Improvements: + - The producer's behaviour when `Flush.Frequency` is set is now more intuitive + ([#389](https://github.com/IBM/sarama/pull/389)). + - The producer is now somewhat more memory-efficient during and after retrying + messages due to an improved queue implementation + ([#396](https://github.com/IBM/sarama/pull/396)). + - The consumer produces much more useful logging output when leadership + changes ([#385](https://github.com/IBM/sarama/pull/385)). + - The client's `GetOffset` method will now automatically refresh metadata and + retry once in the event of stale information or similar + ([#394](https://github.com/IBM/sarama/pull/394)). + - Broker connections now have support for using TCP keepalives + ([#407](https://github.com/IBM/sarama/issues/407)). + +Bug Fixes: + - The OffsetCommitRequest message now correctly implements all three possible + API versions ([#390](https://github.com/IBM/sarama/pull/390), + [#400](https://github.com/IBM/sarama/pull/400)). + +## Version 1.1.0 (2015-03-20) + +Improvements: + - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly + broken topics don't choke throughput + ([#373](https://github.com/IBM/sarama/pull/373)). + +Bug Fixes: + - Fix the producer's internal reference counting in certain unusual scenarios + ([#367](https://github.com/IBM/sarama/pull/367)). + - Fix the consumer's internal reference counting in certain unusual scenarios + ([#369](https://github.com/IBM/sarama/pull/369)). 
+ - Fix a condition where the producer's internal control messages could have + gotten stuck ([#368](https://github.com/IBM/sarama/pull/368)). + - Fix an issue where invalid partition lists would be cached when asking for + metadata for a non-existent topic ([#372](https://github.com/IBM/sarama/pull/372)). + + +## Version 1.0.0 (2015-03-17) + +Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences from previous untagged versions are: + +- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. +- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. +- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/IBM/sarama/mocks` package. +- For most use cases, it is no longer necessary to open a `Client`; this will be done for you. +- All the configuration values have been unified in the `Config` struct. +- Much improved test suite. diff --git a/vendor/github.com/IBM/sarama/CONTRIBUTING.md b/vendor/github.com/IBM/sarama/CONTRIBUTING.md new file mode 100644 index 000000000..173b2a384 --- /dev/null +++ b/vendor/github.com/IBM/sarama/CONTRIBUTING.md @@ -0,0 +1,46 @@ +## Contributing + +[fork]: https://github.com/IBM/sarama/fork +[pr]: https://github.com/IBM/sarama/compare +[released]: https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license + +Hi there! We are thrilled that you would like to contribute to Sarama. +Your help is essential for keeping it great. + +Contributions to this project are [released][released] to the public under the project's [opensource license](LICENSE.md). +By contributing to this project you agree to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO). +The DCO was created by the Linux Kernel community and is a simple statement that you, as a contributor, wrote or otherwise have the legal right to contribute those changes. + +Contributors must _sign-off_ that they adhere to these requirements by adding a `Signed-off-by` line to all commit messages with an email address that matches the commit author: + +``` +feat: this is my commit message + +Signed-off-by: Random J Developer +``` + +Git even has a `-s` command line option to append this automatically to your commit message: + +``` +$ git commit -s -m 'This is my commit message' +``` + +## Submitting a pull request + +0. [Fork][fork] and clone the repository +1. Create a new branch: `git checkout -b my-branch-name` +2. Make your change, push to your fork and [submit a pull request][pr] +3. Wait for your pull request to be reviewed and merged. + +Here are a few things you can do that will increase the likelihood of your pull request being accepted: + +- Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests. +- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
+ +## Further Reading + +- [Developer Certificate of Origin versus Contributor License Agreements](https://julien.ponge.org/blog/developer-certificate-of-origin-versus-contributor-license-agreements/) +- [The most powerful contributor agreement](https://lwn.net/Articles/592503/) +- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/) +- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/) +- [GitHub Help](https://help.github.com) diff --git a/vendor/github.com/Shopify/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka similarity index 82% rename from vendor/github.com/Shopify/sarama/Dockerfile.kafka rename to vendor/github.com/IBM/sarama/Dockerfile.kafka index 48a9c178a..90fdb1669 100644 --- a/vendor/github.com/Shopify/sarama/Dockerfile.kafka +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -3,7 +3,8 @@ FROM registry.access.redhat.com/ubi8/ubi-minimal:latest USER root RUN microdnf update \ - && microdnf install curl gzip java-11-openjdk-headless tar \ + && microdnf install curl gzip java-11-openjdk-headless tar tzdata-java \ + && microdnf reinstall tzdata \ && microdnf clean all ENV JAVA_HOME=/usr/lib/jvm/jre-11 @@ -20,7 +21,7 @@ ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" RUN mkdir -p "/opt/kafka-2.8.2" && chmod a+rw /opt/kafka-2.8.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.8.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.8.2" RUN mkdir -p "/opt/kafka-3.1.2" && chmod a+rw /opt/kafka-3.1.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.1.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.1.2" RUN mkdir -p "/opt/kafka-3.2.3" && chmod a+rw /opt/kafka-3.2.3 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.2.3.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.2.3" -RUN mkdir -p "/opt/kafka-3.3.1" && chmod a+rw /opt/kafka-3.3.1 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.3.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.3.1" +RUN mkdir -p "/opt/kafka-3.3.2" && chmod a+rw /opt/kafka-3.3.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.3.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.3.2" COPY entrypoint.sh / diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/IBM/sarama/LICENSE.md similarity index 95% rename from vendor/github.com/Shopify/sarama/LICENSE rename to vendor/github.com/IBM/sarama/LICENSE.md index d2bf4352f..f2c7f0c5b 100644 --- a/vendor/github.com/Shopify/sarama/LICENSE +++ b/vendor/github.com/IBM/sarama/LICENSE.md @@ -1,4 +1,7 @@ +# MIT License + Copyright (c) 2013 Shopify +Copyright (c) 2023 IBM Corporation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile similarity index 100% rename from vendor/github.com/Shopify/sarama/Makefile rename to vendor/github.com/IBM/sarama/Makefile diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/IBM/sarama/README.md similarity index 72% rename from vendor/github.com/Shopify/sarama/README.md rename to vendor/github.com/IBM/sarama/README.md index 0ee6e6a7f..a1f6137e5 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/IBM/sarama/README.md @@ -1,18 +1,17 @@ # sarama -[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama) -[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/main/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) +[![Go 
Reference](https://pkg.go.dev/badge/github.com/IBM/sarama.svg)](https://pkg.go.dev/github.com/IBM/sarama) Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/). ## Getting started -- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama). +- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/IBM/sarama). - Mocks for testing are available in the [mocks](./mocks) subpackage. - The [examples](./examples) directory contains more elaborate example applications. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. -You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). +You might also want to look at the [Frequently Asked Questions](https://github.com/IBM/sarama/wiki/Frequently-Asked-Questions). ## Compatibility and API stability @@ -21,13 +20,13 @@ the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. However, older releases of Kafka are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. -You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. +You can import a version with a guaranteed stable API via http://gopkg.in/IBM/sarama.v1. A changelog is available [here](CHANGELOG.md). ## Contributing -- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/main/.github/CONTRIBUTING.md). -- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. +- Get started by checking our [contribution guidelines](https://github.com/IBM/sarama/blob/main/.github/CONTRIBUTING.md). +- Read the [Sarama wiki](https://github.com/IBM/sarama/wiki) for more technical and design details. - The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. - For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. - If you have any questions, just ask! 
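Since the README points at the mocks subpackage without showing it in use, here is a small hedged illustration of what a test against `mocks.SyncProducer` might look like; the test name, topic, and message are assumptions, not taken from the README.

```go
package example

import (
	"testing"

	"github.com/IBM/sarama"
	"github.com/IBM/sarama/mocks"
)

func TestPublish(t *testing.T) {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true

	// The mock SyncProducer verifies expectations instead of talking to Kafka.
	producer := mocks.NewSyncProducer(t, cfg)
	defer func() {
		if err := producer.Close(); err != nil {
			t.Error(err)
		}
	}()

	producer.ExpectSendMessageAndSucceed()

	_, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
```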
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/IBM/sarama/Vagrantfile similarity index 100% rename from vendor/github.com/Shopify/sarama/Vagrantfile rename to vendor/github.com/IBM/sarama/Vagrantfile diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/IBM/sarama/acl_bindings.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_bindings.go rename to vendor/github.com/IBM/sarama/acl_bindings.go diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/IBM/sarama/acl_create_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_create_request.go rename to vendor/github.com/IBM/sarama/acl_create_request.go diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/IBM/sarama/acl_create_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_create_response.go rename to vendor/github.com/IBM/sarama/acl_create_response.go diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/IBM/sarama/acl_delete_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_delete_request.go rename to vendor/github.com/IBM/sarama/acl_delete_request.go diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/IBM/sarama/acl_delete_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_delete_response.go rename to vendor/github.com/IBM/sarama/acl_delete_response.go diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/IBM/sarama/acl_describe_request.go similarity index 93% rename from vendor/github.com/Shopify/sarama/acl_describe_request.go rename to vendor/github.com/IBM/sarama/acl_describe_request.go index e0fe9023a..98edb6740 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_request.go +++ b/vendor/github.com/IBM/sarama/acl_describe_request.go @@ -1,6 +1,6 @@ package sarama -// DescribeAclsRequest is a secribe acl request type +// DescribeAclsRequest is a describe acl request type type DescribeAclsRequest struct { Version int AclFilter diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/IBM/sarama/acl_describe_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_describe_response.go rename to vendor/github.com/IBM/sarama/acl_describe_response.go diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/IBM/sarama/acl_filter.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_filter.go rename to vendor/github.com/IBM/sarama/acl_filter.go diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/IBM/sarama/acl_types.go similarity index 93% rename from vendor/github.com/Shopify/sarama/acl_types.go rename to vendor/github.com/IBM/sarama/acl_types.go index c3ba8ddcf..62bb5342a 100644 --- a/vendor/github.com/Shopify/sarama/acl_types.go +++ b/vendor/github.com/IBM/sarama/acl_types.go @@ -60,7 +60,7 @@ func (a *AclOperation) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the operation and converts it to an AclOperation +// UnmarshalText takes a text representation of the operation and converts it to an AclOperation func (a *AclOperation) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclOperation{ @@ -114,7 +114,7 @@ func 
(a *AclPermissionType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the permission type and converts it to an AclPermissionType +// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType func (a *AclPermissionType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclPermissionType{ @@ -166,7 +166,7 @@ func (a *AclResourceType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the resource type and converts it to an AclResourceType +// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType func (a *AclResourceType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourceType{ @@ -217,7 +217,7 @@ func (a *AclResourcePatternType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the resource pattern type and converts it to an AclResourcePatternType +// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType func (a *AclResourcePatternType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourcePatternType{ diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go rename to vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go rename to vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go similarity index 96% rename from vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go rename to vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go index 57ecf6488..1d6da75f5 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go +++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go @@ -1,6 +1,6 @@ package sarama -// AddPartitionsToTxnRequest is a add paartition request +// AddPartitionsToTxnRequest is a add partition request type AddPartitionsToTxnRequest struct { TransactionalID string ProducerID int64 diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go rename to vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go similarity index 96% rename from vendor/github.com/Shopify/sarama/admin.go rename to vendor/github.com/IBM/sarama/admin.go index a334daff5..29eeca1c6 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/IBM/sarama/admin.go @@ -207,19 +207,17 @@ func isErrNoController(err error) bool { // provided retryable func) up to the maximum number of tries permitted by 
// the admin client configuration func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error { - var err error - for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { - err = fn() - if err == nil || !retryable(err) { + for attemptsRemaining := ca.conf.Admin.Retry.Max + 1; ; { + err := fn() + attemptsRemaining-- + if err == nil || attemptsRemaining <= 0 || !retryable(err) { return err } Logger.Printf( "admin/request retrying after %dms... (%d attempts remaining)\n", - ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt) + ca.conf.Admin.Retry.Backoff/time.Millisecond, attemptsRemaining) time.Sleep(ca.conf.Admin.Retry.Backoff) - continue } - return err } func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { @@ -275,13 +273,19 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO } func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { - controller, err := ca.Controller() - if err != nil { - return nil, err - } - - request := NewMetadataRequest(ca.conf.Version, topics) - response, err := controller.GetMetadata(request) + var response *MetadataResponse + err = ca.retryOnError(isErrNoController, func() error { + controller, err := ca.Controller() + if err != nil { + return err + } + request := NewMetadataRequest(ca.conf.Version, topics) + response, err = controller.GetMetadata(request) + if isErrNoController(err) { + _, _ = ca.refreshController() + } + return err + }) if err != nil { return nil, err } @@ -289,13 +293,20 @@ func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetada } func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { - controller, err := ca.Controller() - if err != nil { - return nil, int32(0), err - } + var response *MetadataResponse + err = ca.retryOnError(isErrNoController, func() error { + controller, err := ca.Controller() + if err != nil { + return err + } - request := NewMetadataRequest(ca.conf.Version, nil) - response, err := controller.GetMetadata(request) + request := NewMetadataRequest(ca.conf.Version, nil) + response, err = controller.GetMetadata(request) + if isErrNoController(err) { + _, _ = ca.refreshController() + } + return err + }) if err != nil { return nil, int32(0), err } @@ -545,13 +556,20 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []in request.AddBlock(topic, partitions) - b, err := ca.Controller() - if err != nil { - return nil, err - } - _ = b.Open(ca.client.Config()) + var rsp *ListPartitionReassignmentsResponse + err = ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + _ = b.Open(ca.client.Config()) - rsp, err := b.ListPartitionReassignments(request) + rsp, err = b.ListPartitionReassignments(request) + if isErrNoController(err) { + _, _ = ca.refreshController() + } + return err + }) if err == nil && rsp != nil { return rsp.TopicStatus, nil @@ -891,7 +909,7 @@ func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*Group describeReq := &DescribeGroupsRequest{ Groups: brokerGroups, } - if ca.conf.Version.IsAtLeast(V2_3_0_0) { + if ca.conf.Version.IsAtLeast(V2_4_0_0) { describeReq.Version = 4 } response, err := broker.DescribeGroups(describeReq) diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go similarity 
index 100% rename from vendor/github.com/Shopify/sarama/alter_client_quotas_request.go rename to vendor/github.com/IBM/sarama/alter_client_quotas_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_client_quotas_response.go rename to vendor/github.com/IBM/sarama/alter_client_quotas_response.go diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/IBM/sarama/alter_configs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_configs_request.go rename to vendor/github.com/IBM/sarama/alter_configs_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/IBM/sarama/alter_configs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_configs_response.go rename to vendor/github.com/IBM/sarama/alter_configs_response.go diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go rename to vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go rename to vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go rename to vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go rename to vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/IBM/sarama/api_versions_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/api_versions_request.go rename to vendor/github.com/IBM/sarama/api_versions_request.go diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/IBM/sarama/api_versions_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/api_versions_response.go rename to vendor/github.com/IBM/sarama/api_versions_response.go diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go similarity index 99% rename from vendor/github.com/Shopify/sarama/async_producer.go rename to vendor/github.com/IBM/sarama/async_producer.go index 50f226f8e..dfd891237 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/IBM/sarama/async_producer.go @@ -50,7 +50,7 @@ type AsyncProducer interface { // errors to be returned. 
Errors() <-chan *ProducerError - // IsTransactional return true when current producer is is transactional. + // IsTransactional return true when current producer is transactional. IsTransactional() bool // TxnStatus return current producer transaction status. diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go similarity index 95% rename from vendor/github.com/Shopify/sarama/balance_strategy.go rename to vendor/github.com/IBM/sarama/balance_strategy.go index 4594df6f6..8635bdf7d 100644 --- a/vendor/github.com/Shopify/sarama/balance_strategy.go +++ b/vendor/github.com/IBM/sarama/balance_strategy.go @@ -57,7 +57,8 @@ type BalanceStrategy interface { // -------------------------------------------------------------------- -// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members. +// NewBalanceStrategyRange returns a range balance strategy, +// which is the default and assigns partitions as ranges to consumer group members. // This follows the same logic as // https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html // @@ -65,27 +66,33 @@ type BalanceStrategy interface { // // M1: {T1: [0, 1, 2], T2: [0, 1, 2]} // M2: {T2: [3, 4, 5], T2: [3, 4, 5]} -var BalanceStrategyRange = &balanceStrategy{ - name: RangeBalanceStrategyName, - coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { - partitionsPerConsumer := len(partitions) / len(memberIDs) - consumersWithExtraPartition := len(partitions) % len(memberIDs) - - sort.Strings(memberIDs) - - for i, memberID := range memberIDs { - min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) - extra := 0 - if i < consumersWithExtraPartition { - extra = 1 +func NewBalanceStrategyRange() BalanceStrategy { + return &balanceStrategy{ + name: RangeBalanceStrategyName, + coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { + partitionsPerConsumer := len(partitions) / len(memberIDs) + consumersWithExtraPartition := len(partitions) % len(memberIDs) + + sort.Strings(memberIDs) + + for i, memberID := range memberIDs { + min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) + extra := 0 + if i < consumersWithExtraPartition { + extra = 1 + } + max := min + partitionsPerConsumer + extra + plan.Add(memberID, topic, partitions[min:max]...) } - max := min + partitionsPerConsumer + extra - plan.Add(memberID, topic, partitions[min:max]...) - } - }, + }, + } } -// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments +// Deprecated: use NewBalanceStrategyRange to avoid data race issue +var BalanceStrategyRange = NewBalanceStrategyRange() + +// NewBalanceStrategySticky returns a sticky balance strategy, +// which assigns partitions to members with an attempt to preserve earlier assignments // while maintain a balanced partition distribution. 
// Example with topic T with six partitions (0..5) and two members (M1, M2): // @@ -97,13 +104,18 @@ var BalanceStrategyRange = &balanceStrategy{ // M1: {T: [0, 2]} // M2: {T: [1, 3]} // M3: {T: [4, 5]} -var BalanceStrategySticky = &stickyBalanceStrategy{} +func NewBalanceStrategySticky() BalanceStrategy { + return &stickyBalanceStrategy{} +} + +// Deprecated: use NewBalanceStrategySticky to avoid data race issue +var BalanceStrategySticky = NewBalanceStrategySticky() // -------------------------------------------------------------------- type balanceStrategy struct { - name string coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) + name string } // Name implements BalanceStrategy. @@ -171,10 +183,7 @@ func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetad } // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state - isFreshAssignment := false - if len(currentAssignment) == 0 { - isFreshAssignment = true - } + isFreshAssignment := len(currentAssignment) == 0 // create a mapping of all current topic partitions and the consumers that can be assigned to them partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string) @@ -281,10 +290,7 @@ func strsContains(s []string, value string) bool { // Balance assignments across consumers for maximum fairness and stickiness. func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) { - initializing := false - if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 { - initializing = true - } + initializing := len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 // assign all unassigned partitions for _, partition := range unassignedPartitions { @@ -337,11 +343,17 @@ func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPart } } -// BalanceStrategyRoundRobin assigns partitions to members in alternating order. +// NewBalanceStrategyRoundRobin returns a round-robin balance strategy, +// which assigns partitions to members in alternating order. 
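Why these constructors exist: the sticky balancer keeps internal state between plan computations, so the shared package-level `BalanceStrategySticky` value could race when several consumer groups in one process rebalanced concurrently. A minimal sketch, using only names from this hunk plus sarama's exported strategy-name constant:

```go
package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// Each call allocates a fresh, unshared strategy value.
	s1 := sarama.NewBalanceStrategySticky()
	s2 := sarama.NewBalanceStrategySticky()
	fmt.Println(s1 == s2)                                      // false: distinct instances
	fmt.Println(s1.Name() == sarama.StickyBalanceStrategyName) // true: same behaviour
}
```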
// For example, there are two topics (t0, t1) and two consumer (m0, m1), and each topic has three partitions (p0, p1, p2): // M0: [t0p0, t0p2, t1p1] // M1: [t0p1, t1p0, t1p2] -var BalanceStrategyRoundRobin = new(roundRobinBalancer) +func NewBalanceStrategyRoundRobin() BalanceStrategy { + return new(roundRobinBalancer) +} + +// Deprecated: use NewBalanceStrategyRoundRobin to avoid data race issue +var BalanceStrategyRoundRobin = NewBalanceStrategyRoundRobin() type roundRobinBalancer struct{} @@ -414,8 +426,8 @@ func (tp *topicAndPartition) comparedValue() string { } type memberAndTopic struct { - memberID string topics map[string]struct{} + memberID string } func (m *memberAndTopic) hasTopic(topic string) bool { @@ -681,11 +693,8 @@ func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, par } heap.Init(&pq) - for { - // loop until no consumer-group members remain - if pq.Len() == 0 { - break - } + // loop until no consumer-group members remain + for pq.Len() != 0 { member := pq[0] // partitions that were assigned to a different consumer last time @@ -1106,7 +1115,7 @@ type assignmentPriorityQueue []*consumerGroupMember func (pq assignmentPriorityQueue) Len() int { return len(pq) } func (pq assignmentPriorityQueue) Less(i, j int) bool { - // order asssignment priority queue in descending order using assignment-count/member-id + // order assignment priority queue in descending order using assignment-count/member-id if len(pq[i].assignments) == len(pq[j].assignments) { return strings.Compare(pq[i].id, pq[j].id) > 0 } diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go similarity index 99% rename from vendor/github.com/Shopify/sarama/broker.go rename to vendor/github.com/IBM/sarama/broker.go index d049e9b47..7ed987fe3 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/IBM/sarama/broker.go @@ -175,7 +175,9 @@ func (b *Broker) Open(conf *Config) error { b.lock.Lock() - b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + if b.metricRegistry == nil { + b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + } go withRecover(func() { defer func() { @@ -453,7 +455,7 @@ func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error return } - // Wellformed response + // Well-formed response b.updateThrottleMetric(res.ThrottleTime) cb(res, nil) }, diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/IBM/sarama/client.go similarity index 98% rename from vendor/github.com/Shopify/sarama/client.go rename to vendor/github.com/IBM/sarama/client.go index f7872a1b3..d9fb77d64 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/IBM/sarama/client.go @@ -50,7 +50,7 @@ type Client interface { // topic/partition, as determined by querying the cluster metadata. Leader(topic string, partitionID int32) (*Broker, error) - // LeaderAndEpoch returns the the leader and its epoch for the current + // LeaderAndEpoch returns the leader and its epoch for the current // topic/partition, as determined by querying the cluster metadata. LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) @@ -132,10 +132,10 @@ const ( ) type client struct { - // updateMetaDataMs stores the time at which metadata was lasted updated. + // updateMetadataMs stores the time at which metadata was lasted updated. 
// Note: this accessed atomically so must be the first word in the struct // as per golang/go#41970 - updateMetaDataMs int64 + updateMetadataMs int64 conf *Config closer, closed chan none // for shutting down background metadata updater @@ -513,7 +513,7 @@ func (client *client) RefreshMetadata(topics ...string) error { // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper // error. This handles the case by returning an error instead of sending it - // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 + // off to Kafka. See: https://github.com/IBM/sarama/pull/38#issuecomment-26362310 for _, topic := range topics { if topic == "" { return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return @@ -975,13 +975,14 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, time.Sleep(backoff) } - t := atomic.LoadInt64(&client.updateMetaDataMs) - if time.Since(time.Unix(t/1e3, 0)) < backoff { + t := atomic.LoadInt64(&client.updateMetadataMs) + if time.Since(time.UnixMilli(t)) < backoff { return err } + attemptsRemaining-- Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) - return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline) + return client.tryRefreshMetadata(topics, attemptsRemaining, deadline) } return err } @@ -999,10 +1000,7 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, req := NewMetadataRequest(client.conf.Version, topics) req.AllowAutoTopicCreation = allowAutoTopicCreation - t := atomic.LoadInt64(&client.updateMetaDataMs) - if !atomic.CompareAndSwapInt64(&client.updateMetaDataMs, t, time.Now().UnixNano()/int64(time.Millisecond)) { - return nil - } + atomic.StoreInt64(&client.updateMetadataMs, time.Now().UnixMilli()) response, err := broker.GetMetadata(req) var kerror KError @@ -1160,9 +1158,10 @@ func (client *client) findCoordinator(coordinatorKey string, coordinatorType Coo retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { backoff := client.computeBackoff(attemptsRemaining) + attemptsRemaining-- Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) time.Sleep(backoff) - return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining-1) + return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining) } return nil, err } diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/IBM/sarama/compress.go similarity index 100% rename from vendor/github.com/Shopify/sarama/compress.go rename to vendor/github.com/IBM/sarama/compress.go diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/IBM/sarama/config.go similarity index 98% rename from vendor/github.com/Shopify/sarama/config.go rename to vendor/github.com/IBM/sarama/config.go index b07034434..eb27d98ac 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/IBM/sarama/config.go @@ -294,7 +294,7 @@ type Config struct { Interval time.Duration } Rebalance struct { - // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + // Strategy for allocating topic partitions to members. // Deprecated: Strategy exists for historical compatibility // and should not be used. Please use GroupStrategies. 
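Stepping back to the config.go hunk: with `Rebalance.Strategy` deprecated, group setup moves to the priority-ordered `GroupStrategies` list, and the default changes from the shared `BalanceStrategyRange` var to a fresh `NewBalanceStrategyRange()` value. A hedged migration sketch (`newGroupConfig` is illustrative, not part of sarama):

```go
func newGroupConfig() *sarama.Config {
	cfg := sarama.NewConfig()

	// Deprecated single-strategy field:
	//   cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin

	// Replacement: a priority-ordered list; the group leader picks the
	// first strategy that every member supports. The default is now
	// []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()}.
	cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{
		sarama.NewBalanceStrategyRoundRobin(),
		sarama.NewBalanceStrategyRange(),
	}
	return cfg
}
```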
Strategy BalanceStrategy @@ -302,7 +302,7 @@ type Config struct { // GroupStrategies is the priority-ordered list of client-side consumer group // balancing strategies that will be offered to the coordinator. The first // strategy that all group members support will be chosen by the leader. - // default: [BalanceStrategyRange] + // default: [ NewBalanceStrategyRange() ] GroupStrategies []BalanceStrategy // The maximum allowed time for each worker to join the group once a rebalance has begun. @@ -539,7 +539,7 @@ func NewConfig() *Config { c.Consumer.Group.Session.Timeout = 10 * time.Second c.Consumer.Group.Heartbeat.Interval = 3 * time.Second - c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{BalanceStrategyRange} + c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{NewBalanceStrategyRange()} c.Consumer.Group.Rebalance.Timeout = 60 * time.Second c.Consumer.Group.Rebalance.Retry.Max = 4 c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second @@ -650,19 +650,26 @@ func (c *Config) Validate() error { return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used") } - if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH { + switch c.Net.SASL.GSSAPI.AuthType { + case KRB5_USER_AUTH: if c.Net.SASL.GSSAPI.Password == "" { return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " + "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH") } - } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH { + case KRB5_KEYTAB_AUTH: if c.Net.SASL.GSSAPI.KeyTabPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" + - " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") + " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") } - } else { - return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH") + case KRB5_CCACHE_AUTH: + if c.Net.SASL.GSSAPI.CCachePath == "" { + return ConfigurationError("Net.SASL.GSSAPI.CCachePath must not be empty when GSS-API mechanism is used" + + " and Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH") + } + default: + return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH, KRB5_KEYTAB_AUTH, and KRB5_CCACHE_AUTH") } + if c.Net.SASL.GSSAPI.KerberosConfigPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used") } diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/IBM/sarama/config_resource_type.go similarity index 100% rename from vendor/github.com/Shopify/sarama/config_resource_type.go rename to vendor/github.com/IBM/sarama/config_resource_type.go diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/IBM/sarama/consumer.go similarity index 99% rename from vendor/github.com/Shopify/sarama/consumer.go rename to vendor/github.com/IBM/sarama/consumer.go index eb27df8d7..4d08b3dda 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/IBM/sarama/consumer.go @@ -85,13 +85,13 @@ type Consumer interface { // New calls to the broker will return records from these partitions if there are any to be fetched. Resume(topicPartitions map[string][]int32) - // Pause suspends fetching from all partitions. Future calls to the broker will not return any + // PauseAll suspends fetching from all partitions. 
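Back to the GSSAPI validation rewritten just above: the `if/else if` chain becomes a `switch` and gains a third mode, `KRB5_CCACHE_AUTH`, which requires the new `CCachePath` field (loaded via `credentials.LoadCCache` in the kerberos_client.go hunk later in this diff). A sketch of enabling it, with placeholder paths and assuming sarama's pre-existing SASL config fields:

```go
cfg := sarama.NewConfig()
cfg.Net.SASL.Enable = true
cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI

cfg.Net.SASL.GSSAPI.AuthType = sarama.KRB5_CCACHE_AUTH    // added in this diff
cfg.Net.SASL.GSSAPI.CCachePath = "/tmp/krb5cc_1000"       // placeholder ticket cache
cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" // still required by Validate()
cfg.Net.SASL.GSSAPI.ServiceName = "kafka"
cfg.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM"
```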
Future calls to the broker will not return any // records from these partitions until they have been resumed using Resume()/ResumeAll(). // Note that this method does not affect partition subscription. // In particular, it does not cause a group rebalance when automatic assignment is used. PauseAll() - // Resume resumes all partitions which have been paused with Pause()/PauseAll(). + // ResumeAll resumes all partitions which have been paused with Pause()/PauseAll(). // New calls to the broker will return records from these partitions if there are any to be fetched. ResumeAll() } diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/IBM/sarama/consumer_group.go similarity index 99% rename from vendor/github.com/Shopify/sarama/consumer_group.go rename to vendor/github.com/IBM/sarama/consumer_group.go index ecdbcfa68..68f463976 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/IBM/sarama/consumer_group.go @@ -252,7 +252,10 @@ func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, ha if refreshCoordinator { err := c.client.RefreshCoordinator(c.groupID) if err != nil { - return c.retryNewSession(ctx, topics, handler, retries, true) + if retries <= 0 { + return nil, err + } + return c.retryNewSession(ctx, topics, handler, retries-1, true) } } @@ -403,7 +406,7 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler claims = members.Topics // in the case of stateful balance strategies, hold on to the returned - // assignment metadata, otherwise, reset the statically defined conusmer + // assignment metadata, otherwise, reset the statically defined consumer // group metadata if members.UserData != nil { c.userData = members.UserData diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/IBM/sarama/consumer_group_members.go similarity index 100% rename from vendor/github.com/Shopify/sarama/consumer_group_members.go rename to vendor/github.com/IBM/sarama/consumer_group_members.go diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/IBM/sarama/consumer_metadata_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/consumer_metadata_request.go rename to vendor/github.com/IBM/sarama/consumer_metadata_request.go diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/IBM/sarama/consumer_metadata_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/consumer_metadata_response.go rename to vendor/github.com/IBM/sarama/consumer_metadata_response.go diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/IBM/sarama/control_record.go similarity index 100% rename from vendor/github.com/Shopify/sarama/control_record.go rename to vendor/github.com/IBM/sarama/control_record.go diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/IBM/sarama/crc32_field.go similarity index 100% rename from vendor/github.com/Shopify/sarama/crc32_field.go rename to vendor/github.com/IBM/sarama/crc32_field.go diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/IBM/sarama/create_partitions_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_partitions_request.go rename to vendor/github.com/IBM/sarama/create_partitions_request.go diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go 
b/vendor/github.com/IBM/sarama/create_partitions_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_partitions_response.go rename to vendor/github.com/IBM/sarama/create_partitions_response.go diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_topics_request.go rename to vendor/github.com/IBM/sarama/create_topics_request.go diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/IBM/sarama/create_topics_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_topics_response.go rename to vendor/github.com/IBM/sarama/create_topics_response.go diff --git a/vendor/github.com/IBM/sarama/decompress.go b/vendor/github.com/IBM/sarama/decompress.go new file mode 100644 index 000000000..a01cefaa5 --- /dev/null +++ b/vendor/github.com/IBM/sarama/decompress.go @@ -0,0 +1,98 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4/v4" +) + +var ( + lz4ReaderPool = sync.Pool{ + New: func() interface{} { + return lz4.NewReader(nil) + }, + } + + gzipReaderPool sync.Pool + + bufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + bytesPool = sync.Pool{ + New: func() interface{} { + res := make([]byte, 0, 4096) + return &res + }, + } +) + +func decompress(cc CompressionCodec, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var err error + reader, ok := gzipReaderPool.Get().(*gzip.Reader) + if !ok { + reader, err = gzip.NewReader(bytes.NewReader(data)) + } else { + err = reader.Reset(bytes.NewReader(data)) + } + + if err != nil { + return nil, err + } + + buffer := bufferPool.Get().(*bytes.Buffer) + _, err = buffer.ReadFrom(reader) + // copy the buffer to a new slice with the correct length + // reuse gzipReader and buffer + gzipReaderPool.Put(reader) + res := make([]byte, buffer.Len()) + copy(res, buffer.Bytes()) + buffer.Reset() + bufferPool.Put(buffer) + + return res, err + case CompressionSnappy: + return snappy.Decode(data) + case CompressionLZ4: + reader, ok := lz4ReaderPool.Get().(*lz4.Reader) + if !ok { + reader = lz4.NewReader(bytes.NewReader(data)) + } else { + reader.Reset(bytes.NewReader(data)) + } + buffer := bufferPool.Get().(*bytes.Buffer) + _, err := buffer.ReadFrom(reader) + // copy the buffer to a new slice with the correct length + // reuse lz4Reader and buffer + lz4ReaderPool.Put(reader) + res := make([]byte, buffer.Len()) + copy(res, buffer.Bytes()) + buffer.Reset() + bufferPool.Put(buffer) + + return res, err + case CompressionZSTD: + buffer := *bytesPool.Get().(*[]byte) + var err error + buffer, err = zstdDecompress(ZstdDecoderParams{}, buffer, data) + // copy the buffer to a new slice with the correct length and reuse buffer + res := make([]byte, len(buffer)) + copy(res, buffer) + buffer = buffer[:0] + bytesPool.Put(&buffer) + + return res, err + default: + return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/IBM/sarama/delete_groups_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_groups_request.go rename to vendor/github.com/IBM/sarama/delete_groups_request.go diff --git 
a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/IBM/sarama/delete_groups_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_groups_response.go rename to vendor/github.com/IBM/sarama/delete_groups_response.go diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_request.go b/vendor/github.com/IBM/sarama/delete_offsets_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_offsets_request.go rename to vendor/github.com/IBM/sarama/delete_offsets_request.go diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_response.go b/vendor/github.com/IBM/sarama/delete_offsets_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_offsets_response.go rename to vendor/github.com/IBM/sarama/delete_offsets_response.go diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/IBM/sarama/delete_records_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_records_request.go rename to vendor/github.com/IBM/sarama/delete_records_request.go diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/IBM/sarama/delete_records_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_records_response.go rename to vendor/github.com/IBM/sarama/delete_records_response.go diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_topics_request.go rename to vendor/github.com/IBM/sarama/delete_topics_request.go diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/IBM/sarama/delete_topics_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_topics_response.go rename to vendor/github.com/IBM/sarama/delete_topics_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_client_quotas_request.go rename to vendor/github.com/IBM/sarama/describe_client_quotas_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_client_quotas_response.go rename to vendor/github.com/IBM/sarama/describe_client_quotas_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/IBM/sarama/describe_configs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_configs_request.go rename to vendor/github.com/IBM/sarama/describe_configs_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/IBM/sarama/describe_configs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_configs_response.go rename to vendor/github.com/IBM/sarama/describe_configs_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/IBM/sarama/describe_groups_request.go similarity index 92% rename from vendor/github.com/Shopify/sarama/describe_groups_request.go rename to vendor/github.com/IBM/sarama/describe_groups_request.go index f81f69ac4..fc8e6b588 
100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/IBM/sarama/describe_groups_request.go @@ -44,8 +44,14 @@ func (r *DescribeGroupsRequest) headerVersion() int16 { func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3, 4: + case 1: + return V1_1_0_0 + case 2: + return V2_0_0_0 + case 3: return V2_3_0_0 + case 4: + return V2_4_0_0 } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/IBM/sarama/describe_groups_response.go similarity index 98% rename from vendor/github.com/Shopify/sarama/describe_groups_response.go rename to vendor/github.com/IBM/sarama/describe_groups_response.go index 09052e431..12bf93e15 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/IBM/sarama/describe_groups_response.go @@ -65,8 +65,14 @@ func (r *DescribeGroupsResponse) headerVersion() int16 { func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3, 4: + case 1: + return V1_1_0_0 + case 2: + return V2_0_0_0 + case 3: return V2_3_0_0 + case 4: + return V2_4_0_0 } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_log_dirs_request.go rename to vendor/github.com/IBM/sarama/describe_log_dirs_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_log_dirs_response.go rename to vendor/github.com/IBM/sarama/describe_log_dirs_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go rename to vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go rename to vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/IBM/sarama/dev.yml similarity index 100% rename from vendor/github.com/Shopify/sarama/dev.yml rename to vendor/github.com/IBM/sarama/dev.yml diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml similarity index 96% rename from vendor/github.com/Shopify/sarama/docker-compose.yml rename to vendor/github.com/IBM/sarama/docker-compose.yml index e1119c87f..22ee21bf9 100644 --- a/vendor/github.com/Shopify/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -40,7 +40,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' KAFKA_CFG_ADVERTISED_LISTENERS: 
'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' @@ -62,7 +62,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' @@ -84,7 +84,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' @@ -106,7 +106,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' @@ -128,7 +128,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/IBM/sarama/encoder_decoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/encoder_decoder.go rename to vendor/github.com/IBM/sarama/encoder_decoder.go diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/IBM/sarama/end_txn_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/end_txn_request.go rename to vendor/github.com/IBM/sarama/end_txn_request.go diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/IBM/sarama/end_txn_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/end_txn_response.go rename to vendor/github.com/IBM/sarama/end_txn_response.go diff --git a/vendor/github.com/Shopify/sarama/entrypoint.sh b/vendor/github.com/IBM/sarama/entrypoint.sh similarity index 94% rename from vendor/github.com/Shopify/sarama/entrypoint.sh rename to vendor/github.com/IBM/sarama/entrypoint.sh index 8cd2efcb9..7b344fae8 100644 --- a/vendor/github.com/Shopify/sarama/entrypoint.sh +++ b/vendor/github.com/IBM/sarama/entrypoint.sh @@ -1,6 +1,6 @@ #!/bin/bash -KAFKA_VERSION="${KAFKA_VERSION:-3.3.1}" +KAFKA_VERSION="${KAFKA_VERSION:-3.3.2}" KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" if [ ! 
-d "${KAFKA_HOME}" ]; then diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go similarity index 99% rename from vendor/github.com/Shopify/sarama/errors.go rename to vendor/github.com/IBM/sarama/errors.go index 27977f166..8d1d16834 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/IBM/sarama/errors.go @@ -79,7 +79,7 @@ var ErrTransactionNotReady = errors.New("transaction manager: transaction is not // ErrNonTransactedProducer when calling BeginTxn, CommitTxn or AbortTxn on a non transactional producer. var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer") -// ErrTransitionNotAllowed when txnmgr state transiion is not valid. +// ErrTransitionNotAllowed when txnmgr state transition is not valid. var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted") // ErrCannotTransitionNilError when transition is attempted with an nil error. @@ -89,7 +89,7 @@ var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transi var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response") // MultiErrorFormat specifies the formatter applied to format multierrors. The -// default implementation is a consensed version of the hashicorp/go-multierror +// default implementation is a condensed version of the hashicorp/go-multierror // default one var MultiErrorFormat multierror.ErrorFormatFunc = func(es []error) string { if len(es) == 1 { diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/IBM/sarama/fetch_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/fetch_request.go rename to vendor/github.com/IBM/sarama/fetch_request.go diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/IBM/sarama/fetch_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/fetch_response.go rename to vendor/github.com/IBM/sarama/fetch_response.go diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/IBM/sarama/find_coordinator_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/find_coordinator_request.go rename to vendor/github.com/IBM/sarama/find_coordinator_request.go diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/IBM/sarama/find_coordinator_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/find_coordinator_response.go rename to vendor/github.com/IBM/sarama/find_coordinator_response.go diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/IBM/sarama/gssapi_kerberos.go similarity index 99% rename from vendor/github.com/Shopify/sarama/gssapi_kerberos.go rename to vendor/github.com/IBM/sarama/gssapi_kerberos.go index ab8b70196..8abbcdc38 100644 --- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go +++ b/vendor/github.com/IBM/sarama/gssapi_kerberos.go @@ -23,6 +23,7 @@ const ( GSS_API_GENERIC_TAG = 0x60 KRB5_USER_AUTH = 1 KRB5_KEYTAB_AUTH = 2 + KRB5_CCACHE_AUTH = 3 GSS_API_INITIAL = 1 GSS_API_VERIFY = 2 GSS_API_FINISH = 3 @@ -31,6 +32,7 @@ const ( type GSSAPIConfig struct { AuthType int KeyTabPath string + CCachePath string KerberosConfigPath string ServiceName string Username string diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/IBM/sarama/heartbeat_request.go similarity index 100% rename from 
vendor/github.com/Shopify/sarama/heartbeat_request.go rename to vendor/github.com/IBM/sarama/heartbeat_request.go diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/IBM/sarama/heartbeat_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/heartbeat_response.go rename to vendor/github.com/IBM/sarama/heartbeat_response.go diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go rename to vendor/github.com/IBM/sarama/incremental_alter_configs_request.go diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go rename to vendor/github.com/IBM/sarama/incremental_alter_configs_response.go diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/IBM/sarama/init_producer_id_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/init_producer_id_request.go rename to vendor/github.com/IBM/sarama/init_producer_id_request.go diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/IBM/sarama/init_producer_id_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/init_producer_id_response.go rename to vendor/github.com/IBM/sarama/init_producer_id_response.go diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/IBM/sarama/interceptors.go similarity index 100% rename from vendor/github.com/Shopify/sarama/interceptors.go rename to vendor/github.com/IBM/sarama/interceptors.go diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/IBM/sarama/join_group_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/join_group_request.go rename to vendor/github.com/IBM/sarama/join_group_request.go diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/IBM/sarama/join_group_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/join_group_response.go rename to vendor/github.com/IBM/sarama/join_group_response.go diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/IBM/sarama/kerberos_client.go similarity index 79% rename from vendor/github.com/Shopify/sarama/kerberos_client.go rename to vendor/github.com/IBM/sarama/kerberos_client.go index 01a53193b..289126879 100644 --- a/vendor/github.com/Shopify/sarama/kerberos_client.go +++ b/vendor/github.com/IBM/sarama/kerberos_client.go @@ -3,6 +3,7 @@ package sarama import ( krb5client "github.com/jcmturner/gokrb5/v8/client" krb5config "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/credentials" "github.com/jcmturner/gokrb5/v8/keytab" "github.com/jcmturner/gokrb5/v8/types" ) @@ -32,13 +33,23 @@ func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) { func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) { var client *krb5client.Client - if config.AuthType == KRB5_KEYTAB_AUTH { + switch config.AuthType { + case KRB5_KEYTAB_AUTH: kt, err := keytab.Load(config.KeyTabPath) if err != nil { return nil, err } client = krb5client.NewWithKeytab(config.Username, 
config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) - } else { + case KRB5_CCACHE_AUTH: + cc, err := credentials.LoadCCache(config.CCachePath) + if err != nil { + return nil, err + } + client, err = krb5client.NewFromCCache(cc, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + if err != nil { + return nil, err + } + default: client = krb5client.NewWithPassword(config.Username, config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) } diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/IBM/sarama/leave_group_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/leave_group_request.go rename to vendor/github.com/IBM/sarama/leave_group_request.go diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/IBM/sarama/leave_group_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/leave_group_response.go rename to vendor/github.com/IBM/sarama/leave_group_response.go diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/IBM/sarama/length_field.go similarity index 100% rename from vendor/github.com/Shopify/sarama/length_field.go rename to vendor/github.com/IBM/sarama/length_field.go diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/IBM/sarama/list_groups_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_groups_request.go rename to vendor/github.com/IBM/sarama/list_groups_request.go diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/IBM/sarama/list_groups_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_groups_response.go rename to vendor/github.com/IBM/sarama/list_groups_response.go diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go rename to vendor/github.com/IBM/sarama/list_partition_reassignments_request.go diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go rename to vendor/github.com/IBM/sarama/list_partition_reassignments_response.go diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/IBM/sarama/message.go similarity index 100% rename from vendor/github.com/Shopify/sarama/message.go rename to vendor/github.com/IBM/sarama/message.go diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/IBM/sarama/message_set.go similarity index 100% rename from vendor/github.com/Shopify/sarama/message_set.go rename to vendor/github.com/IBM/sarama/message_set.go diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/IBM/sarama/metadata_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/metadata_request.go rename to vendor/github.com/IBM/sarama/metadata_request.go diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/IBM/sarama/metadata_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/metadata_response.go rename to vendor/github.com/IBM/sarama/metadata_response.go diff 
--git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/IBM/sarama/metrics.go similarity index 100% rename from vendor/github.com/Shopify/sarama/metrics.go rename to vendor/github.com/IBM/sarama/metrics.go diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/IBM/sarama/mockbroker.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockbroker.go rename to vendor/github.com/IBM/sarama/mockbroker.go diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/IBM/sarama/mockkerberos.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockkerberos.go rename to vendor/github.com/IBM/sarama/mockkerberos.go diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/IBM/sarama/mockresponses.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockresponses.go rename to vendor/github.com/IBM/sarama/mockresponses.go diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/IBM/sarama/offset_commit_request.go similarity index 95% rename from vendor/github.com/Shopify/sarama/offset_commit_request.go rename to vendor/github.com/IBM/sarama/offset_commit_request.go index 5dd88220d..ed0566fe6 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/IBM/sarama/offset_commit_request.go @@ -220,7 +220,11 @@ func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { } } -func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + r.AddBlockWithLeaderEpoch(topic, partitionID, offset, 0, timestamp, metadata) +} + +func (r *OffsetCommitRequest) AddBlockWithLeaderEpoch(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) } diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/IBM/sarama/offset_commit_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_commit_response.go rename to vendor/github.com/IBM/sarama/offset_commit_response.go diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/IBM/sarama/offset_fetch_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_fetch_request.go rename to vendor/github.com/IBM/sarama/offset_fetch_request.go diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/IBM/sarama/offset_fetch_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_fetch_response.go rename to vendor/github.com/IBM/sarama/offset_fetch_response.go diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go similarity index 98% rename from vendor/github.com/Shopify/sarama/offset_manager.go rename to vendor/github.com/IBM/sarama/offset_manager.go index 1ea15ff93..9b7960599 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/IBM/sarama/offset_manager.go @@ -304,7 +304,7 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest { for _, pom := range topicManagers { pom.lock.Lock() if pom.dirty { - r.AddBlock(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, 
perPartitionTimestamp, pom.metadata) + r.AddBlockWithLeaderEpoch(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, perPartitionTimestamp, pom.metadata) } pom.lock.Unlock() } @@ -359,13 +359,13 @@ func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest // nothing wrong but we didn't commit, we'll get it next time round case ErrFencedInstancedId: pom.handleError(err) - // TODO close the whole consumer for instacne fenced.... + // TODO close the whole consumer for instance fenced.... om.tryCancelSession() case ErrUnknownTopicOrPartition: // let the user know *and* try redispatching - if topic-auto-create is // enabled, redispatching should trigger a metadata req and create the // topic; if not then re-dispatching won't help, but we've let the user - // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + // know and it shouldn't hurt either (see https://github.com/IBM/sarama/issues/706) fallthrough default: // dunno, tell the user and try redispatching diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/IBM/sarama/offset_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_request.go rename to vendor/github.com/IBM/sarama/offset_request.go diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/IBM/sarama/offset_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_response.go rename to vendor/github.com/IBM/sarama/offset_response.go diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/IBM/sarama/packet_decoder.go similarity index 98% rename from vendor/github.com/Shopify/sarama/packet_decoder.go rename to vendor/github.com/IBM/sarama/packet_decoder.go index b8cae5350..526e0f42f 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/IBM/sarama/packet_decoder.go @@ -55,7 +55,7 @@ type pushDecoder interface { // Saves the offset into the input buffer as the location to actually read the calculated value when able. saveOffset(in int) - // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). + // Returns the length of data to reserve for the input of this encoder (e.g. 4 bytes for a CRC32). reserveLength() int // Indicates that all required data is now available to calculate and check the field. 
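The offset-commit change above is a compatibility split: `AddBlock` keeps the older five-argument shape and delegates to the new `AddBlockWithLeaderEpoch` with a zero epoch, so existing callers keep compiling while epoch-aware callers (like the offset manager hunk) opt in explicitly. A caller-side sketch with placeholder topic, partition, and offset values, assuming the usual imports:

```go
req := &sarama.OffsetCommitRequest{ConsumerGroup: "example-group", Version: 7}

// Five-argument form: leader epoch defaults to 0 inside the wrapper.
req.AddBlock("events", 0, 42, time.Now().UnixMilli(), "")

// Epoch-aware form introduced by this diff.
req.AddBlockWithLeaderEpoch("events", 0, 43, 3, time.Now().UnixMilli(), "")
```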
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/IBM/sarama/packet_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/packet_encoder.go rename to vendor/github.com/IBM/sarama/packet_encoder.go diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/IBM/sarama/partitioner.go similarity index 100% rename from vendor/github.com/Shopify/sarama/partitioner.go rename to vendor/github.com/IBM/sarama/partitioner.go diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/IBM/sarama/prep_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/prep_encoder.go rename to vendor/github.com/IBM/sarama/prep_encoder.go diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/IBM/sarama/produce_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/produce_request.go rename to vendor/github.com/IBM/sarama/produce_request.go diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/IBM/sarama/produce_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/produce_response.go rename to vendor/github.com/IBM/sarama/produce_response.go diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/IBM/sarama/produce_set.go similarity index 100% rename from vendor/github.com/Shopify/sarama/produce_set.go rename to vendor/github.com/IBM/sarama/produce_set.go diff --git a/vendor/github.com/Shopify/sarama/quota_types.go b/vendor/github.com/IBM/sarama/quota_types.go similarity index 100% rename from vendor/github.com/Shopify/sarama/quota_types.go rename to vendor/github.com/IBM/sarama/quota_types.go diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/IBM/sarama/real_decoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/real_decoder.go rename to vendor/github.com/IBM/sarama/real_decoder.go diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/IBM/sarama/real_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/real_encoder.go rename to vendor/github.com/IBM/sarama/real_encoder.go diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/IBM/sarama/record.go similarity index 100% rename from vendor/github.com/Shopify/sarama/record.go rename to vendor/github.com/IBM/sarama/record.go diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/IBM/sarama/record_batch.go similarity index 100% rename from vendor/github.com/Shopify/sarama/record_batch.go rename to vendor/github.com/IBM/sarama/record_batch.go diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/IBM/sarama/records.go similarity index 100% rename from vendor/github.com/Shopify/sarama/records.go rename to vendor/github.com/IBM/sarama/records.go diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/IBM/sarama/request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/request.go rename to vendor/github.com/IBM/sarama/request.go diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/IBM/sarama/response_header.go similarity index 100% rename from vendor/github.com/Shopify/sarama/response_header.go rename to vendor/github.com/IBM/sarama/response_header.go diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/IBM/sarama/sarama.go similarity index 99% rename 
from vendor/github.com/Shopify/sarama/sarama.go rename to vendor/github.com/IBM/sarama/sarama.go index a42bc075a..4d5f60a66 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/IBM/sarama/sarama.go @@ -91,7 +91,7 @@ import ( var ( // Logger is the instance of a StdLogger interface that Sarama writes connection - // management events to. By default it is set to discard all log messages via ioutil.Discard, + // management events to. By default it is set to discard all log messages via io.Discard, // but you can set it to redirect wherever you want. Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags) diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_authenticate_request.go rename to vendor/github.com/IBM/sarama/sasl_authenticate_request.go diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_authenticate_response.go rename to vendor/github.com/IBM/sarama/sasl_authenticate_response.go diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/IBM/sarama/sasl_handshake_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_handshake_request.go rename to vendor/github.com/IBM/sarama/sasl_handshake_request.go diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/IBM/sarama/sasl_handshake_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_handshake_response.go rename to vendor/github.com/IBM/sarama/sasl_handshake_response.go diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go b/vendor/github.com/IBM/sarama/scram_formatter.go similarity index 100% rename from vendor/github.com/Shopify/sarama/scram_formatter.go rename to vendor/github.com/IBM/sarama/scram_formatter.go diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/IBM/sarama/sticky_assignor_user_data.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go rename to vendor/github.com/IBM/sarama/sticky_assignor_user_data.go diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/IBM/sarama/sync_group_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sync_group_request.go rename to vendor/github.com/IBM/sarama/sync_group_request.go diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/IBM/sarama/sync_group_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sync_group_response.go rename to vendor/github.com/IBM/sarama/sync_group_response.go diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/IBM/sarama/sync_producer.go similarity index 98% rename from vendor/github.com/Shopify/sarama/sync_producer.go rename to vendor/github.com/IBM/sarama/sync_producer.go index 8765ac336..3119baa6d 100644 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/IBM/sarama/sync_producer.go @@ -33,7 +33,7 @@ type SyncProducer interface { // TxnStatus return current producer transaction status. TxnStatus() ProducerTxnStatusFlag - // IsTransactional return true when current producer is is transactional. 
+ // IsTransactional return true when current producer is transactional. IsTransactional() bool // BeginTxn mark current transaction as ready. diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/IBM/sarama/timestamp.go similarity index 100% rename from vendor/github.com/Shopify/sarama/timestamp.go rename to vendor/github.com/IBM/sarama/timestamp.go diff --git a/vendor/github.com/Shopify/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go similarity index 99% rename from vendor/github.com/Shopify/sarama/transaction_manager.go rename to vendor/github.com/IBM/sarama/transaction_manager.go index e18abecd3..e1bcda3f9 100644 --- a/vendor/github.com/Shopify/sarama/transaction_manager.go +++ b/vendor/github.com/IBM/sarama/transaction_manager.go @@ -14,7 +14,7 @@ type ProducerTxnStatusFlag int16 const ( // ProducerTxnFlagUninitialized when txnmgr is created ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota - // ProducerTxnFlagInitializing when txnmgr is initilizing + // ProducerTxnFlagInitializing when txnmgr is initializing ProducerTxnFlagInitializing // ProducerTxnFlagReady when is ready to receive transaction ProducerTxnFlagReady @@ -22,7 +22,7 @@ const ( ProducerTxnFlagInTransaction // ProducerTxnFlagEndTransaction when transaction will be committed ProducerTxnFlagEndTransaction - // ProducerTxnFlagInError whan having abortable or fatal error + // ProducerTxnFlagInError when having abortable or fatal error ProducerTxnFlagInError // ProducerTxnFlagCommittingTransaction when committing txn ProducerTxnFlagCommittingTransaction @@ -117,13 +117,13 @@ var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{ ProducerTxnFlagReady, ProducerTxnFlagInError, }, - // When we need are initilizing + // When we need are initializing ProducerTxnFlagInitializing: { ProducerTxnFlagInitializing, ProducerTxnFlagReady, ProducerTxnFlagInError, }, - // When we have initilized transactional producer + // When we have initialized transactional producer ProducerTxnFlagReady: { ProducerTxnFlagInTransaction, }, @@ -660,7 +660,7 @@ func (t *transactionManager) finishTransaction(commit bool) error { t.mutex.Lock() defer t.mutex.Unlock() - // Ensure no error when committing or abording + // Ensure no error when committing or aborting if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 { return t.lastError } else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/txn_offset_commit_request.go rename to vendor/github.com/IBM/sarama/txn_offset_commit_request.go diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/txn_offset_commit_response.go rename to vendor/github.com/IBM/sarama/txn_offset_commit_response.go diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go similarity index 98% rename from vendor/github.com/Shopify/sarama/utils.go rename to vendor/github.com/IBM/sarama/utils.go index 819b6597c..4526543d6 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/IBM/sarama/utils.go @@ -193,6 +193,7 @@ var ( V3_2_3_0 = newKafkaVersion(3, 2, 3, 0) V3_3_0_0 = newKafkaVersion(3, 3, 0, 0) V3_3_1_0 = 
newKafkaVersion(3, 3, 1, 0) + V3_3_2_0 = newKafkaVersion(3, 3, 2, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -248,9 +249,10 @@ var ( V3_2_3_0, V3_3_0_0, V3_3_1_0, + V3_3_2_0, } MinVersion = V0_8_2_0 - MaxVersion = V3_3_1_0 + MaxVersion = V3_3_2_0 DefaultVersion = V1_0_0_0 // reduced set of versions to matrix test @@ -266,7 +268,7 @@ var ( V2_8_2_0, V3_1_2_0, V3_2_3_0, - V3_3_1_0, + V3_3_2_0, } ) diff --git a/vendor/github.com/Shopify/sarama/version.go b/vendor/github.com/IBM/sarama/version.go similarity index 100% rename from vendor/github.com/Shopify/sarama/version.go rename to vendor/github.com/IBM/sarama/version.go diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/IBM/sarama/zstd.go similarity index 100% rename from vendor/github.com/Shopify/sarama/zstd.go rename to vendor/github.com/IBM/sarama/zstd.go diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md deleted file mode 100644 index c2f92ec9a..000000000 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ /dev/null @@ -1,1187 +0,0 @@ -# Changelog - -## Version 1.31.1 (2022-02-01) - -- #2126 - @bai - Populate missing kafka versions -- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image -- #2123 - @bai - Update klauspost/compress to 0.14 -- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy -- #2119 - @bai - Add Kafka 3.1.0 version number -- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption -- #2051 - @seveas - Expose the TLS connection state of a broker connection -- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys -- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup -- #2113 - @mosceo - Fix typo - -## Version 1.31.0 (2022-01-18) - -## What's Changed -### :tada: New Features / Improvements -* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/Shopify/sarama/pull/2088 -* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/Shopify/sarama/pull/1686 -* Support request pipelining in AsyncProducer by @slaunay in https://github.com/Shopify/sarama/pull/2094 -### :bug: Fixes -* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/Shopify/sarama/pull/2080 -* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/Shopify/sarama/pull/2081 -* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/Shopify/sarama/pull/2082 -* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/Shopify/sarama/pull/2096 -* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/Shopify/sarama/pull/2107 -* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/Shopify/sarama/pull/2108 -* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/Shopify/sarama/pull/2078 -* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/Shopify/sarama/pull/2111 -### :wrench: Maintenance -* chore: bump runtime and test dependencies by @dnwe in https://github.com/Shopify/sarama/pull/2100 -### :memo: Documentation -* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/Shopify/sarama/pull/2099 -### :heavy_plus_sign: Other Changes -* Fix typo by @mosceo in 
https://github.com/Shopify/sarama/pull/2084 - -## New Contributors -* @grongor made their first contribution in https://github.com/Shopify/sarama/pull/2080 -* @fengyinqiao made their first contribution in https://github.com/Shopify/sarama/pull/2088 -* @xujianhai666 made their first contribution in https://github.com/Shopify/sarama/pull/1686 -* @mosceo made their first contribution in https://github.com/Shopify/sarama/pull/2084 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.1...v1.31.0 - -## Version 1.30.1 (2021-12-04) - -## What's Changed -### :tada: New Features / Improvements -* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/Shopify/sarama/pull/2045 -### :bug: Fixes -* fix: set min-go-version to 1.16 by @troyanov in https://github.com/Shopify/sarama/pull/2048 -* logger: fix debug logs' formatting directives by @utrack in https://github.com/Shopify/sarama/pull/2054 -* fix: stuck on the batch with zero records length by @pachmu in https://github.com/Shopify/sarama/pull/2057 -* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/Shopify/sarama/pull/2076 -### :wrench: Maintenance -* chore: add release notes configuration by @dnwe in https://github.com/Shopify/sarama/pull/2046 -* chore: confluent platform version bump by @lizthegrey in https://github.com/Shopify/sarama/pull/2070 - -## Notes -* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x - -## New Contributors -* @troyanov made their first contribution in https://github.com/Shopify/sarama/pull/2048 -* @lizthegrey made their first contribution in https://github.com/Shopify/sarama/pull/2045 -* @utrack made their first contribution in https://github.com/Shopify/sarama/pull/2054 -* @pachmu made their first contribution in https://github.com/Shopify/sarama/pull/2057 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.0...v1.30.1 - -## Version 1.30.0 (2021-09-29) - -⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
- -**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 - ---- - -ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** - ---- - -# New Features / Improvements - -- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh -- #2000 - @matzew - Using xdg-go module for SCRAM -- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures -- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM -- #2006 - @faillefer - Add support for DeleteOffsets operation -- #1909 - @agriffaut - KIP-546 Client quota APIs -- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state -- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger -- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log -- #2019 - @dnwe - feat: add logging & a metric for producer throttle -- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface -- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol -- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open -- #2034 - @bai - Add support for kafka 3.0.0 - -# Fixes - -- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest -- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation -- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls -- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true -- #2007 - @bai - Add support for Go 1.17 -- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks -- #2010 - @dnwe - chore: enable exportloopref and misspell linters -- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements -- #2015 - @bai - Change default branch to main -- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() -- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 -- #2016 - @dnwe - chore: replace deprecated Go calls -- #2017 - @dnwe - chore: delete legacy vagrant script -- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test -- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5 -- #2033 - @bai - Update dependencies -- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method -- #2035 - @dnwe - chore: populate the missing kafka versions -- #2038 - @dnwe - feat: add a fuzzing workflow to github actions - -## New Contributors -* @zifengyu made their first contribution in https://github.com/Shopify/sarama/pull/1983 -* @doxsch made their first contribution in https://github.com/Shopify/sarama/pull/1990 -* @LubergAlexander made their first contribution in https://github.com/Shopify/sarama/pull/1988 -* @HurSungYun made their first contribution in https://github.com/Shopify/sarama/pull/2001 -* @gdm85 made their first contribution in https://github.com/Shopify/sarama/pull/2003 -* @qiangmzsx made their first contribution in https://github.com/Shopify/sarama/pull/1973 -* @zhaomoran made their first contribution in https://github.com/Shopify/sarama/pull/1992 -* @faillefer made their first contribution in https://github.com/Shopify/sarama/pull/2006 -* @crivera-fastly made their first contribution in https://github.com/Shopify/sarama/pull/1718 -* @null-sleep made their first contribution in https://github.com/Shopify/sarama/pull/1984 - -**Full Changelog**: 
https://github.com/Shopify/sarama/compare/v1.29.1...v1.30.0 - -## Version 1.29.1 (2021-06-24) - -# New Features / Improvements - -- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API -- #1964 - @ajanikow - Add DelegationToken ResourceType - -# Fixes - -- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire -- #1971 - @KerryJava - fix kafka-producer-performance throughput panic -- #1968 - @dnwe - chore: bump golang.org/x versions -- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers -- #1963 - @dnwe - fix: ensure backoff timer is re-used -- #1949 - @dnwe - fix: explicitly use uint64 for payload length - -## Version 1.29.0 (2021-05-07) - -### New Features / Improvements - -- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API -- #1869 - @wyndhblb - zstd: encode+decode performance improvements -- #1541 - @izolight - add String, (Un)MarshalText for acl types. -- #1921 - @bai - Add support for Kafka 2.8.0 - -### Fixes -- #1936 - @dnwe - fix(consumer): follow preferred broker -- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication -- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) -- #1926 - @dnwe - fix: correct initial CodeQL findings -- #1925 - @bai - Test out CodeQL -- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos -- #1922 - @bai - Update go dependencies -- #1898 - @mmaslankaprv - Parsing only known control batches value -- #1887 - @withshubh - Fix: issues affecting code quality - -## Version 1.28.0 (2021-02-15) - -**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. 
See #1788 for details.** - -- #1870 - @kvch - Update Kerberos library to latest major -- #1876 - @bai - Update docs, reference pkg.go.dev -- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close -- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages -- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies -- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy -- #1862 - @bai - Fix CI setenv permissions issues -- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev -- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica - -## Version 1.27.2 (2020-10-21) - -### Improvements - -#1750 - @krantideep95 Adds missing mock responses for mocking consumer group - -## Fixes - -#1817 - reverts #1785 - Add private method to Client interface to prevent implementation - -## Version 1.27.1 (2020-10-07) - -### Improvements - -#1775 - @d1egoaz - Adds a Producer Interceptor example -#1781 - @justin-chen - Refresh brokers given list of seed brokers -#1784 - @justin-chen - Add randomize seed broker method -#1790 - @d1egoaz - remove example binary -#1798 - @bai - Test against Go 1.15 -#1785 - @justin-chen - Add private method to Client interface to prevent implementation -#1802 - @uvw - Support Go 1.13 error unwrapping - -## Fixes - -#1791 - @stanislavkozlovski - bump default version to 1.0.0 - -## Version 1.27.0 (2020-08-11) - -### Improvements - -#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration -#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests -#1699 - @wclaeys - Consumer group support for manually committing offsets -#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0 -#1726 - @d1egoaz - Include zstd on the functional tests -#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors -#1738 - @varun06 - fixed variable names that are named same as some std lib package names -#1741 - @varun06 - updated zstd dependency to latest v1.10.10 -#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base -#1763 - @alrs - remove deprecated tls options from test -#1769 - @bai - Add support for Kafka 2.6.0 - -## Fixes - -#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication -#1744 - @alrs - Fix isBalanced Function Signature - -## Version 1.26.4 (2020-05-19) - -## Fixes - -- #1701 - @d1egoaz - Set server name only for the current broker -- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka - -## Version 1.26.3 (2020-05-07) - -## Fixes - -- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config - -## Version 1.26.2 (2020-05-06) - -## ⚠️ Known Issues - -This release has been marked as not ready for production and may be unstable, please use v1.26.4.
- -### Improvements - -- #1560 - @iyacontrol - add sync pool for gzip 1-9 -- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID -- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs -- #1632 - @bai - Add support for Go 1.14 -- #1640 - @random-dwi - Feature/fix list partition reassignments -- #1646 - @mimaison - Add DescribeLogDirs to admin client -- #1667 - @bai - Add support for kafka 2.5.0 - -## Fixes - -- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 -- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine -- #1602 - @d1egoaz - adds a note about consumer groups Consume method -- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly -- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented -- #1614 - @alrs - produce_response.go: Remove Unused Functions -- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables -- #1639 - @agriffaut - Handle errors with no message but error code -- #1643 - @kzinglzy - fix `config.net.keepalive` -- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs -- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata -- #1650 - @lavoiesl - Return the response error in heartbeatLoop -- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die -- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. - -## Version 1.26.1 (2020-02-04) - -Improvements: -- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) -- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595)) -- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573)) -- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592)) - -Bug Fixes: -- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590)) -- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) - -## Version 1.26.0 (2020-01-24) - -New Features: -- Enable zstd compression - ([1574](https://github.com/Shopify/sarama/pull/1574), - [1582](https://github.com/Shopify/sarama/pull/1582)) -- Support headers in tools kafka-console-producer - ([1549](https://github.com/Shopify/sarama/pull/1549)) - -Improvements: -- Add SASL AuthIdentity to SASL frames (authzid) - ([1585](https://github.com/Shopify/sarama/pull/1585)). - -Bug Fixes: -- Sending messages with ZStd compression enabled fails in multiple ways - ([1252](https://github.com/Shopify/sarama/issues/1252)). -- Use the broker for any admin on BrokerConfig - ([1571](https://github.com/Shopify/sarama/pull/1571)). -- Set DescribeConfigRequest Version field - ([1576](https://github.com/Shopify/sarama/pull/1576)). -- ConsumerGroup flooding logs with client/metadata update req - ([1578](https://github.com/Shopify/sarama/pull/1578)). -- MetadataRequest version in DescribeCluster - ([1580](https://github.com/Shopify/sarama/pull/1580)). -- Fix deadlock in consumer group handleError - ([1581](https://github.com/Shopify/sarama/pull/1581)) -- Fill in the Fetch{Request,Response} protocol - ([1582](https://github.com/Shopify/sarama/pull/1582)).
-- Retry topic request on ControllerNotAvailable - ([1586](https://github.com/Shopify/sarama/pull/1586)). - -## Version 1.25.0 (2020-01-13) - -New Features: -- Support TLS protocol in kafka-producer-performance - ([1538](https://github.com/Shopify/sarama/pull/1538)). -- Add support for kafka 2.4.0 - ([1552](https://github.com/Shopify/sarama/pull/1552)). - -Improvements: -- Allow the Consumer to disable auto-commit offsets - ([1164](https://github.com/Shopify/sarama/pull/1164)). -- Produce records with consistent timestamps - ([1455](https://github.com/Shopify/sarama/pull/1455)). - -Bug Fixes: -- Fix incorrect SetTopicMetadata name mentions - ([1534](https://github.com/Shopify/sarama/pull/1534)). -- Fix client.tryRefreshMetadata Println - ([1535](https://github.com/Shopify/sarama/pull/1535)). -- Fix panic on calling updateMetadata on closed client - ([1531](https://github.com/Shopify/sarama/pull/1531)). -- Fix possible faulty metrics in TestFuncProducing - ([1545](https://github.com/Shopify/sarama/pull/1545)). - -## Version 1.24.1 (2019-10-31) - -New Features: -- Add DescribeLogDirs Request/Response pair - ([1520](https://github.com/Shopify/sarama/pull/1520)). - -Bug Fixes: -- Fix ClusterAdmin returning invalid controller ID on DescribeCluster - ([1518](https://github.com/Shopify/sarama/pull/1518)). -- Fix issue with consumergroup not rebalancing when new partition is added - ([1525](https://github.com/Shopify/sarama/pull/1525)). -- Ensure consistent use of read/write deadlines - ([1529](https://github.com/Shopify/sarama/pull/1529)). - -## Version 1.24.0 (2019-10-09) - -New Features: -- Add sticky partition assignor - ([1416](https://github.com/Shopify/sarama/pull/1416)). -- Switch from cgo zstd package to pure Go implementation - ([1477](https://github.com/Shopify/sarama/pull/1477)). - -Improvements: -- Allow creating ClusterAdmin from client - ([1415](https://github.com/Shopify/sarama/pull/1415)). -- Set KafkaVersion in ListAcls method - ([1452](https://github.com/Shopify/sarama/pull/1452)). -- Set request version in CreateACL ClusterAdmin method - ([1458](https://github.com/Shopify/sarama/pull/1458)). -- Set request version in DeleteACL ClusterAdmin method - ([1461](https://github.com/Shopify/sarama/pull/1461)). -- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest - ([1464](https://github.com/Shopify/sarama/pull/1464)). -- Remove direct usage of gofork - ([1465](https://github.com/Shopify/sarama/pull/1465)). -- Add support for Go 1.13 - ([1478](https://github.com/Shopify/sarama/pull/1478)). -- Improve behavior of NewMockListAclsResponse - ([1481](https://github.com/Shopify/sarama/pull/1481)). - -Bug Fixes: -- Fix race condition in consumergroup example - ([1434](https://github.com/Shopify/sarama/pull/1434)). -- Fix brokerProducer goroutine leak - ([1442](https://github.com/Shopify/sarama/pull/1442)). -- Use released version of lz4 library - ([1469](https://github.com/Shopify/sarama/pull/1469)). -- Set correct version in MockDeleteTopicsResponse - ([1484](https://github.com/Shopify/sarama/pull/1484)). -- Fix CLI help message typo - ([1494](https://github.com/Shopify/sarama/pull/1494)). - -Known Issues: -- Please **don't** use Zstd, as it doesn't work right now. - See https://github.com/Shopify/sarama/issues/1252 - -## Version 1.23.1 (2019-07-22) - -Bug Fixes: -- Fix fetch delete bug record - ([1425](https://github.com/Shopify/sarama/pull/1425)). -- Handle SASL/OAUTHBEARER token rejection - ([1428](https://github.com/Shopify/sarama/pull/1428)). 
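The v1.25.0 entry above ("Allow the Consumer to disable auto-commit offsets", #1164) is one of the few items here that affects everyday configuration. A minimal sketch of the toggle against the IBM/sarama API, assuming the `Consumer.Offsets.AutoCommit` config block that current releases expose:

```go
package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Disable the periodic auto-commit from #1164; offsets then have to be
	// committed explicitly (manual consumer-group commits arrived via #1699).
	cfg.Consumer.Offsets.AutoCommit.Enable = false
	fmt.Println("auto-commit enabled:", cfg.Consumer.Offsets.AutoCommit.Enable)
}
```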
 - -## Version 1.23.0 (2019-07-02) - -New Features: -- Add support for Kafka 2.3.0 - ([1418](https://github.com/Shopify/sarama/pull/1418)). -- Add support for ListConsumerGroupOffsets v2 - ([1374](https://github.com/Shopify/sarama/pull/1374)). -- Add support for DeleteConsumerGroup - ([1417](https://github.com/Shopify/sarama/pull/1417)). -- Add support for SASLVersion configuration - ([1410](https://github.com/Shopify/sarama/pull/1410)). -- Add kerberos support - ([1366](https://github.com/Shopify/sarama/pull/1366)). - -Improvements: -- Improve sasl_scram_client example - ([1406](https://github.com/Shopify/sarama/pull/1406)). -- Fix shutdown and race-condition in consumer-group example - ([1404](https://github.com/Shopify/sarama/pull/1404)). -- Add support for error codes 77—81 - ([1397](https://github.com/Shopify/sarama/pull/1397)). -- Pool internal objects allocated per message - ([1385](https://github.com/Shopify/sarama/pull/1385)). -- Reduce packet decoder allocations - ([1373](https://github.com/Shopify/sarama/pull/1373)). -- Support timeout when fetching metadata - ([1359](https://github.com/Shopify/sarama/pull/1359)). - -Bug Fixes: -- Fix fetch size integer overflow - ([1376](https://github.com/Shopify/sarama/pull/1376)). -- Handle and log throttled FetchResponses - ([1383](https://github.com/Shopify/sarama/pull/1383)). -- Refactor misspelled word Resouce to Resource - ([1368](https://github.com/Shopify/sarama/pull/1368)). - -## Version 1.22.1 (2019-04-29) - -Improvements: -- Use zstd 1.3.8 - ([1350](https://github.com/Shopify/sarama/pull/1350)). -- Add support for SaslHandshakeRequest v1 - ([1354](https://github.com/Shopify/sarama/pull/1354)). - -Bug Fixes: -- Fix V5 MetadataRequest nullable topics array - ([1353](https://github.com/Shopify/sarama/pull/1353)). -- Use a different SCRAM client for each broker connection - ([1349](https://github.com/Shopify/sarama/pull/1349)). -- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 - ([1344](https://github.com/Shopify/sarama/pull/1344)). - -## Version 1.22.0 (2019-04-09) - -New Features: -- Add Offline Replicas Operation to Client - ([1318](https://github.com/Shopify/sarama/pull/1318)). -- Allow using proxy when connecting to broker - ([1326](https://github.com/Shopify/sarama/pull/1326)). -- Implement ReadCommitted - ([1307](https://github.com/Shopify/sarama/pull/1307)). -- Add support for Kafka 2.2.0 - ([1331](https://github.com/Shopify/sarama/pull/1331)). -- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms - ([1331](https://github.com/Shopify/sarama/pull/1295)). - -Improvements: -- Unregister all broker metrics on broker stop - ([1232](https://github.com/Shopify/sarama/pull/1232)). -- Add SCRAM authentication example - ([1303](https://github.com/Shopify/sarama/pull/1303)). -- Add consumergroup examples - ([1304](https://github.com/Shopify/sarama/pull/1304)). -- Expose consumer batch size metric - ([1296](https://github.com/Shopify/sarama/pull/1296)). -- Add TLS options to console producer and consumer - ([1300](https://github.com/Shopify/sarama/pull/1300)). -- Reduce client close bookkeeping - ([1297](https://github.com/Shopify/sarama/pull/1297)). -- Satisfy error interface in create responses - ([1154](https://github.com/Shopify/sarama/pull/1154)). -- Please lint gods - ([1346](https://github.com/Shopify/sarama/pull/1346)). - -Bug Fixes: -- Fix multi consumer group instance crash - ([1338](https://github.com/Shopify/sarama/pull/1338)).
-- Update lz4 to latest version - ([1347](https://github.com/Shopify/sarama/pull/1347)). -- Retry ErrNotCoordinatorForConsumer in new consumergroup session - ([1231](https://github.com/Shopify/sarama/pull/1231)). -- Fix cleanup error handler - ([1332](https://github.com/Shopify/sarama/pull/1332)). -- Fix race condition in PartitionConsumer - ([1156](https://github.com/Shopify/sarama/pull/1156)). - -## Version 1.21.0 (2019-02-24) - -New Features: -- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest - ([1236](https://github.com/Shopify/sarama/pull/1236)). -- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests - ([1178](https://github.com/Shopify/sarama/pull/1178)). -- Implement SASL/OAUTHBEARER - ([1240](https://github.com/Shopify/sarama/pull/1240)). - -Improvements: -- Add Go mod support - ([1282](https://github.com/Shopify/sarama/pull/1282)). -- Add error codes 73—76 - ([1239](https://github.com/Shopify/sarama/pull/1239)). -- Add retry backoff function - ([1160](https://github.com/Shopify/sarama/pull/1160)). -- Maintain metadata in the producer even when retries are disabled - ([1189](https://github.com/Shopify/sarama/pull/1189)). -- Include ReplicaAssignment in ListTopics - ([1274](https://github.com/Shopify/sarama/pull/1274)). -- Add producer performance tool - ([1222](https://github.com/Shopify/sarama/pull/1222)). -- Add support for LogAppend timestamps - ([1258](https://github.com/Shopify/sarama/pull/1258)). - -Bug Fixes: -- Fix potential deadlock when a heartbeat request fails - ([1286](https://github.com/Shopify/sarama/pull/1286)). -- Fix consuming compacted topic - ([1227](https://github.com/Shopify/sarama/pull/1227)). -- Set correct Kafka version for DescribeConfigsRequest v1 - ([1277](https://github.com/Shopify/sarama/pull/1277)). -- Update kafka test version - ([1273](https://github.com/Shopify/sarama/pull/1273)). - -## Version 1.20.1 (2019-01-10) - -New Features: -- Add optional replica id in offset request - ([1100](https://github.com/Shopify/sarama/pull/1100)). - -Improvements: -- Implement DescribeConfigs Request + Response v1 & v2 - ([1230](https://github.com/Shopify/sarama/pull/1230)). -- Reuse compression objects - ([1185](https://github.com/Shopify/sarama/pull/1185)). -- Switch from png to svg for GoDoc link in README - ([1243](https://github.com/Shopify/sarama/pull/1243)). -- Fix typo in deprecation notice for FetchResponseBlock.Records - ([1242](https://github.com/Shopify/sarama/pull/1242)). -- Fix typos in consumer metadata response file - ([1244](https://github.com/Shopify/sarama/pull/1244)). - -Bug Fixes: -- Revert to individual msg retries for non-idempotent - ([1203](https://github.com/Shopify/sarama/pull/1203)). -- Respect MaxMessageBytes limit for uncompressed messages - ([1141](https://github.com/Shopify/sarama/pull/1141)). - -## Version 1.20.0 (2018-12-10) - -New Features: - - Add support for zstd compression - ([#1170](https://github.com/Shopify/sarama/pull/1170)). - - Add support for Idempotent Producer - ([#1152](https://github.com/Shopify/sarama/pull/1152)). - - Add support for Kafka 2.1.0 - ([#1229](https://github.com/Shopify/sarama/pull/1229)). - - Add support for OffsetCommit request/response pairs versions v1 to v5 - ([#1201](https://github.com/Shopify/sarama/pull/1201)). - - Add support for OffsetFetch request/response pair up to version v5 - ([#1198](https://github.com/Shopify/sarama/pull/1198)).
 - -Improvements: - - Export broker's Rack setting - ([#1173](https://github.com/Shopify/sarama/pull/1173)). - - Always use latest patch version of Go on CI - ([#1202](https://github.com/Shopify/sarama/pull/1202)). - - Add error codes 61 to 72 - ([#1195](https://github.com/Shopify/sarama/pull/1195)). - -Bug Fixes: - - Fix build without cgo - ([#1182](https://github.com/Shopify/sarama/pull/1182)). - - Fix go vet suggestion in consumer group file - ([#1209](https://github.com/Shopify/sarama/pull/1209)). - - Fix typos in code and comments - ([#1228](https://github.com/Shopify/sarama/pull/1228)). - -## Version 1.19.0 (2018-09-27) - -New Features: - - Implement a higher-level consumer group - ([#1099](https://github.com/Shopify/sarama/pull/1099)). - -Improvements: - - Add support for Go 1.11 - ([#1176](https://github.com/Shopify/sarama/pull/1176)). - -Bug Fixes: - - Fix encoding of `MetadataResponse` with version 2 and higher - ([#1174](https://github.com/Shopify/sarama/pull/1174)). - - Fix race condition in mock async producer - ([#1174](https://github.com/Shopify/sarama/pull/1174)). - -## Version 1.18.0 (2018-09-07) - -New Features: - - Make `Partitioner.RequiresConsistency` vary per-message - ([#1112](https://github.com/Shopify/sarama/pull/1112)). - - Add customizable partitioner - ([#1118](https://github.com/Shopify/sarama/pull/1118)). - - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`, - `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL` - ([#1055](https://github.com/Shopify/sarama/pull/1055)). - -Improvements: - - Add support for Kafka 2.0.0 - ([#1149](https://github.com/Shopify/sarama/pull/1149)). - - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts - ([#1123](https://github.com/Shopify/sarama/pull/1123)). - - Simpler offset management - ([#1127](https://github.com/Shopify/sarama/pull/1127)). - -Bug Fixes: - - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka - ([#1110](https://github.com/Shopify/sarama/pull/1110)). - - Fix consumer block when response did not contain all the - expected topic/partition blocks - ([#1086](https://github.com/Shopify/sarama/pull/1086)). - - Fix consumer block when response contains only control messages - ([#1115](https://github.com/Shopify/sarama/pull/1115)). - - Add timeout config for ClusterAdmin requests - ([#1142](https://github.com/Shopify/sarama/pull/1142)). - - Add version check when producing message with headers - ([#1117](https://github.com/Shopify/sarama/pull/1117)). - - Fix `MetadataRequest` for empty list of topics - ([#1132](https://github.com/Shopify/sarama/pull/1132)). - - Fix producer topic metadata on-demand fetch when topic error happens in metadata response - ([#1125](https://github.com/Shopify/sarama/pull/1125)). - -## Version 1.17.0 (2018-05-30) - -New Features: - - Add support for gzip compression levels - ([#1044](https://github.com/Shopify/sarama/pull/1044)). - - Add support for Metadata request/response pairs versions v1 to v5 - ([#1047](https://github.com/Shopify/sarama/pull/1047), - [#1069](https://github.com/Shopify/sarama/pull/1069)). - - Add versioning to JoinGroup request/response pairs - ([#1098](https://github.com/Shopify/sarama/pull/1098)) - - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs - ([#1065](https://github.com/Shopify/sarama/pull/1065), - [#1096](https://github.com/Shopify/sarama/pull/1096), - [#1027](https://github.com/Shopify/sarama/pull/1027)).
- - Add `Controller()` method to Client interface - ([#1063](https://github.com/Shopify/sarama/pull/1063)). - -Improvements: - - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp - ([#1010](https://github.com/Shopify/sarama/pull/1010)). - - Expose missing protocol parts: `msgSet` and `recordBatch` - ([#1049](https://github.com/Shopify/sarama/pull/1049)). - - Add support for v1 DeleteTopics Request - ([#1052](https://github.com/Shopify/sarama/pull/1052)). - - Add support for Go 1.10 - ([#1064](https://github.com/Shopify/sarama/pull/1064)). - - Claim support for Kafka 1.1.0 - ([#1073](https://github.com/Shopify/sarama/pull/1073)). - -Bug Fixes: - - Fix FindCoordinatorResponse.encode to allow nil Coordinator - ([#1050](https://github.com/Shopify/sarama/pull/1050), - [#1051](https://github.com/Shopify/sarama/pull/1051)). - - Clear all metadata when we have the latest topic info - ([#1033](https://github.com/Shopify/sarama/pull/1033)). - - Make `PartitionConsumer.Close` idempotent - ([#1092](https://github.com/Shopify/sarama/pull/1092)). - -## Version 1.16.0 (2018-02-12) - -New Features: - - Add support for the Create/Delete Topics request/response pairs - ([#1007](https://github.com/Shopify/sarama/pull/1007), - [#1008](https://github.com/Shopify/sarama/pull/1008)). - - Add support for the Describe/Create/Delete ACL request/response pairs - ([#1009](https://github.com/Shopify/sarama/pull/1009)). - - Add support for the five transaction-related request/response pairs - ([#1016](https://github.com/Shopify/sarama/pull/1016)). - -Improvements: - - Permit setting version on mock producer responses - ([#999](https://github.com/Shopify/sarama/pull/999)). - - Add `NewMockBrokerListener` helper for testing TLS connections - ([#1019](https://github.com/Shopify/sarama/pull/1019)). - - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB - which results in much higher throughput in most cases - ([#1024](https://github.com/Shopify/sarama/pull/1024)). - - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to - reduce CPU and memory usage when processing many partitions - ([#1028](https://github.com/Shopify/sarama/pull/1028)). - - Assign relative offsets to messages in the producer to save the brokers a - recompression pass - ([#1002](https://github.com/Shopify/sarama/pull/1002), - [#1015](https://github.com/Shopify/sarama/pull/1015)). - -Bug Fixes: - - Fix producing uncompressed batches with the new protocol format - ([#1032](https://github.com/Shopify/sarama/issues/1032)). - - Fix consuming compacted topics with the new protocol format - ([#1005](https://github.com/Shopify/sarama/issues/1005)). - - Fix consuming topics with a mix of protocol formats - ([#1021](https://github.com/Shopify/sarama/issues/1021)). - - Fix consuming when the broker includes multiple batches in a single response - ([#1022](https://github.com/Shopify/sarama/issues/1022)). - - Fix detection of `PartialTrailingMessage` when the partial message was - truncated before the magic value indicating its version - ([#1030](https://github.com/Shopify/sarama/pull/1030)). - - Fix expectation-checking in the mock of `SyncProducer.SendMessages` - ([#1035](https://github.com/Shopify/sarama/pull/1035)). - -## Version 1.15.0 (2017-12-08) - -New Features: - - Claim official support for Kafka 1.0, though it did already work - ([#984](https://github.com/Shopify/sarama/pull/984)). 
- - Helper methods for Kafka version numbers to/from strings - ([#989](https://github.com/Shopify/sarama/pull/989)). - - Implement CreatePartitions request/response - ([#985](https://github.com/Shopify/sarama/pull/985)). - -Improvements: - - Add error codes 45-60 - ([#986](https://github.com/Shopify/sarama/issues/986)). - -Bug Fixes: - - Fix slow consuming for certain Kafka 0.11/1.0 configurations - ([#982](https://github.com/Shopify/sarama/pull/982)). - - Correctly determine when a FetchResponse contains the new message format - ([#990](https://github.com/Shopify/sarama/pull/990)). - - Fix producing with multiple headers - ([#996](https://github.com/Shopify/sarama/pull/996)). - - Fix handling of truncated record batches - ([#998](https://github.com/Shopify/sarama/pull/998)). - - Fix leaking metrics when closing brokers - ([#991](https://github.com/Shopify/sarama/pull/991)). - -## Version 1.14.0 (2017-11-13) - -New Features: - - Add support for the new Kafka 0.11 record-batch format, including the wire - protocol and the necessary behavioural changes in the producer and consumer. - Transactions and idempotency are not yet supported, but producing and - consuming should work with all the existing bells and whistles (batching, - compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta - of Arista Networks for this work. Part of - ([#901](https://github.com/Shopify/sarama/issues/901)). - -Bug Fixes: - - Fix encoding of ProduceResponse versions in test - ([#970](https://github.com/Shopify/sarama/pull/970)). - - Return partial replicas list when we have it - ([#975](https://github.com/Shopify/sarama/pull/975)). - -## Version 1.13.0 (2017-10-04) - -New Features: - - Support for FetchRequest version 3 - ([#905](https://github.com/Shopify/sarama/pull/905)). - - Permit setting version on mock FetchResponses - ([#939](https://github.com/Shopify/sarama/pull/939)). - - Add a configuration option to support storing only minimal metadata for - extremely large clusters - ([#937](https://github.com/Shopify/sarama/pull/937)). - - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets - ([#932](https://github.com/Shopify/sarama/pull/932)). - -Improvements: - - Provide the block-level timestamp when consuming compressed messages - ([#885](https://github.com/Shopify/sarama/issues/885)). - - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned - by the broker, which can be meaningful - ([#930](https://github.com/Shopify/sarama/pull/930)). - - Use a `Ticker` to reduce consumer timer overhead at the cost of higher - variance in the actual timeout - ([#933](https://github.com/Shopify/sarama/pull/933)). - -Bug Fixes: - - Gracefully handle messages with negative timestamps - ([#907](https://github.com/Shopify/sarama/pull/907)). - - Raise a proper error when encountering an unknown message version - ([#940](https://github.com/Shopify/sarama/pull/940)). - -## Version 1.12.0 (2017-05-08) - -New Features: - - Added support for the `ApiVersions` request and response pair, and Kafka - version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note - that you still need to specify the Kafka version in the Sarama configuration - for the time being. - - Added a `Brokers` method to the Client which returns the complete set of - active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). 
- - Added an `InSyncReplicas` method to the Client which returns the set of all - in-sync broker IDs for the given partition, now that the Kafka versions for - which this was misleading are no longer in our supported set - ([#872](https://github.com/Shopify/sarama/pull/872)). - - Added a `NewCustomHashPartitioner` method which allows constructing a hash - partitioner with a custom hash method in case the default (FNV-1a) is not - suitable - ([#837](https://github.com/Shopify/sarama/pull/837), - [#841](https://github.com/Shopify/sarama/pull/841)). - -Improvements: - - Recognize more Kafka error codes - ([#859](https://github.com/Shopify/sarama/pull/859)). - -Bug Fixes: - - Fix an issue where decoding a malformed FetchRequest would not return the - correct error ([#818](https://github.com/Shopify/sarama/pull/818)). - - Respect ordering of group protocols in JoinGroupRequests. This fix is - transparent if you're using the `AddGroupProtocol` or - `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from - the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` - ([#812](https://github.com/Shopify/sarama/issues/812)). - - Fix an alignment-related issue with atomics on 32-bit architectures - ([#859](https://github.com/Shopify/sarama/pull/859)). - -## Version 1.11.0 (2016-12-20) - -_Important:_ As of Sarama 1.11 it is necessary to set the config value of -`Producer.Return.Successes` to true in order to use the SyncProducer. Previous -versions would silently override this value when instantiating a SyncProducer -which led to unexpected values and data races. - -New Features: - - Metrics! Thanks to Sébastien Launay for all his work on this feature - ([#701](https://github.com/Shopify/sarama/pull/701), - [#746](https://github.com/Shopify/sarama/pull/746), - [#766](https://github.com/Shopify/sarama/pull/766)). - - Add support for LZ4 compression - ([#786](https://github.com/Shopify/sarama/pull/786)). - - Add support for ListOffsetRequest v1 and Kafka 0.10.1 - ([#775](https://github.com/Shopify/sarama/pull/775)). - - Added a `HighWaterMarks` method to the Consumer which aggregates the - `HighWaterMarkOffset` values of its child topic/partitions - ([#769](https://github.com/Shopify/sarama/pull/769)). - -Bug Fixes: - - Fixed producing when using timestamps, compression and Kafka 0.10 - ([#759](https://github.com/Shopify/sarama/pull/759)). - - Added missing decoder methods to DescribeGroups response - ([#756](https://github.com/Shopify/sarama/pull/756)). - - Fix producer shutdown when `Return.Errors` is disabled - ([#787](https://github.com/Shopify/sarama/pull/787)). - - Don't mutate configuration in SyncProducer - ([#790](https://github.com/Shopify/sarama/pull/790)). - - Fix crash on SASL initialization failure - ([#795](https://github.com/Shopify/sarama/pull/795)). - -## Version 1.10.1 (2016-08-30) - -Bug Fixes: - - Fix the documentation for `HashPartitioner` which was incorrect - ([#717](https://github.com/Shopify/sarama/pull/717)). - - Permit client creation even when it is limited by ACLs - ([#722](https://github.com/Shopify/sarama/pull/722)). - - Several fixes to the consumer timer optimization code, regressions introduced - in v1.10.0. Go's timers are finicky - ([#730](https://github.com/Shopify/sarama/pull/730), - [#733](https://github.com/Shopify/sarama/pull/733), - [#734](https://github.com/Shopify/sarama/pull/734)). - - Handle consuming compressed relative offsets with Kafka 0.10 - ([#735](https://github.com/Shopify/sarama/pull/735)). 
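The v1.11.0 note above about `Producer.Return.Successes` is still the canonical SyncProducer pitfall. A minimal sketch of the required setup, assuming a placeholder broker address and topic name:

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by SyncProducer since v1.11

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}
```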
- -## Version 1.10.0 (2016-08-02) - -_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of -Kafka you are running against (via the `config.Version` value) in order to use -features that may not be compatible with old Kafka versions. If you don't -specify this value it will default to 0.8.2 (the minimum supported), and trying -to use more recent features (like the offset manager) will fail with an error. - -_Also:_ The offset-manager's behaviour has been changed to match the upstream -java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and -[#713](https://github.com/Shopify/sarama/pull/713)). If you use the -offset-manager, please ensure that you are committing one *greater* than the -last consumed message offset or else you may end up consuming duplicate -messages. - -New Features: - - Support for Kafka 0.10 - ([#672](https://github.com/Shopify/sarama/pull/672), - [#678](https://github.com/Shopify/sarama/pull/678), - [#681](https://github.com/Shopify/sarama/pull/681), and others). - - Support for configuring the target Kafka version - ([#676](https://github.com/Shopify/sarama/pull/676)). - - Batch producing support in the SyncProducer - ([#677](https://github.com/Shopify/sarama/pull/677)). - - Extend producer mock to allow setting expectations on message contents - ([#667](https://github.com/Shopify/sarama/pull/667)). - -Improvements: - - Support `nil` compressed messages for deleting in compacted topics - ([#634](https://github.com/Shopify/sarama/pull/634)). - - Pre-allocate decoding errors, greatly reducing heap usage and GC time against - misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). - - Re-use consumer expiry timers, removing one allocation per consumed message - ([#707](https://github.com/Shopify/sarama/pull/707)). - -Bug Fixes: - - Actually default the client ID to "sarama" like we say we do - ([#664](https://github.com/Shopify/sarama/pull/664)). - - Fix a rare issue where `Client.Leader` could return the wrong error - ([#685](https://github.com/Shopify/sarama/pull/685)). - - Fix a possible tight loop in the consumer - ([#693](https://github.com/Shopify/sarama/pull/693)). - - Match upstream's offset-tracking behaviour - ([#705](https://github.com/Shopify/sarama/pull/705)). - - Report UnknownTopicOrPartition errors from the offset manager - ([#706](https://github.com/Shopify/sarama/pull/706)). - - Fix possible negative partition value from the HashPartitioner - ([#709](https://github.com/Shopify/sarama/pull/709)). - -## Version 1.9.0 (2016-05-16) - -New Features: - - Add support for custom offset manager retention durations - ([#602](https://github.com/Shopify/sarama/pull/602)). - - Publish low-level mocks to enable testing of third-party producer/consumer - implementations ([#570](https://github.com/Shopify/sarama/pull/570)). - - Declare support for Golang 1.6 - ([#611](https://github.com/Shopify/sarama/pull/611)). - - Support for SASL plain-text auth - ([#648](https://github.com/Shopify/sarama/pull/648)). - -Improvements: - - Simplified broker locking scheme slightly - ([#604](https://github.com/Shopify/sarama/pull/604)). - - Documentation cleanup - ([#605](https://github.com/Shopify/sarama/pull/605), - [#621](https://github.com/Shopify/sarama/pull/621), - [#654](https://github.com/Shopify/sarama/pull/654)). - -Bug Fixes: - - Fix race condition shutting down the OffsetManager - ([#658](https://github.com/Shopify/sarama/pull/658)). 
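The v1.10.0 offset-manager note above (commit one *greater* than the last consumed offset) carries over unchanged to the consumer-group API. A hypothetical handler sketch illustrating the off-by-one rule:

```go
package kafkaexample

import "github.com/IBM/sarama"

// offsetHandler is a hypothetical sarama.ConsumerGroupHandler sketch.
type offsetHandler struct{}

func (offsetHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (offsetHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (offsetHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		// ... process msg ...
		// Mark one greater than the consumed offset, or the last message is
		// redelivered after a restart; MarkMessage applies the same +1 internally.
		sess.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")
	}
	return nil
}
```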
- -## Version 1.8.0 (2016-02-01) - -New Features: - - Full support for Kafka 0.9: - - All protocol messages and fields - ([#586](https://github.com/Shopify/sarama/pull/586), - [#588](https://github.com/Shopify/sarama/pull/588), - [#590](https://github.com/Shopify/sarama/pull/590)). - - Verified that TLS support works - ([#581](https://github.com/Shopify/sarama/pull/581)). - - Fixed the OffsetManager compatibility - ([#585](https://github.com/Shopify/sarama/pull/585)). - -Improvements: - - Optimize for fewer system calls when reading from the network - ([#584](https://github.com/Shopify/sarama/pull/584)). - - Automatically retry `InvalidMessage` errors to match upstream behaviour - ([#589](https://github.com/Shopify/sarama/pull/589)). - -## Version 1.7.0 (2015-12-11) - -New Features: - - Preliminary support for Kafka 0.9 - ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several - caveats: - - Protocol-layer support is mostly in place - ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 - renamed some messages and fields, which we did not in order to preserve API - compatibility. - - The producer and consumer work against 0.9, but the offset manager does - not ([#573](https://github.com/Shopify/sarama/pull/573)). - - TLS support may or may not work - ([#581](https://github.com/Shopify/sarama/pull/581)). - -Improvements: - - Don't wait for request timeouts on dead brokers, greatly speeding recovery - when the TCP connection is left hanging - ([#548](https://github.com/Shopify/sarama/pull/548)). - - Refactored part of the producer. The new version provides a much more elegant - solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also - slightly more efficient, and much more precise in calculating batch sizes - when compression is used - ([#549](https://github.com/Shopify/sarama/pull/549), - [#550](https://github.com/Shopify/sarama/pull/550), - [#551](https://github.com/Shopify/sarama/pull/551)). - -Bug Fixes: - - Fix race condition in consumer test mock - ([#553](https://github.com/Shopify/sarama/pull/553)). - -## Version 1.6.1 (2015-09-25) - -Bug Fixes: - - Fix panic that could occur if a user-supplied message value failed to encode - ([#449](https://github.com/Shopify/sarama/pull/449)). - -## Version 1.6.0 (2015-09-04) - -New Features: - - Implementation of a consumer offset manager using the APIs introduced in - Kafka 0.8.2. The API is designed mainly for integration into a future - high-level consumer, not for direct use, although it is *possible* to use it - directly. - ([#461](https://github.com/Shopify/sarama/pull/461)). - -Improvements: - - CRC32 calculation is much faster on machines with SSE4.2 instructions, - removing a major hotspot from most profiles - ([#255](https://github.com/Shopify/sarama/pull/255)). - -Bug Fixes: - - Make protocol decoding more robust against some malformed packets generated - by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), - [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways - ([#528](https://github.com/Shopify/sarama/pull/528)). - - Fix a potential race condition panic in the consumer on shutdown - ([#529](https://github.com/Shopify/sarama/pull/529)). - -## Version 1.5.0 (2015-08-17) - -New Features: - - TLS-encrypted network connections are now supported. This feature is subject - to change when Kafka releases built-in TLS support, but for now this is - enough to work with TLS-terminating proxies - ([#154](https://github.com/Shopify/sarama/pull/154)). 
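The v1.5.0 TLS support above is still wired up the same way today. A minimal sketch, assuming a placeholder broker address and a deliberately bare `tls.Config` (real deployments would load CA bundles and key pairs):

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Net.TLS.Enable = true
	cfg.Net.TLS.Config = &tls.Config{MinVersion: tls.VersionTLS12}

	client, err := sarama.NewClient([]string{"broker.example.com:9093"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```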
- -Improvements: - - The consumer will not block if a single partition is not drained by the user; - all other partitions will continue to consume normally - ([#485](https://github.com/Shopify/sarama/pull/485)). - - Formatting of error strings has been much improved - ([#495](https://github.com/Shopify/sarama/pull/495)). - - Internal refactoring of the producer for code cleanliness and to enable - future work ([#300](https://github.com/Shopify/sarama/pull/300)). - -Bug Fixes: - - Fix a potential deadlock in the consumer on shutdown - ([#475](https://github.com/Shopify/sarama/pull/475)). - -## Version 1.4.3 (2015-07-21) - -Bug Fixes: - - Don't include the partitioner in the producer's "fetch partitions" - circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). - - Don't retry messages until the broker is closed when abandoning a broker in - the producer ([#468](https://github.com/Shopify/sarama/pull/468)). - - Update the import path for snappy-go, it has moved again and the API has - changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). - -## Version 1.4.2 (2015-05-27) - -Bug Fixes: - - Update the import path for snappy-go, it has moved from google code to github - ([#456](https://github.com/Shopify/sarama/pull/456)). - -## Version 1.4.1 (2015-05-25) - -Improvements: - - Optimizations when decoding snappy messages, thanks to John Potocny - ([#446](https://github.com/Shopify/sarama/pull/446)). - -Bug Fixes: - - Fix hypothetical race conditions on producer shutdown - ([#450](https://github.com/Shopify/sarama/pull/450), - [#451](https://github.com/Shopify/sarama/pull/451)). - -## Version 1.4.0 (2015-05-01) - -New Features: - - The consumer now implements `Topics()` and `Partitions()` methods to enable - users to dynamically choose what topics/partitions to consume without - instantiating a full client - ([#431](https://github.com/Shopify/sarama/pull/431)). - - The partition-consumer now exposes the high water mark offset value returned - by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). - - Added a `kafka-console-consumer` tool capable of handling multiple - partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` - ([#439](https://github.com/Shopify/sarama/pull/439), - [#442](https://github.com/Shopify/sarama/pull/442)). - -Improvements: - - The producer's logging during retry scenarios is more consistent, more - useful, and slightly less verbose - ([#429](https://github.com/Shopify/sarama/pull/429)). - - The client now shuffles its initial list of seed brokers in order to prevent - thundering herd on the first broker in the list - ([#441](https://github.com/Shopify/sarama/pull/441)). - -Bug Fixes: - - The producer now correctly manages its state if retries occur when it is - shutting down, fixing several instances of confusing behaviour and at least - one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). - - The consumer now handles messages for different partitions asynchronously, - making it much more resilient to specific user code ordering - ([#325](https://github.com/Shopify/sarama/pull/325)). - -## Version 1.3.0 (2015-04-16) - -New Features: - - The client now tracks consumer group coordinators using - ConsumerMetadataRequests similar to how it tracks partition leadership using - regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). 
- This adds two methods to the client API: - - `Coordinator(consumerGroup string) (*Broker, error)` - - `RefreshCoordinator(consumerGroup string) error` - -Improvements: - - ConsumerMetadataResponses now automatically create a Broker object out of the - ID/address/port combination for the Coordinator; accessing the fields - individually has been deprecated - ([#413](https://github.com/Shopify/sarama/pull/413)). - - Much improved handling of `OffsetOutOfRange` errors in the consumer. - Consumers will fail to start if the provided offset is out of range - ([#418](https://github.com/Shopify/sarama/pull/418)) - and they will automatically shut down if the offset falls out of range - ([#424](https://github.com/Shopify/sarama/pull/424)). - - Small performance improvement in encoding and decoding protocol messages - ([#427](https://github.com/Shopify/sarama/pull/427)). - -Bug Fixes: - - Fix a rare race condition in the client's background metadata refresher if - it happens to be activated while the client is being closed - ([#422](https://github.com/Shopify/sarama/pull/422)). - -## Version 1.2.0 (2015-04-07) - -Improvements: - - The producer's behaviour when `Flush.Frequency` is set is now more intuitive - ([#389](https://github.com/Shopify/sarama/pull/389)). - - The producer is now somewhat more memory-efficient during and after retrying - messages due to an improved queue implementation - ([#396](https://github.com/Shopify/sarama/pull/396)). - - The consumer produces much more useful logging output when leadership - changes ([#385](https://github.com/Shopify/sarama/pull/385)). - - The client's `GetOffset` method will now automatically refresh metadata and - retry once in the event of stale information or similar - ([#394](https://github.com/Shopify/sarama/pull/394)). - - Broker connections now have support for using TCP keepalives - ([#407](https://github.com/Shopify/sarama/issues/407)). - -Bug Fixes: - - The OffsetCommitRequest message now correctly implements all three possible - API versions ([#390](https://github.com/Shopify/sarama/pull/390), - [#400](https://github.com/Shopify/sarama/pull/400)). - -## Version 1.1.0 (2015-03-20) - -Improvements: - - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly - broken topics don't choke throughput - ([#373](https://github.com/Shopify/sarama/pull/373)). - -Bug Fixes: - - Fix the producer's internal reference counting in certain unusual scenarios - ([#367](https://github.com/Shopify/sarama/pull/367)). - - Fix the consumer's internal reference counting in certain unusual scenarios - ([#369](https://github.com/Shopify/sarama/pull/369)). - - Fix a condition where the producer's internal control messages could have - gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). - - Fix an issue where invalid partition lists would be cached when asking for - metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)). - - -## Version 1.0.0 (2015-03-17) - -Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: - -- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. -- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
-- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. -- For most use cases, it is no longer necessary to open a `Client`; this will be done for you. -- All the configuration values have been unified in the `Config` struct. -- Much improved test suite. diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go deleted file mode 100644 index aa7fb7498..000000000 --- a/vendor/github.com/Shopify/sarama/decompress.go +++ /dev/null @@ -1,61 +0,0 @@ -package sarama - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "sync" - - snappy "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4/v4" -) - -var ( - lz4ReaderPool = sync.Pool{ - New: func() interface{} { - return lz4.NewReader(nil) - }, - } - - gzipReaderPool sync.Pool -) - -func decompress(cc CompressionCodec, data []byte) ([]byte, error) { - switch cc { - case CompressionNone: - return data, nil - case CompressionGZIP: - var err error - reader, ok := gzipReaderPool.Get().(*gzip.Reader) - if !ok { - reader, err = gzip.NewReader(bytes.NewReader(data)) - } else { - err = reader.Reset(bytes.NewReader(data)) - } - - if err != nil { - return nil, err - } - - defer gzipReaderPool.Put(reader) - - return io.ReadAll(reader) - case CompressionSnappy: - return snappy.Decode(data) - case CompressionLZ4: - reader, ok := lz4ReaderPool.Get().(*lz4.Reader) - if !ok { - reader = lz4.NewReader(bytes.NewReader(data)) - } else { - reader.Reset(bytes.NewReader(data)) - } - defer lz4ReaderPool.Put(reader) - - return io.ReadAll(reader) - case CompressionZSTD: - return zstdDecompress(ZstdDecoderParams{}, nil, data) - default: - return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} - } -} diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index a2bf06e94..7a008a4d2 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@v0.7.2 + - go install mvdan.cc/garble@v0.9.3 builds: - diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index d73fb86e4..f710a34ec 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,47 @@ This package provides various compression algorithms.
# changelog +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding.
https://github.com/klauspost/compress/pull/746 + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing an existing stream https://github.com/klauspost/compress/pull/723 + * Dec 11, 2022 (v1.15.13) * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 @@ -587,6 +628,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv * [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. * [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. +* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. +* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. # license diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md new file mode 100644 index 000000000..23a43387b --- /dev/null +++ b/vendor/github.com/klauspost/compress/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Vulnerability Definition + +A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. + +Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+ +Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt, you are welcome to file a security issue. + +It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. + +Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity, this package may or may not implement a workaround. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible, please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis. diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 6f341914c..dac97e58a 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error { c1.encodeZero(tt[src[ip-2]]) ip -= 2 } + src = src[:ip] // Main compression loop. switch { case !s.zeroBits && s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush. // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case !s.zeroBits: // We do not need to check if any output is 0 bits.
- for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) s.bw.flush32() c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } default: - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) s.bw.flush32() c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } } @@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) { for _, v := range in { s.count[v]++ } - m := uint32(0) + m, symlen := uint32(0), s.symbolLen for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + symlen = uint16(i) + 1 } + s.symbolLen = symlen return int(m) } diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index 926f5f153..cc05d0f7e 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error { // If the buffer is over-read an error is returned. func (s *Scratch) decompress() error { br := &s.bits - br.init(s.br.unread()) + if err := br.init(s.br.unread()); err != nil { + return err + } var s1, s2 decoder // Initialize and decode first state and symbol. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index 504a7be9d..e36d9742f 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() { // 2 bounds checks. 
v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index ec71f7a34..b4d7164e3 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -13,14 +13,6 @@ type bitWriter struct { out []byte } -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -60,6 +52,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index d9223a91e..4ee4fa18d 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { tmp := src[n : n+4] // tmp should be len 4 bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) } } else { for ; n >= 0; n -= 4 { @@ -484,34 +483,35 @@ func (s *Scratch) buildCTable() error { // Different from reference implementation. 
huffNode0 := s.nodes[0 : huffNodesLen+1] - for huffNode[nonNullRank].count == 0 { + for huffNode[nonNullRank].count() == 0 { nonNullRank-- } lowS := int16(nonNullRank) nodeRoot := nodeNb + lowS - 1 lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) nodeNb++ lowS -= 2 for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 + huffNode[n].setCount(1 << 30) } // fake entry, strong barrier - huffNode0[0].count = 1 << 31 + huffNode0[0].setCount(1 << 31) // create parents for nodeNb <= nodeRoot { var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n1 = lowS lowS-- } else { n1 = lowN lowN++ } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n2 = lowS lowS-- } else { @@ -519,18 +519,19 @@ func (s *Scratch) buildCTable() error { lowN++ } - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) nodeNb++ } // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 + huffNode[nodeRoot].setNbBits(0) for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } s.actualTableLog = s.setMaxHeight(int(nonNullRank)) maxNbBits := s.actualTableLog @@ -542,7 +543,7 @@ func (s *Scratch) buildCTable() error { var nbPerRank [tableLogMax + 1]uint16 var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ + nbPerRank[v.nbBits()]++ } // determine stating value per rank { @@ -557,7 +558,7 @@ func (s *Scratch) buildCTable() error { // push nbBits per symbol, symbol order for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits + s.cTable[v.symbol()].nBits = v.nbBits() } // assign value within rank, symbol order @@ -603,12 +604,12 @@ func (s *Scratch) huffSort() { pos := rank[r].current rank[r].current++ prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { + for pos > rank[r].base && c > prev.count() { nodes[pos&huffNodesMask] = prev pos-- prev = nodes[(pos-1)&huffNodesMask] } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) } } @@ -617,7 +618,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { huffNode := s.nodes[1 : huffNodesLen+1] //huffNode = huffNode[: huffNodesLen] - largestBits := huffNode[lastNonNull].nbBits + largestBits := huffNode[lastNonNull].nbBits() // early exit : no elt > maxNbBits if largestBits <= maxNbBits { @@ -627,14 +628,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { baseCost := int(1) << (largestBits - maxNbBits) n := uint32(lastNonNull) - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - 
huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) n-- } // n stops at huffNode[n].nbBits <= maxNbBits - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } // n end at index of smallest symbol using < maxNbBits @@ -655,10 +656,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { { currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { + if huffNode[pos].nbBits() >= currentNbBits { continue } - currentNbBits = huffNode[pos].nbBits // < maxNbBits + currentNbBits = huffNode[pos].nbBits() // < maxNbBits rankLast[maxNbBits-currentNbBits] = uint32(pos) } } @@ -675,8 +676,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { if lowPos == noSymbol { break } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() if highTotal <= lowTotal { break } @@ -692,13 +693,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // this rank is no longer empty rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] } - huffNode[rankLast[nBitsToDecrease]].nbBits++ + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) if rankLast[nBitsToDecrease] == 0 { /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol } else { rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ } } @@ -706,15 +708,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { for totalCost < 0 { /* Sometimes, cost correction overshoot */ if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } - huffNode[n+1].nbBits-- + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) rankLast[1] = n + 1 totalCost++ continue } - huffNode[rankLast[1]+1].nbBits-- + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) rankLast[1]++ totalCost++ } @@ -722,9 +724,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { return maxNbBits } -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. 
+type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 } + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 42a237eac..54bd08b25 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { b, err := fse.Decompress(in[:iSize], s.fse) s.fse.Out = nil if err != nil { - return s, nil, err + return s, nil, fmt.Errorf("fse decompress returned: %w", err) } if len(b) > 255 { return s, nil, errors.New("corrupt input: output table too large") @@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { switch d.actualTableLog { case 8: - const shift = 8 - 8 + const shift = 0 for br.off >= 4 { br.fillFast() v := dt[uint8(br.value>>(56+shift))] diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s index 8d2187a2c..c4c7ab2d1 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -4,360 +4,349 @@ // func decompress4x_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), AX MOVBQZX 8(AX), DI - MOVQ 16(AX), SI - MOVQ 48(AX), BX - MOVQ 24(AX), R9 - MOVQ 32(AX), R10 - MOVQ (AX), R11 + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 // Main loop main_loop: - MOVQ SI, R8 - CMPQ R8, BX + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), AX - SUBQ $0x20, R13 + MOVQ 24(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ (R11), R14 + MOVQ (R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 24(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get 
coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), AX - SUBQ $0x20, R13 + MOVQ 72(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 48(R11), R14 + MOVQ 48(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 72(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), AX - SUBQ $0x20, R13 + MOVQ 120(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 96(R11), R14 + MOVQ 96(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 120(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), AX - SUBQ 
$0x20, R13 + MOVQ 168(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 144(R11), R14 + MOVQ 144(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 168(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x02, SI + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX TESTB DL, DL JZ main_loop MOVQ ctx+0(FP), AX - SUBQ 16(AX), SI - SHLQ $0x02, SI - MOVQ SI, 40(AX) + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) RET // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), CX MOVBQZX 8(CX), DI MOVQ 16(CX), BX MOVQ 48(CX), SI - MOVQ 24(CX), R9 - MOVQ 32(CX), R10 - MOVQ (CX), R11 + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 // Main loop main_loop: - MOVQ BX, R8 - CMPQ R8, SI + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ (R11), R15 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 24(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 
:= br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -365,88 +354,86 @@ skip_fill0: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 48(R11), R15 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 72(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -454,88 +441,86 @@ skip_fill1: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 96(R11), R15 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 120(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // 
exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -543,88 +528,86 @@ skip_fill2: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 144(R11), R15 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 168(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -632,11 
+615,12 @@ skip_fill3: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) + MOVQ R11, 176(R10) + MOVB R12, 184(R10) ADDQ $0x04, BX TESTB DL, DL JZ main_loop @@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -667,7 +651,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -744,7 +728,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) @@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -772,7 +756,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -839,7 +823,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 298c4f8e9..2aa6a95a0 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -87,22 +87,32 @@ func emitCopy(dst []byte, offset, length int) int { return i + 2 } -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - func hash(u, shift uint32) uint32 { return (u * 0x1e35a7bd) >> shift } +// EncodeBlockInto exposes encodeBlock but checks dst size. +func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It // assumes that the varint-encoded length of the decompressed bytes has already // been written. diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 65b38abed..bdd49c8b2 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -304,7 +304,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. 
-var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0)) +var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 6b9929ddf..9f17ce601 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "errors" "fmt" + "hash/crc32" "io" "os" "path/filepath" @@ -192,16 +193,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.dataStorage) < cSize { + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. if b.lowMem || cSize > maxCompressedBlockSize { b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debugDecoder { @@ -210,6 +209,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return err } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } return nil } @@ -441,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err } } var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) @@ -587,7 +592,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { } seq.fse.setRLE(symb) if debugDecoder { - printf("RLE set to %+v, code: %v", symb, v) + printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 12e8f6f0b..fd4a36f73 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -473,7 +473,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { return b.encodeLits(b.literals, rawAllLits) } // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 5) + saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { if org == nil { return errIncompressible @@ -779,10 +779,13 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { } b.output = wr.out + // Maybe even add a bigger margin. if len(b.output)-3-bhOffset >= b.size { - // Maybe even add a bigger margin. + // Discard and encode as raw block. + b.output = b.encodeRawTo(b.output[:bhOffset], org) + b.popOffsets() b.litEnc.Reuse = huff0.ReusePolicyNone - return errIncompressible + return nil } // Size is output minus block header. 
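The `blockenc.go` hunk above changes how incompressible input is handled: instead of surfacing `errIncompressible` to the caller, the encoder now discards the oversized entropy-coded output and re-emits the data as a raw block. Comparing the candidate output against the raw size and keeping the smaller encoding is the standard fallback in block compressors; the sketch below illustrates the pattern with `flate` and an illustrative one-byte tag rather than the real zstd block header.

```go
package main

import (
	"bytes"
	"compress/flate"
	"crypto/rand"
	"fmt"
)

// encodeBlock returns the smaller of a compressed and a raw ("stored")
// encoding of src. A leading tag byte tells the decoder which form
// follows; the tag values and framing are illustrative, not zstd's.
func encodeBlock(src []byte) []byte {
	var buf bytes.Buffer
	w, _ := flate.NewWriter(&buf, flate.BestSpeed)
	w.Write(src)
	w.Close()

	if buf.Len() >= len(src) {
		// Incompressible: fall back to a raw block instead of erroring.
		return append([]byte{0x00}, src...)
	}
	return append([]byte{0x01}, buf.Bytes()...)
}

func main() {
	random := make([]byte, 1024)
	rand.Read(random) // random bytes are incompressible
	text := bytes.Repeat([]byte("abcd"), 256)

	fmt.Println(encodeBlock(random)[0]) // 0: stored as a raw block
	fmt.Println(encodeBlock(text)[0])   // 1: stored compressed
}
```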
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 176788f25..55a388553 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { - return 0, nil + return 0, io.ErrUnexpectedEOF } r := bb[0] *b = bb[1:] @@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { } func (r *readerWrapper) readByte() (byte, error) { - n2, err := r.r.Read(r.tmp[:1]) + n2, err := io.ReadFull(r.r, r.tmp[:1]) if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 30459cd3f..f04aaa21e 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -40,8 +40,7 @@ type Decoder struct { frame *frameDec // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict + dicts map[uint32]*dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -103,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Transfer option dicts. - d.dicts = make(map[uint32]dict, len(d.o.dicts)) + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) for _, dc := range d.o.dicts { d.dicts[dc.id] = dc } @@ -341,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } return dst, err } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(&dict) + if err = d.setDict(frame); err != nil { + return nil, err } if frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -463,12 +455,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { } if len(next.b) > 0 { - n, err := d.current.crc.Write(next.b) - if err == nil { - if n != len(next.b) { - d.current.err = io.ErrShortWrite - } - } + d.current.crc.Write(next.b) } if next.err == nil && next.d != nil && next.d.hasCRC { got := uint32(d.current.crc.Sum64()) @@ -495,18 +482,12 @@ func (d *Decoder) nextBlockSync() (ok bool) { if !d.syncStream.inFrame { d.frame.history.reset() d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } if d.current.err != nil { return false } - if d.frame.DictionaryID != nil { - dict, ok := d.dicts[*d.frame.DictionaryID] - if !ok { - d.current.err = ErrUnknownDictionary - return false - } else { - d.frame.history.setDict(&dict) - } - } if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { d.current.err = ErrDecoderSizeExceeded return false @@ -865,13 +846,8 @@ decodeStream: if debugDecoder && err != nil { println("Frame decoder returned", err) } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } + if err == nil { + err = d.setDict(frame) } if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -953,3 +929,20 @@ decodeStream: hist.reset() d.frame.history.b = frameHistCache } + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok 
{ + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index f42448e69..774c5f00f 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,6 +6,8 @@ package zstd import ( "errors" + "fmt" + "math/bits" "runtime" ) @@ -18,7 +20,7 @@ type decoderOptions struct { concurrent int maxDecodedSize uint64 maxWindowSize uint64 - dicts []dict + dicts []*dict ignoreChecksum bool limitToCap bool decodeBufsBelow int @@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption { } // WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithDecoderDicts(dicts ...[]byte) DOption { return func(o *decoderOptions) error { for _, b := range dicts { @@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption { if err != nil { return err } - o.dicts = append(o.dicts, *d) + o.dicts = append(o.dicts, d) } return nil } } +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + // WithDecoderMaxWindow allows to set a maximum window size for decodes. // This allows rejecting packets that will cause big memory usage. // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index b2725f77b..ca0951452 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -21,6 +21,9 @@ type dict struct { const dictMagic = "\x37\xa4\x30\xec" +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + // ID returns the dictionary id or 0 if d is nil. func (d *dict) ID() uint32 { if d == nil { @@ -29,14 +32,38 @@ func (d *dict) ID() uint32 { return d.id } -// DictContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) DictContentSize() int { +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { if d == nil { return 0 } return len(d.content) } +// Content returns the dictionary content. 
+func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + // Load a dictionary as described in // https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format func loadDict(b []byte) (*dict, error) { @@ -61,7 +88,7 @@ func loadDict(b []byte) (*dict, error) { var err error d.litEnc, b, err = huff0.ReadTable(b[8:], nil) if err != nil { - return nil, err + return nil, fmt.Errorf("loading literal table: %w", err) } d.litEnc.Reuse = huff0.ReusePolicyMust @@ -119,3 +146,16 @@ func loadDict(b []byte) (*dict, error) { return &d, nil } + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index bfb2e146c..e008b9929 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -149,7 +149,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { if singleBlock { e.lowMem = true } - e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) e.lowMem = low } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 830f5ba74..9819d4145 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -32,10 +32,9 @@ type match struct { length int32 rep int32 est int32 - _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes } -const highScore = 25000 +const highScore = maxMatchLen * 8 // estBits will estimate output bits from predefined tables. func (m *match) estBits(bitsPerByte int32) { @@ -160,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { // nextEmit is where in src the next emitLiteral should start from. nextEmit := s - cv := load6432(src, s) // Relative offsets offset1 := int32(blk.recentOffsets[0]) @@ -174,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - _ = addLiterals if debugEncoder { println("recent offsets:", blk.recentOffsets) @@ -189,53 +186,96 @@ encodeLoop: panic("offset0 was 0") } - bestOf := func(a, b *match) *match { - if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { - return a - } - return b - } - const goodEnough = 100 + const goodEnough = 250 + + cv := load6432(src, s) nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] - matchAt := func(offset int32, s int32, first uint32, rep int32) match { + // Set m to a match at offset if it looks like that will improve compression. 
+ improve := func(m *match, offset int32, s int32, first uint32, rep int32) { if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{s: s, est: highScore} + return } if debugAsserts { + if offset <= 0 { + panic(offset) + } if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) } } - m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - m.estBits(bitsPerByte) - return m + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if rep < 0 { + // Extend candidate match backwards as far as possible. + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } } - m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) - m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) - m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) - m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) - best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) if canRepeat && best.length < goodEnough { - cv32 := uint32(cv >> 8) - spp := s + 1 - m1 := matchAt(spp-offset1, spp, cv32, 1) - m2 := matchAt(spp-offset2, spp, cv32, 2) - m3 := matchAt(spp-offset3, spp, cv32, 3) - best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) - if best.length > 0 { - cv32 = uint32(cv >> 24) - spp += 2 - m1 := matchAt(spp-offset1, spp, cv32, 1) - m2 := matchAt(spp-offset2, spp, cv32, 2) - m3 := matchAt(spp-offset3, spp, cv32, 3) - best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } } } // Load next and check... 
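The rewritten match search above folds the old `matchAt`/`bestOf` pair into a single `improve` closure that updates the running best match in place, and it adds a quick-reject probe: once a long best match exists, a candidate is discarded after one 4-byte comparison near the end of the length it would need to reach, before paying for a full `matchlen` scan. The sketch below is a simplified version of that probe; the helper names and framing are illustrative, not the encoder's actual code.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// load32 reads 4 little-endian bytes at index i.
func load32(b []byte, i int) uint32 {
	return binary.LittleEndian.Uint32(b[i:])
}

// matchLen counts matching bytes between src[a:] and src[b:], a < b.
func matchLen(src []byte, a, b int) int {
	n := 0
	for b+n < len(src) && src[a+n] == src[b+n] {
		n++
	}
	return n
}

// betterThan reports whether the candidate at offset can beat a best
// match of length bestLen starting at s. It probes 4 bytes just before
// the length the candidate must reach; on a mismatch it rejects the
// candidate without running the full matchLen scan.
func betterThan(src []byte, s, offset, bestLen int) bool {
	probe := bestLen - 4
	if probe < 0 || s+probe+4 > len(src) || offset+probe+4 > len(src) {
		return matchLen(src, offset, s) > bestLen // full scan fallback
	}
	if load32(src, offset+probe) != load32(src, s+probe) {
		return false // quick reject: cannot reach bestLen
	}
	return matchLen(src, offset, s) > bestLen
}

func main() {
	src := []byte("abcdefgh_abcdefgh_abcdXXgh")
	// Candidate at 0 vs position 9: a long repeat, passes the probe.
	fmt.Println(betterThan(src, 9, 0, 4)) // true (matches 13 > 4)
	// Candidate at 0 vs position 18: rejected by the 4-byte probe.
	fmt.Println(betterThan(src, 18, 0, 8)) // false
}
```

The probe is cheap because a single mismatched 32-bit load proves the candidate cannot exceed the current best, which is the common case once a good match has been found.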
@@ -250,47 +290,45 @@ encodeLoop: if s >= sLimit { break encodeLoop } - cv = load6432(src, s) continue } - s++ candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s) - cv2 := load6432(src, s+1) + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] // Short at s+1 - m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) // Long at s+1, s+2 - m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) - m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) - m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) - m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) - best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) if false { // Short at s+3. // Too often worse... - m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) - best = bestOf(best, &m) + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) } - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. - if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - const skipBeginning = 2 - if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { - m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - bestEnd := bestOf(best, &m) - if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { - m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - bestEnd = bestOf(bestEnd, &m) + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } } - best = bestEnd } } } @@ -303,51 +341,34 @@ encodeLoop: // We have a match, we can store the forward value if best.rep > 0 { - s = best.s var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. 
- start := best.s - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - repIndex := best.offset - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ + if debugAsserts && s <= nextEmit { + panic("s <= nextEmit") } - addLiterals(&seq, start) + addLiterals(&seq, best.s) - // rep 0 - seq.offset = uint32(best.rep) + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) if debugSequences { println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - // Index match start+1 (long) -> s - 1 - index0 := s + // Index old s + 1 -> s - 1 + index0 := s + 1 s = best.s + best.length nextEmit = s if s >= sLimit { if debugEncoder { println("repeat ended", s, best.length) - } break encodeLoop } // Index skipped... off := index0 + e.cur - for index0 < s-1 { + for index0 < s { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -357,17 +378,19 @@ encodeLoop: index0++ } switch best.rep { - case 2: + case 2, 4 | 1: offset1, offset2 = offset2, offset1 - case 3: + case 3, 4 | 2: offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 } - cv = load6432(src, s) continue } // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. + index0 := s + 1 s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 @@ -380,22 +403,9 @@ encodeLoop: panic("invalid offset") } - // Extend the n-byte match as long as possible. - l := best.length - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - // Write our sequence var seq seq + l := best.length seq.litLen = uint32(s - nextEmit) seq.matchLen = uint32(l - zstdMinMatch) if seq.litLen > 0 { @@ -412,10 +422,8 @@ encodeLoop: break encodeLoop } - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - // every entry - for index0 < s-1 { + // Index old s + 1 -> s - 1 + for index0 < s { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -424,50 +432,6 @@ encodeLoop: e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} index0++ } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. 
- seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } } if int(nextEmit) < len(src) { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 315b1a8f2..cbc626eec 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -133,8 +133,7 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. @@ -645,8 +644,7 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 65c6c36dc..4de0aed0d 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -277,23 +277,9 @@ func (e *Encoder) nextBlock(final bool) error { s.eofWritten = true } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.err = err - return err + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err } _, s.err = s.w.Write(blk.output) s.nWritten += int64(len(blk.output)) @@ -343,22 +329,8 @@ func (e *Encoder) nextBlock(final bool) error { } s.wWg.Done() }() - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.writeErr = err + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { return } _, s.writeErr = s.w.Write(blk.output) @@ -568,25 +540,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If we got the exact same number of literals as input, // assume the literals cannot be compressed. 
- err := errIncompressible oldout := blk.output - if len(blk.literals) != len(src) || len(src) != e.o.blockSize { - // Output directly to dst - blk.output = dst - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } + // Output directly to dst + blk.output = dst - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, src) - case nil: - dst = blk.output - default: + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { panic(err) } + dst = blk.output blk.output = oldout } else { enc.Reset(e.o.dict, false) @@ -605,25 +567,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(src) == 0 { blk.last = true } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, todo) - blk.popOffsets() - case nil: - dst = append(dst, blk.output...) - default: + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { panic(err) } + dst = append(dst, blk.output...) blk.reset(nil) } } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 6015f498a..faaf81921 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "math" + "math/bits" "runtime" "strings" ) @@ -38,7 +39,7 @@ func (o *encoderOptions) setDefault() { blockSize: maxCompressedBlockSize, windowSize: 8 << 20, level: SpeedDefault, - allLitEntropy: true, + allLitEntropy: false, lowMem: false, } } @@ -128,7 +129,7 @@ func WithEncoderPadding(n int) EOption { } // No need to waste our time. if n == 1 { - o.pad = 0 + n = 0 } if n > 1<<30 { return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") @@ -237,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption { } } if !o.customALEntropy { - o.allLitEntropy = l > SpeedFastest + o.allLitEntropy = l > SpeedDefault } return nil @@ -305,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption { } // WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// // The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithEncoderDict(dict []byte) EOption { return func(o *encoderOptions) error { d, err := loadDict(dict) @@ -316,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption { return nil } } + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. 
+func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 65984bf07..53e160f7e 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -29,7 +29,7 @@ type frameDec struct { FrameContentSize uint64 - DictionaryID *uint32 + DictionaryID uint32 HasCheckSum bool SingleSegment bool } @@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error { switch err { case io.EOF, io.ErrUnexpectedEOF: return io.EOF - default: - return err case nil: signature[0] = b[0] + default: + return err } // Read the rest, don't allow io.ErrUnexpectedEOF b, err = br.readSmall(3) switch err { case io.EOF: return io.EOF - default: - return err case nil: copy(signature[1:], b) + default: + return err } if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { @@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil + d.DictionaryID = 0 if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error { if debugDecoder { println("Dict size", size, "ID:", id) } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } + d.DictionaryID = id } // Read Frame_Content_Size @@ -297,13 +293,9 @@ func (d *frameDec) next(block *blockDec) error { return nil } -// checkCRC will check the checksum if the frame has one. +// checkCRC will check the checksum, assuming the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { - if !d.HasCheckSum { - return nil - } - // We can overwrite upper tmp now buf, err := d.rawInput.readSmall(4) if err != nil { @@ -311,10 +303,6 @@ func (d *frameDec) checkCRC() error { return err } - if d.o.ignoreChecksum { - return nil - } - want := binary.LittleEndian.Uint32(buf[:4]) got := uint32(d.crc.Sum64()) @@ -330,17 +318,13 @@ func (d *frameDec) checkCRC() error { return nil } -// consumeCRC reads the checksum data if the frame has one. +// consumeCRC skips over the checksum, assuming the frame has one. func (d *frameDec) consumeCRC() error { - if d.HasCheckSum { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) } - - return nil + return err } // runDecoder will run the decoder for the remainder of the frame. 
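For context, a hypothetical round trip with the new raw-dictionary option added above. The decoder-side WithDecoderDictRaw counterpart is assumed to ship in the same compress release; the ID and content below are arbitrary illustration values:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Arbitrary ID and content; the same pair must be registered on both sides,
	// since the raw bytes seed the encoder history rather than a trained table.
	raw := []byte("frequently repeated shared prefix data")

	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(42, raw))
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll([]byte("payload resembling the shared prefix data"), nil)

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDictRaw(42, raw))
	if err != nil {
		panic(err)
	}
	out, err := dec.DecodeAll(compressed, nil)
	fmt.Println(string(out), err)
}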
@@ -419,15 +403,8 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { if d.o.ignoreChecksum { err = d.consumeCRC() } else { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() - } - } + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() } } } diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 000000000..f41932b7a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 000000000..9a7655c0f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 000000000..57b9c31c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. 
+func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index f833d1541..9405fcf10 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -236,9 +236,12 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { maxBlockSize = s.windowSize } + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } for i := seqs - 1; i >= 0; i-- { if br.overread() { - printf("reading sequence %d, exceeded available data\n", seqs-i) + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) return io.ErrUnexpectedEOF } var ll, mo, ml int @@ -314,9 +317,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { } size := ll + ml + len(out) if size-startSize > maxBlockSize { - if size-startSize == 424242 { - panic("here") - } return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } if size > cap(out) { @@ -427,8 +427,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { } } - // Check if space for literals - if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 191384adf..8adabd828 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -5,6 +5,7 @@ package zstd import ( "fmt" + "io" "github.com/klauspost/compress/internal/cpuinfo" ) @@ -134,6 +135,9 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ctx.ll, ctx.litRemain+ctx.ll) + case errorOverread: + return true, io.ErrUnexpectedEOF + case errorNotEnoughSpace: size := ctx.outPosition + ctx.ll + ctx.ml if debugDecoder { @@ -148,7 +152,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { s.seqSize += ctx.litRemain if s.seqSize > maxBlockSize { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } err := br.close() if err != nil { @@ -203,6 +206,9 @@ const errorNotEnoughLiterals = 4 // error reported when capacity of `out` is too small const errorNotEnoughSpace = 5 +// error reported when bits are overread. +const errorOverread = 6 + // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. // // Please refer to seqdec_generic.go for the reference implementation. 
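The generic matchLen above (now split out of zstd.go, with an amd64 assembly version alongside) is easy to exercise on its own. A runnable snippet using the same function body, showing the common-prefix contract — the caller passes the shorter slice first:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen is the generic fallback from matchlen_generic.go above:
// compare 8 bytes at a time via XOR, then finish byte by byte.
func matchLen(a, b []byte) (n int) {
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("zstandard!"), []byte("zstandard! frame"))) // 10
}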
@@ -248,6 +254,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { litRemain: len(s.literals), } + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + s.seqSize = 0 lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 var errCode int @@ -278,6 +288,8 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { case errorNotEnoughLiterals: ll := ctx.seqs[i].ll return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF } return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) @@ -292,6 +304,9 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { if s.seqSize > maxBlockSize { return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } err := br.close() if err != nil { printf("Closing sequences: %v, %+v\n", err, *br) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index b94993a07..b6f4ba6fc 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop: sequenceDecs_decode_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_end + JLE sequenceDecs_decode_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_amd64_fill_end SHLQ $0x08, DX @@ -49,6 +49,10 @@ sequenceDecs_decode_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_amd64_fill_byte_by_byte +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_amd64_fill_end: // Update offset MOVQ R9, AX @@ -105,7 +109,7 @@ sequenceDecs_decode_amd64_ml_update_zero: sequenceDecs_decode_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_end + JLE sequenceDecs_decode_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_amd64_fill_2_end SHLQ $0x08, DX @@ -116,6 +120,10 @@ sequenceDecs_decode_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -320,6 +328,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 @@ -356,7 +369,7 @@ sequenceDecs_decode_56_amd64_main_loop: sequenceDecs_decode_56_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_56_amd64_fill_end + JLE sequenceDecs_decode_56_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_56_amd64_fill_end SHLQ $0x08, DX @@ -367,6 +380,10 @@ sequenceDecs_decode_56_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_56_amd64_fill_end: // Update offset MOVQ R9, AX @@ -613,6 +630,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread 
error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 @@ -649,7 +671,7 @@ sequenceDecs_decode_bmi2_main_loop: sequenceDecs_decode_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_end + JLE sequenceDecs_decode_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_bmi2_fill_end SHLQ $0x08, AX @@ -660,6 +682,10 @@ sequenceDecs_decode_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_bmi2_fill_byte_by_byte +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -700,7 +726,7 @@ sequenceDecs_decode_bmi2_fill_end: sequenceDecs_decode_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_end + JLE sequenceDecs_decode_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_bmi2_fill_2_end SHLQ $0x08, AX @@ -711,6 +737,10 @@ sequenceDecs_decode_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -889,6 +919,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 @@ -925,7 +960,7 @@ sequenceDecs_decode_56_bmi2_main_loop: sequenceDecs_decode_56_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_end + JLE sequenceDecs_decode_56_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_56_bmi2_fill_end SHLQ $0x08, AX @@ -936,6 +971,10 @@ sequenceDecs_decode_56_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_56_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -1140,6 +1179,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // Requires: SSE TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 @@ -1804,7 +1848,7 @@ sequenceDecs_decodeSync_amd64_main_loop: sequenceDecs_decodeSync_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_end + JLE sequenceDecs_decodeSync_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_amd64_fill_end SHLQ $0x08, DX @@ -1815,6 +1859,10 @@ sequenceDecs_decodeSync_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_amd64_fill_end: // Update offset MOVQ R9, AX @@ -1871,7 +1919,7 @@ sequenceDecs_decodeSync_amd64_ml_update_zero: sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_end + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_amd64_fill_2_end SHLQ $0x08, DX @@ -1882,6 +1930,10 @@ 
sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -2291,6 +2343,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -2356,7 +2413,7 @@ sequenceDecs_decodeSync_bmi2_main_loop: sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_end + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_bmi2_fill_end SHLQ $0x08, AX @@ -2367,6 +2424,10 @@ sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -2407,7 +2468,7 @@ sequenceDecs_decodeSync_bmi2_fill_end: sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_bmi2_fill_2_end SHLQ $0x08, AX @@ -2418,6 +2479,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -2801,6 +2866,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -2866,7 +2936,7 @@ sequenceDecs_decodeSync_safe_amd64_main_loop: sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_safe_amd64_fill_end SHLQ $0x08, DX @@ -2877,6 +2947,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_amd64_fill_end: // Update offset MOVQ R9, AX @@ -2933,7 +3007,7 @@ sequenceDecs_decodeSync_safe_amd64_ml_update_zero: sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end SHLQ $0x08, DX @@ -2944,6 +3018,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -3455,6 +3533,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error 
error_not_enough_space: MOVQ ctx+16(FP), AX @@ -3520,7 +3603,7 @@ sequenceDecs_decodeSync_safe_bmi2_main_loop: sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_safe_bmi2_fill_end SHLQ $0x08, AX @@ -3531,6 +3614,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -3571,7 +3658,7 @@ sequenceDecs_decodeSync_safe_bmi2_fill_end: sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end SHLQ $0x08, AX @@ -3582,6 +3669,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -4067,6 +4158,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index b1886f7c7..4be7cc736 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -9,7 +9,6 @@ import ( "errors" "log" "math" - "math/bits" ) // enable debug printing @@ -72,7 +71,6 @@ var ( ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. ErrUnknownDictionary = errors.New("unknown dictionary") // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. @@ -107,33 +105,12 @@ func printf(format string, a ...interface{}) { } } -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. 
-func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} - func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) } type byter interface { diff --git a/vendor/modules.txt b/vendor/modules.txt index b6c2daa40..77ab674b7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,8 +1,8 @@ # github.com/HdrHistogram/hdrhistogram-go v1.1.2 ## explicit; go 1.14 -# github.com/Shopify/sarama v1.38.1 +# github.com/IBM/sarama v1.40.1 ## explicit; go 1.17 -github.com/Shopify/sarama +github.com/IBM/sarama # github.com/aws/aws-sdk-go-v2 v1.23.0 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/aws @@ -872,8 +872,8 @@ github.com/jcmturner/rpc/v2/ndr # github.com/julienschmidt/httprouter v1.3.0 ## explicit; go 1.7 github.com/julienschmidt/httprouter -# github.com/klauspost/compress v1.15.14 -## explicit; go 1.17 +# github.com/klauspost/compress v1.16.6 +## explicit; go 1.18 github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0
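One last note on the load3232/load6432 change in zstd.go above: swapping b[i:] for b[:len(b):len(b)][i:] uses a full slice expression to pin the capacity to the length, so no downstream reslice can observe spare capacity past len(b). A small sketch of the updated helper (the rationale is an inference; the mechanics are as written):

package main

import (
	"encoding/binary"
	"fmt"
)

// load6432 as updated above: read 8 little-endian bytes at offset i.
// The three-index slice caps capacity at len(b), so b[i:] cannot expose
// bytes beyond the original slice length.
func load6432(b []byte, i int32) uint64 {
	return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
}

func main() {
	buf := []byte{1, 0, 0, 0, 0, 0, 0, 0, 0xff}
	fmt.Printf("%#x\n", load6432(buf, 0)) // 0x1
	fmt.Printf("%#x\n", load6432(buf, 1)) // 0xff00000000000000
}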