diff --git a/.github/workflows/smoke-tests.yml b/.github/workflows/smoke-tests.yml
index 19680b973b..600f8fcd13 100644
--- a/.github/workflows/smoke-tests.yml
+++ b/.github/workflows/smoke-tests.yml
@@ -37,7 +37,7 @@ jobs:
# Related to issue https://github.com/DataDog/dd-trace-go/issues/1607
name: 'go get -u smoke test'
runs-on: ubuntu-latest
- if: github.repository_owner == 'DataDog' # only run on DataDog's repository, not in forks
+ if: github.repository_owner == 'DataDog' && github.ref_type == 'tag' && contains(github.ref_name, 'v2.0.0-beta.3') # only run on DataDog's repository (not forks), and only for the v2.0.0-beta.3 tag
env:
PACKAGES: ./internal/... ./ddtrace/... ./profiler/... ./appsec/...
steps:
diff --git a/.github/workflows/system-tests.yml b/.github/workflows/system-tests.yml
index c5d95b7ca0..be7b3ae27b 100644
--- a/.github/workflows/system-tests.yml
+++ b/.github/workflows/system-tests.yml
@@ -12,7 +12,7 @@ on:
- main
- release-v*
tags:
- - "**"
+ - "v2.0.0-beta.3"
pull_request:
branches:
- "**"
diff --git a/.github/workflows/unit-integration-tests.yml b/.github/workflows/unit-integration-tests.yml
index cf0996709e..a379877448 100644
--- a/.github/workflows/unit-integration-tests.yml
+++ b/.github/workflows/unit-integration-tests.yml
@@ -180,20 +180,24 @@ jobs:
image: memcached:1.5.9
ports:
- 11211:11211
- zookeeper:
- image: bitnami/zookeeper:latest
- env:
- ALLOW_ANONYMOUS_LOGIN: "yes"
- ports:
- - 2181:2181
kafka:
- image: darccio/kafka:2.13-2.8.1
+ image: confluentinc/confluent-local:7.5.0
env:
- KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092
- KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
- KAFKA_CREATE_TOPICS: gotest:1:1,gosegtest:1:1
- KAFKA_BROKER_ID: 1
+ KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9093,BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9094"
+ KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://localhost:9093,BROKER://localhost:9092"
+ KAFKA_REST_BOOTSTRAP_SERVERS: "PLAINTEXT://0.0.0.0:9093,BROKER://0.0.0.0:9092"
+ KAFKA_CONTROLLER_QUORUM_VOTERS: "1@localhost:9094"
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "BROKER:PLAINTEXT,PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT"
+ KAFKA_INTER_BROKER_LISTENER_NAME: "BROKER"
+ KAFKA_BROKER_ID: "1"
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
+ KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1"
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "1"
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: "1"
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: "0"
+ KAFKA_NODE_ID: "1"
+ KAFKA_PROCESS_ROLES: "broker,controller"
+ KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
ports:
- 9092:9092
localstack:
diff --git a/contrib/99designs/gqlgen/appsec_test.go b/contrib/99designs/gqlgen/appsec_test.go
index 65f90b4fff..4d01ca003f 100644
--- a/contrib/99designs/gqlgen/appsec_test.go
+++ b/contrib/99designs/gqlgen/appsec_test.go
@@ -106,7 +106,7 @@ func TestAppSec(t *testing.T) {
// The last finished span (which is GraphQL entry) should have the "_dd.appsec.enabled" tag.
span := spans[len(spans)-1]
- require.Equal(t, 1, span.Tag("_dd.appsec.enabled"))
+ require.Equal(t, float64(1), span.Tag("_dd.appsec.enabled"))
type ddAppsecJSON struct {
Triggers []struct {
diff --git a/contrib/99designs/gqlgen/go.mod b/contrib/99designs/gqlgen/go.mod
index d1b015cf7c..0c65bec089 100644
--- a/contrib/99designs/gqlgen/go.mod
+++ b/contrib/99designs/gqlgen/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/99designs/gqlgen/v2
go 1.22.0
-toolchain go1.23.1
-
require (
github.com/99designs/gqlgen v0.17.36
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
github.com/vektah/gqlparser/v2 v2.5.16
)
diff --git a/contrib/99designs/gqlgen/tracer_test.go b/contrib/99designs/gqlgen/tracer_test.go
index ae3a97927e..700ee816a6 100644
--- a/contrib/99designs/gqlgen/tracer_test.go
+++ b/contrib/99designs/gqlgen/tracer_test.go
@@ -260,7 +260,7 @@ func TestInterceptOperation(t *testing.T) {
assertions.ElementsMatch(resNames, []string{readOp, parsingOp, validationOp, `mutation Name { name }`})
assertions.ElementsMatch(opNames, []string{readOp, parsingOp, validationOp, "graphql.mutation"})
assertions.NotNil(root)
- assertions.NotNil(root.Tag(ext.Error))
+ assertions.NotNil(root.Tag(ext.ErrorMsg))
})
t.Run("intercept operation with graphQL Subscription", func(t *testing.T) {
diff --git a/contrib/IBM/sarama/go.mod b/contrib/IBM/sarama/go.mod
index a8e1b5136b..4ea058d9b9 100644
--- a/contrib/IBM/sarama/go.mod
+++ b/contrib/IBM/sarama/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/IBM/sarama/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/IBM/sarama v1.40.0
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/Shopify/sarama/go.mod b/contrib/Shopify/sarama/go.mod
index fda221c4c9..64e1fa0d93 100644
--- a/contrib/Shopify/sarama/go.mod
+++ b/contrib/Shopify/sarama/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/Shopify/sarama/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/Shopify/sarama v1.38.1
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/aws/aws-sdk-go-v2/aws/aws.go b/contrib/aws/aws-sdk-go-v2/aws/aws.go
index 5a5c4aa98d..59b42d146a 100644
--- a/contrib/aws/aws-sdk-go-v2/aws/aws.go
+++ b/contrib/aws/aws-sdk-go-v2/aws/aws.go
@@ -24,6 +24,9 @@ import (
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
+ eventBridgeTracer "github.com/DataDog/dd-trace-go/v2/contrib/aws/eventbridge"
+ snsTracer "github.com/DataDog/dd-trace-go/v2/contrib/aws/sns"
+ sqsTracer "github.com/DataDog/dd-trace-go/v2/contrib/aws/sqs"
"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation"
@@ -102,6 +105,16 @@ func (mw *traceMiddleware) startTraceMiddleware(stack *middleware.Stack) error {
}
span, spanctx := tracer.StartSpanFromContext(ctx, spanName(serviceID, operation), opts...)
+ // Inject trace context
+ switch serviceID {
+ case "SQS":
+ sqsTracer.EnrichOperation(span, in, operation)
+ case "SNS":
+ snsTracer.EnrichOperation(span, in, operation)
+ case "EventBridge":
+ eventBridgeTracer.EnrichOperation(span, in, operation)
+ }
+
// Handle initialize and continue through the middleware chain.
out, metadata, err = next.HandleInitialize(spanctx, in)
if err != nil && (mw.cfg.errCheck == nil || mw.cfg.errCheck(err)) {
diff --git a/contrib/aws/aws-sdk-go-v2/aws/aws_test.go b/contrib/aws/aws-sdk-go-v2/aws/aws_test.go
index c3f7c14301..38125c387c 100644
--- a/contrib/aws/aws-sdk-go-v2/aws/aws_test.go
+++ b/contrib/aws/aws-sdk-go-v2/aws/aws_test.go
@@ -8,6 +8,7 @@ package aws
import (
"context"
"encoding/base64"
+ "encoding/json"
"net/http"
"net/http/httptest"
"net/url"
@@ -19,6 +20,7 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/dynamodb"
"github.com/aws/aws-sdk-go-v2/service/eventbridge"
+ eventBridgeTypes "github.com/aws/aws-sdk-go-v2/service/eventbridge/types"
"github.com/aws/aws-sdk-go-v2/service/kinesis"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/sfn"
@@ -254,6 +256,66 @@ func TestAppendMiddlewareSqsReceiveMessage(t *testing.T) {
}
}
+func TestAppendMiddlewareSqsSendMessage(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ expectedStatusCode := 200
+ server := mockAWS(expectedStatusCode)
+ defer server.Close()
+
+ resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ PartitionID: "aws",
+ URL: server.URL,
+ SigningRegion: "eu-west-1",
+ }, nil
+ })
+
+ awsCfg := aws.Config{
+ Region: "eu-west-1",
+ Credentials: aws.AnonymousCredentials{},
+ EndpointResolver: resolver,
+ }
+
+ AppendMiddleware(&awsCfg)
+
+ sqsClient := sqs.NewFromConfig(awsCfg)
+ sendMessageInput := &sqs.SendMessageInput{
+ MessageBody: aws.String("test message"),
+ QueueUrl: aws.String("https://sqs.us-west-2.amazonaws.com/123456789012/MyQueueName"),
+ }
+ _, err := sqsClient.SendMessage(context.Background(), sendMessageInput)
+ require.NoError(t, err)
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 1)
+
+ s := spans[0]
+ assert.Equal(t, "SQS.request", s.OperationName())
+ assert.Equal(t, "SendMessage", s.Tag("aws.operation"))
+ assert.Equal(t, "SQS", s.Tag("aws.service"))
+ assert.Equal(t, "MyQueueName", s.Tag("queuename"))
+ assert.Equal(t, "SQS.SendMessage", s.Tag(ext.ResourceName))
+ assert.Equal(t, "aws.SQS", s.Tag(ext.ServiceName))
+
+ // Check for trace context injection
+ assert.NotNil(t, sendMessageInput.MessageAttributes)
+ assert.Contains(t, sendMessageInput.MessageAttributes, "_datadog")
+ ddAttr := sendMessageInput.MessageAttributes["_datadog"]
+ assert.Equal(t, "String", *ddAttr.DataType)
+ assert.NotEmpty(t, *ddAttr.StringValue)
+
+ // Decode and verify the injected trace context
+ var traceContext map[string]string
+ err = json.Unmarshal([]byte(*ddAttr.StringValue), &traceContext)
+ assert.NoError(t, err)
+ assert.Contains(t, traceContext, "x-datadog-trace-id")
+ assert.Contains(t, traceContext, "x-datadog-parent-id")
+ assert.NotEmpty(t, traceContext["x-datadog-trace-id"])
+ assert.NotEmpty(t, traceContext["x-datadog-parent-id"])
+}
+
func TestAppendMiddlewareS3ListObjects(t *testing.T) {
tests := []struct {
name string
@@ -416,6 +478,22 @@ func TestAppendMiddlewareSnsPublish(t *testing.T) {
assert.Equal(t, server.URL+"/", s.Tag(ext.HTTPURL))
assert.Equal(t, "aws/aws-sdk-go-v2/aws", s.Tag(ext.Component))
assert.Equal(t, ext.SpanKindClient, s.Tag(ext.SpanKind))
+
+ // Check for trace context injection
+ assert.NotNil(t, tt.publishInput.MessageAttributes)
+ assert.Contains(t, tt.publishInput.MessageAttributes, "_datadog")
+ ddAttr := tt.publishInput.MessageAttributes["_datadog"]
+ assert.Equal(t, "Binary", *ddAttr.DataType)
+ assert.NotEmpty(t, ddAttr.BinaryValue)
+
+ // Decode and verify the injected trace context
+ var traceContext map[string]string
+ err := json.Unmarshal(ddAttr.BinaryValue, &traceContext)
+ assert.NoError(t, err)
+ assert.Contains(t, traceContext, "x-datadog-trace-id")
+ assert.Contains(t, traceContext, "x-datadog-parent-id")
+ assert.NotEmpty(t, traceContext["x-datadog-trace-id"])
+ assert.NotEmpty(t, traceContext["x-datadog-parent-id"])
})
}
}
@@ -640,6 +718,62 @@ func TestAppendMiddlewareEventBridgePutRule(t *testing.T) {
}
}
+func TestAppendMiddlewareEventBridgePutEvents(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ expectedStatusCode := 200
+ server := mockAWS(expectedStatusCode)
+ defer server.Close()
+
+ resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ PartitionID: "aws",
+ URL: server.URL,
+ SigningRegion: "eu-west-1",
+ }, nil
+ })
+
+ awsCfg := aws.Config{
+ Region: "eu-west-1",
+ Credentials: aws.AnonymousCredentials{},
+ EndpointResolver: resolver,
+ }
+
+ AppendMiddleware(&awsCfg)
+
+ eventbridgeClient := eventbridge.NewFromConfig(awsCfg)
+ putEventsInput := &eventbridge.PutEventsInput{
+ Entries: []eventBridgeTypes.PutEventsRequestEntry{
+ {
+ EventBusName: aws.String("my-event-bus"),
+ Detail: aws.String(`{"key": "value"}`),
+ },
+ },
+ }
+ eventbridgeClient.PutEvents(context.Background(), putEventsInput)
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 1)
+
+ s := spans[0]
+ assert.Equal(t, "PutEvents", s.Tag("aws.operation"))
+ assert.Equal(t, "EventBridge.PutEvents", s.Tag(ext.ResourceName))
+
+ // Check for trace context injection
+ assert.Len(t, putEventsInput.Entries, 1)
+ entry := putEventsInput.Entries[0]
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*entry.Detail), &detail)
+ assert.NoError(t, err)
+ assert.Contains(t, detail, "_datadog")
+ ddData, ok := detail["_datadog"].(map[string]interface{})
+ assert.True(t, ok)
+ assert.Contains(t, ddData, "x-datadog-start-time")
+ assert.Contains(t, ddData, "x-datadog-resource-name")
+ assert.Equal(t, "my-event-bus", ddData["x-datadog-resource-name"])
+}
+
func TestAppendMiddlewareSfnDescribeStateMachine(t *testing.T) {
tests := []struct {
name string
diff --git a/contrib/aws/aws-sdk-go-v2/go.mod b/contrib/aws/aws-sdk-go-v2/go.mod
index dd7b6fa8c0..fc80c49daf 100644
--- a/contrib/aws/aws-sdk-go-v2/go.mod
+++ b/contrib/aws/aws-sdk-go-v2/go.mod
@@ -2,21 +2,19 @@ module github.com/DataDog/dd-trace-go/contrib/aws/aws-sdk-go-v2/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
- github.com/aws/aws-sdk-go-v2 v1.30.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
+ github.com/aws/aws-sdk-go-v2 v1.32.2
github.com/aws/aws-sdk-go-v2/config v1.27.27
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3
- github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3
+ github.com/aws/aws-sdk-go-v2/service/eventbridge v1.35.2
github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3
github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2
github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3
- github.com/aws/aws-sdk-go-v2/service/sns v1.31.3
- github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3
- github.com/aws/smithy-go v1.20.3
+ github.com/aws/aws-sdk-go-v2/service/sns v1.33.2
+ github.com/aws/aws-sdk-go-v2/service/sqs v1.36.2
+ github.com/aws/smithy-go v1.22.0
github.com/stretchr/testify v1.9.0
)
@@ -32,10 +30,10 @@ require (
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.27 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
- github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16 // indirect
diff --git a/contrib/aws/aws-sdk-go-v2/go.sum b/contrib/aws/aws-sdk-go-v2/go.sum
index 97080d0cff..be0afe61c7 100644
--- a/contrib/aws/aws-sdk-go-v2/go.sum
+++ b/contrib/aws/aws-sdk-go-v2/go.sum
@@ -18,8 +18,8 @@ github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpz
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY=
-github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
+github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI=
+github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM=
github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90=
@@ -30,18 +30,18 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8 h1:u1KOU1S15ufyZqmH/rA3POkiRH6EcDANHj2xHRzq+zc=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8/go.mod h1:WPv2FRnkIOoDv/8j2gSUsI4qDc7392w5anFB/I89GZ8=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 h1:7edmS3VOBDhK00b/MwGtGglCm7hhwNYnjJs/PgFdMQE=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21/go.mod h1:Q9o5h4HoIWG8XfzxqiuK/CGUbepCJ8uTlaE3bAbxytQ=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 h1:nEhZKd1JQ4EB1tekcqW1oIVpDC1ZFrjrp/cLC5MXjFQ=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3/go.mod h1:q9vzW3Xr1KEXa8n4waHiFt1PrppNDlMymlYP+xpsFbY=
-github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 h1:pjZzcXU25gsD2WmlmlayEsyXIWMVOK3//x4BXvK9c0U=
-github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3/go.mod h1:4ew4HelByABYyBE+8iU8Rzrp5PdBic5yd9nFMhbnwE8=
+github.com/aws/aws-sdk-go-v2/service/eventbridge v1.35.2 h1:FGrUiKglp0u7Zs19serLM/i22+IiwGxLCOJm4OtOMBI=
+github.com/aws/aws-sdk-go-v2/service/eventbridge v1.35.2/go.mod h1:OtWNmq2QGr/BUeJfs7ASAlzg0qjt96Su401dCdOks14=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE=
@@ -58,18 +58,18 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALw
github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE=
github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3 h1:7BK+k08c5r1oqqHeb6ye0affEQQJ/fimBTGZSjmpjwk=
github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3/go.mod h1:+mtHHxsylrf+kjxcbvfnu6jtyTT8Fa9BlqjQk5XJZ80=
-github.com/aws/aws-sdk-go-v2/service/sns v1.31.3 h1:eSTEdxkfle2G98FE+Xl3db/XAXXVTJPNQo9K/Ar8oAI=
-github.com/aws/aws-sdk-go-v2/service/sns v1.31.3/go.mod h1:1dn0delSO3J69THuty5iwP0US2Glt0mx2qBBlI13pvw=
-github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 h1:Vjqy5BZCOIsn4Pj8xzyqgGmsSqzz7y/WXbN3RgOoVrc=
-github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3/go.mod h1:L0enV3GCRd5iG9B64W35C4/hwsCB00Ib+DKVGTadKHI=
+github.com/aws/aws-sdk-go-v2/service/sns v1.33.2 h1:GeVRrB1aJsGdXxdPY6VOv0SWs+pfdeDlKgiBxi0+V6I=
+github.com/aws/aws-sdk-go-v2/service/sns v1.33.2/go.mod h1:c6Sj8zleZXYs4nyU3gpDKTzPWu7+t30YUXoLYRpbUvU=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.36.2 h1:kmbcoWgbzfh5a6rvfjOnfHSGEqD13qu1GfTPRZqg0FI=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.36.2/go.mod h1:/UPx74a3M0WYeT2yLQYG/qHhkPlPXd6TsppfGgy2COk=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ=
-github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
-github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
+github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
diff --git a/contrib/aws/aws-sdk-go/go.mod b/contrib/aws/aws-sdk-go/go.mod
index c908d6a15e..bbb3ee627c 100644
--- a/contrib/aws/aws-sdk-go/go.mod
+++ b/contrib/aws/aws-sdk-go/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/aws/aws-sdk-go/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/aws/aws-sdk-go v1.44.327
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/aws/eventbridge/eventbridge.go b/contrib/aws/eventbridge/eventbridge.go
new file mode 100644
index 0000000000..7d1edd82b0
--- /dev/null
+++ b/contrib/aws/eventbridge/eventbridge.go
@@ -0,0 +1,113 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package eventbridge
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/DataDog/dd-trace-go/v2/internal/log"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/eventbridge"
+ "github.com/aws/aws-sdk-go-v2/service/eventbridge/types"
+ "github.com/aws/smithy-go/middleware"
+)
+
+const (
+ datadogKey = "_datadog"
+ startTimeKey = "x-datadog-start-time"
+ resourceNameKey = "x-datadog-resource-name"
+ maxSizeBytes = 256 * 1024 // 256 KB
+)
+
+func EnrichOperation(span *tracer.Span, in middleware.InitializeInput, operation string) {
+ switch operation {
+ case "PutEvents":
+ handlePutEvents(span, in)
+ }
+}
+
+func handlePutEvents(span *tracer.Span, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*eventbridge.PutEventsInput)
+ if !ok {
+ log.Debug("Unable to read PutEvents params")
+ return
+ }
+
+ // Create trace context
+ carrier := tracer.TextMapCarrier{}
+ err := tracer.Inject(span.Context(), carrier)
+ if err != nil {
+ log.Debug("Unable to inject trace context: %s", err)
+ return
+ }
+
+ // Add start time
+ startTimeMillis := time.Now().UnixMilli()
+ carrier[startTimeKey] = strconv.FormatInt(startTimeMillis, 10)
+
+ carrierJSON, err := json.Marshal(carrier)
+ if err != nil {
+ log.Debug("Unable to marshal trace context: %s", err)
+ return
+ }
+
+ // Remove last '}'
+ reusedTraceContext := string(carrierJSON[:len(carrierJSON)-1])
+
+ for i := range params.Entries {
+ injectTraceContext(reusedTraceContext, &params.Entries[i])
+ }
+}
+
+func injectTraceContext(baseTraceContext string, entryPtr *types.PutEventsRequestEntry) {
+ if entryPtr == nil {
+ return
+ }
+
+ // Build the complete trace context
+ var traceContext string
+ if entryPtr.EventBusName != nil {
+ traceContext = fmt.Sprintf(`%s,"%s":"%s"}`, baseTraceContext, resourceNameKey, *entryPtr.EventBusName)
+ } else {
+ traceContext = baseTraceContext + "}"
+ }
+
+ // Get current detail string
+ var detail string
+ if entryPtr.Detail == nil || *entryPtr.Detail == "" {
+ detail = "{}"
+ } else {
+ detail = *entryPtr.Detail
+ }
+
+ // Basic JSON structure validation
+ if len(detail) < 2 || detail[len(detail)-1] != '}' {
+ log.Debug("Unable to parse detail JSON. Not injecting trace context into EventBridge payload.")
+ return
+ }
+
+ // Create new detail string
+ var newDetail string
+ if len(detail) > 2 {
+ // Case where detail is not empty
+ newDetail = fmt.Sprintf(`%s,"%s":%s}`, detail[:len(detail)-1], datadogKey, traceContext)
+ } else {
+ // Case where detail is empty
+ newDetail = fmt.Sprintf(`{"%s":%s}`, datadogKey, traceContext)
+ }
+
+ // Check sizes
+ if len(newDetail) > maxSizeBytes {
+ log.Debug("Payload size too large to pass context")
+ return
+ }
+
+ entryPtr.Detail = aws.String(newDetail)
+}
diff --git a/contrib/aws/eventbridge/eventbridge_test.go b/contrib/aws/eventbridge/eventbridge_test.go
new file mode 100644
index 0000000000..3e12b2381b
--- /dev/null
+++ b/contrib/aws/eventbridge/eventbridge_test.go
@@ -0,0 +1,193 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package eventbridge
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/eventbridge"
+ "github.com/aws/aws-sdk-go-v2/service/eventbridge/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEnrichOperation(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ span := tracer.StartSpan("test-span")
+
+ input := middleware.InitializeInput{
+ Parameters: &eventbridge.PutEventsInput{
+ Entries: []types.PutEventsRequestEntry{
+ {
+ Detail: aws.String(`{"@123": "value", "_foo": "bar"}`),
+ EventBusName: aws.String("test-bus"),
+ },
+ {
+ Detail: aws.String(`{"@123": "data", "_foo": "bar"}`),
+ EventBusName: aws.String("test-bus-2"),
+ },
+ },
+ },
+ }
+
+ EnrichOperation(span, input, "PutEvents")
+
+ params, ok := input.Parameters.(*eventbridge.PutEventsInput)
+ require.True(t, ok)
+ require.Len(t, params.Entries, 2)
+
+ for _, entry := range params.Entries {
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*entry.Detail), &detail)
+ require.NoError(t, err)
+
+ assert.Contains(t, detail, "@123") // make sure user data still exists
+ assert.Contains(t, detail, "_foo")
+ assert.Contains(t, detail, datadogKey)
+ ddData, ok := detail[datadogKey].(map[string]interface{})
+ require.True(t, ok)
+
+ assert.Contains(t, ddData, startTimeKey)
+ assert.Contains(t, ddData, resourceNameKey)
+ assert.Equal(t, *entry.EventBusName, ddData[resourceNameKey])
+ }
+}
+
+func TestInjectTraceContext(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ ctx := context.Background()
+ span, _ := tracer.StartSpanFromContext(ctx, "test-span")
+ baseTraceContext := fmt.Sprintf(`{"x-datadog-trace-id":"%s","x-datadog-parent-id":"%d","x-datadog-start-time":"123456789"`, span.Context().TraceID(), span.Context().SpanID())
+
+ tests := []struct {
+ name string
+ entry types.PutEventsRequestEntry
+ expected func(*testing.T, *types.PutEventsRequestEntry)
+ }{
+ {
+ name: "Inject into empty detail",
+ entry: types.PutEventsRequestEntry{
+ EventBusName: aws.String("test-bus"),
+ },
+ expected: func(t *testing.T, entry *types.PutEventsRequestEntry) {
+ assert.NotNil(t, entry.Detail)
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*entry.Detail), &detail)
+ require.NoError(t, err)
+ assert.Contains(t, detail, datadogKey)
+ },
+ },
+ {
+ name: "Inject into existing detail",
+ entry: types.PutEventsRequestEntry{
+ Detail: aws.String(`{"existing": "data"}`),
+ EventBusName: aws.String("test-bus"),
+ },
+ expected: func(t *testing.T, entry *types.PutEventsRequestEntry) {
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*entry.Detail), &detail)
+ require.NoError(t, err)
+ assert.Contains(t, detail, "existing")
+ assert.Equal(t, "data", detail["existing"])
+ assert.Contains(t, detail, datadogKey)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ injectTraceContext(baseTraceContext, &tt.entry)
+ tt.expected(t, &tt.entry)
+
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*tt.entry.Detail), &detail)
+ require.NoError(t, err)
+
+ ddData := detail[datadogKey].(map[string]interface{})
+ assert.Contains(t, ddData, startTimeKey)
+ assert.Contains(t, ddData, resourceNameKey)
+ assert.Equal(t, *tt.entry.EventBusName, ddData[resourceNameKey])
+
+ // Check that start time exists and is not empty
+ startTime, ok := ddData[startTimeKey]
+ assert.True(t, ok)
+ assert.Equal(t, startTime, "123456789")
+
+ carrier := tracer.TextMapCarrier{}
+ for k, v := range ddData {
+ if s, ok := v.(string); ok {
+ carrier[k] = s
+ }
+ }
+
+ extractedSpanContext, err := tracer.Extract(&carrier)
+ assert.NoError(t, err)
+ assert.Equal(t, span.Context().TraceID(), extractedSpanContext.TraceID())
+ assert.Equal(t, span.Context().SpanID(), extractedSpanContext.SpanID())
+ })
+ }
+}
+
+func TestInjectTraceContextSizeLimit(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ baseTraceContext := `{"x-datadog-trace-id":"12345","x-datadog-parent-id":"67890","x-datadog-start-time":"123456789"`
+
+ tests := []struct {
+ name string
+ entry types.PutEventsRequestEntry
+ expected func(*testing.T, *types.PutEventsRequestEntry)
+ }{
+ {
+ name: "Do not inject when payload is too large",
+ entry: types.PutEventsRequestEntry{
+ Detail: aws.String(`{"large": "` + strings.Repeat("a", maxSizeBytes-50) + `"}`),
+ EventBusName: aws.String("test-bus"),
+ },
+ expected: func(t *testing.T, entry *types.PutEventsRequestEntry) {
+ assert.GreaterOrEqual(t, len(*entry.Detail), maxSizeBytes-50)
+ assert.NotContains(t, *entry.Detail, datadogKey)
+ assert.True(t, strings.HasPrefix(*entry.Detail, `{"large": "`))
+ assert.True(t, strings.HasSuffix(*entry.Detail, `"}`))
+ },
+ },
+ {
+ name: "Inject when payload is just under the limit",
+ entry: types.PutEventsRequestEntry{
+ Detail: aws.String(`{"large": "` + strings.Repeat("a", maxSizeBytes-1000) + `"}`),
+ EventBusName: aws.String("test-bus"),
+ },
+ expected: func(t *testing.T, entry *types.PutEventsRequestEntry) {
+ assert.Less(t, len(*entry.Detail), maxSizeBytes)
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*entry.Detail), &detail)
+ require.NoError(t, err)
+ assert.Contains(t, detail, datadogKey)
+ assert.Contains(t, detail, "large")
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ injectTraceContext(baseTraceContext, &tt.entry)
+ tt.expected(t, &tt.entry)
+ })
+ }
+}
diff --git a/contrib/aws/sns/sns.go b/contrib/aws/sns/sns.go
new file mode 100644
index 0000000000..42b238176e
--- /dev/null
+++ b/contrib/aws/sns/sns.go
@@ -0,0 +1,106 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package sns
+
+import (
+ "encoding/json"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/DataDog/dd-trace-go/v2/internal/log"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sns"
+ "github.com/aws/aws-sdk-go-v2/service/sns/types"
+ "github.com/aws/smithy-go/middleware"
+)
+
+const (
+ datadogKey = "_datadog"
+ maxMessageAttributes = 10
+)
+
+func EnrichOperation(span *tracer.Span, in middleware.InitializeInput, operation string) {
+ switch operation {
+ case "Publish":
+ handlePublish(span, in)
+ case "PublishBatch":
+ handlePublishBatch(span, in)
+ }
+}
+
+func handlePublish(span *tracer.Span, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sns.PublishInput)
+ if !ok {
+ log.Debug("Unable to read PublishInput params")
+ return
+ }
+
+ traceContext, err := getTraceContext(span)
+ if err != nil {
+ log.Debug("Unable to get trace context: %s", err.Error())
+ return
+ }
+
+ if params.MessageAttributes == nil {
+ params.MessageAttributes = make(map[string]types.MessageAttributeValue)
+ }
+
+ injectTraceContext(traceContext, params.MessageAttributes)
+}
+
+func handlePublishBatch(span *tracer.Span, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sns.PublishBatchInput)
+ if !ok {
+ log.Debug("Unable to read PublishBatch params")
+ return
+ }
+
+ traceContext, err := getTraceContext(span)
+ if err != nil {
+ log.Debug("Unable to get trace context: %s", err.Error())
+ return
+ }
+
+ for i := range params.PublishBatchRequestEntries {
+ if params.PublishBatchRequestEntries[i].MessageAttributes == nil {
+ params.PublishBatchRequestEntries[i].MessageAttributes = make(map[string]types.MessageAttributeValue)
+ }
+ injectTraceContext(traceContext, params.PublishBatchRequestEntries[i].MessageAttributes)
+ }
+}
+
+func getTraceContext(span *tracer.Span) (types.MessageAttributeValue, error) {
+ carrier := tracer.TextMapCarrier{}
+ err := tracer.Inject(span.Context(), carrier)
+ if err != nil {
+ return types.MessageAttributeValue{}, err
+ }
+
+ jsonBytes, err := json.Marshal(carrier)
+ if err != nil {
+ return types.MessageAttributeValue{}, err
+ }
+
+ // Use Binary since SNS subscription filter policies fail silently with JSON
+ // strings. https://github.com/DataDog/datadog-lambda-js/pull/269
+ attribute := types.MessageAttributeValue{
+ DataType: aws.String("Binary"),
+ BinaryValue: jsonBytes,
+ }
+
+ return attribute, nil
+}
+
+func injectTraceContext(traceContext types.MessageAttributeValue, messageAttributes map[string]types.MessageAttributeValue) {
+ // SNS only allows a maximum of 10 message attributes.
+ // https://docs.aws.amazon.com/sns/latest/dg/sns-message-attributes.html
+ // Only inject if there's room.
+ if len(messageAttributes) >= maxMessageAttributes {
+ log.Info("Cannot inject trace context: message already has maximum allowed attributes")
+ return
+ }
+
+ messageAttributes[datadogKey] = traceContext
+}
diff --git a/contrib/aws/sns/sns_test.go b/contrib/aws/sns/sns_test.go
new file mode 100644
index 0000000000..4d3a020813
--- /dev/null
+++ b/contrib/aws/sns/sns_test.go
@@ -0,0 +1,177 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package sns
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sns"
+ "github.com/aws/aws-sdk-go-v2/service/sns/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEnrichOperation(t *testing.T) {
+ tests := []struct {
+ name string
+ operation string
+ input middleware.InitializeInput
+ setup func(context.Context) *tracer.Span
+ check func(*testing.T, middleware.InitializeInput)
+ }{
+ {
+ name: "Publish",
+ operation: "Publish",
+ input: middleware.InitializeInput{
+ Parameters: &sns.PublishInput{
+ Message: aws.String("test message"),
+ TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:test-topic"),
+ },
+ },
+ setup: func(ctx context.Context) *tracer.Span {
+ span, _ := tracer.StartSpanFromContext(ctx, "test-span")
+ return span
+ },
+ check: func(t *testing.T, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sns.PublishInput)
+ require.True(t, ok)
+ require.NotNil(t, params)
+ require.NotNil(t, params.MessageAttributes)
+ assert.Contains(t, params.MessageAttributes, datadogKey)
+ assert.NotNil(t, params.MessageAttributes[datadogKey].DataType)
+ assert.Equal(t, "Binary", *params.MessageAttributes[datadogKey].DataType)
+ assert.NotNil(t, params.MessageAttributes[datadogKey].BinaryValue)
+ assert.NotEmpty(t, params.MessageAttributes[datadogKey].BinaryValue)
+ },
+ },
+ {
+ name: "PublishBatch",
+ operation: "PublishBatch",
+ input: middleware.InitializeInput{
+ Parameters: &sns.PublishBatchInput{
+ TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:test-topic"),
+ PublishBatchRequestEntries: []types.PublishBatchRequestEntry{
+ {
+ Id: aws.String("1"),
+ Message: aws.String("test message 1"),
+ },
+ {
+ Id: aws.String("2"),
+ Message: aws.String("test message 2"),
+ },
+ },
+ },
+ },
+ setup: func(ctx context.Context) *tracer.Span {
+ span, _ := tracer.StartSpanFromContext(ctx, "test-span")
+ return span
+ },
+ check: func(t *testing.T, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sns.PublishBatchInput)
+ require.True(t, ok)
+ require.NotNil(t, params)
+ require.NotNil(t, params.PublishBatchRequestEntries)
+ require.Len(t, params.PublishBatchRequestEntries, 2)
+
+ for _, entry := range params.PublishBatchRequestEntries {
+ require.NotNil(t, entry.MessageAttributes)
+ assert.Contains(t, entry.MessageAttributes, datadogKey)
+ assert.NotNil(t, entry.MessageAttributes[datadogKey].DataType)
+ assert.Equal(t, "Binary", *entry.MessageAttributes[datadogKey].DataType)
+ assert.NotNil(t, entry.MessageAttributes[datadogKey].BinaryValue)
+ assert.NotEmpty(t, entry.MessageAttributes[datadogKey].BinaryValue)
+ }
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ ctx := context.Background()
+ span := tt.setup(ctx)
+
+ EnrichOperation(span, tt.input, tt.operation)
+
+ if tt.check != nil {
+ tt.check(t, tt.input)
+ }
+ })
+ }
+}
+
+func TestInjectTraceContext(t *testing.T) {
+ tests := []struct {
+ name string
+ existingAttributes int
+ expectInjection bool
+ }{
+ {
+ name: "Inject with no existing attributes",
+ existingAttributes: 0,
+ expectInjection: true,
+ },
+ {
+ name: "Inject with some existing attributes",
+ existingAttributes: 5,
+ expectInjection: true,
+ },
+ {
+ name: "No injection when at max attributes",
+ existingAttributes: maxMessageAttributes,
+ expectInjection: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ span := tracer.StartSpan("test-span")
+
+ messageAttributes := make(map[string]types.MessageAttributeValue)
+ for i := 0; i < tt.existingAttributes; i++ {
+ messageAttributes[fmt.Sprintf("attr%d", i)] = types.MessageAttributeValue{
+ DataType: aws.String("String"),
+ StringValue: aws.String("value"),
+ }
+ }
+
+ traceContext, err := getTraceContext(span)
+ assert.NoError(t, err)
+ injectTraceContext(traceContext, messageAttributes)
+
+ if tt.expectInjection {
+ assert.Contains(t, messageAttributes, datadogKey)
+ assert.NotNil(t, messageAttributes[datadogKey].DataType)
+ assert.Equal(t, "Binary", *messageAttributes[datadogKey].DataType)
+ assert.NotNil(t, messageAttributes[datadogKey].BinaryValue)
+ assert.NotEmpty(t, messageAttributes[datadogKey].BinaryValue)
+
+ carrier := tracer.TextMapCarrier{}
+ err := json.Unmarshal(messageAttributes[datadogKey].BinaryValue, &carrier)
+ assert.NoError(t, err)
+
+ extractedSpanContext, err := tracer.Extract(carrier)
+ assert.NoError(t, err)
+ assert.Equal(t, span.Context().TraceID(), extractedSpanContext.TraceID())
+ assert.Equal(t, span.Context().SpanID(), extractedSpanContext.SpanID())
+ } else {
+ assert.NotContains(t, messageAttributes, datadogKey)
+ }
+ })
+ }
+}
diff --git a/contrib/aws/sqs/sqs.go b/contrib/aws/sqs/sqs.go
new file mode 100644
index 0000000000..6d14fe688c
--- /dev/null
+++ b/contrib/aws/sqs/sqs.go
@@ -0,0 +1,104 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package sqs
+
+import (
+ "encoding/json"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/DataDog/dd-trace-go/v2/internal/log"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sqs"
+ "github.com/aws/aws-sdk-go-v2/service/sqs/types"
+ "github.com/aws/smithy-go/middleware"
+)
+
+const (
+ datadogKey = "_datadog"
+ maxMessageAttributes = 10
+)
+
+func EnrichOperation(span *tracer.Span, in middleware.InitializeInput, operation string) {
+ switch operation {
+ case "SendMessage":
+ handleSendMessage(span, in)
+ case "SendMessageBatch":
+ handleSendMessageBatch(span, in)
+ }
+}
+
+func handleSendMessage(span *tracer.Span, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sqs.SendMessageInput)
+ if !ok {
+ log.Debug("Unable to read SendMessage params")
+ return
+ }
+
+ traceContext, err := getTraceContext(span)
+ if err != nil {
+ log.Debug("Unable to get trace context: %s", err.Error())
+ return
+ }
+
+ if params.MessageAttributes == nil {
+ params.MessageAttributes = make(map[string]types.MessageAttributeValue)
+ }
+
+ injectTraceContext(traceContext, params.MessageAttributes)
+}
+
+func handleSendMessageBatch(span *tracer.Span, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sqs.SendMessageBatchInput)
+ if !ok {
+ log.Debug("Unable to read SendMessageBatch params")
+ return
+ }
+
+ traceContext, err := getTraceContext(span)
+ if err != nil {
+ log.Debug("Unable to get trace context: %s", err.Error())
+ return
+ }
+
+ for i := range params.Entries {
+ if params.Entries[i].MessageAttributes == nil {
+ params.Entries[i].MessageAttributes = make(map[string]types.MessageAttributeValue)
+ }
+ injectTraceContext(traceContext, params.Entries[i].MessageAttributes)
+ }
+}
+
+func getTraceContext(span *tracer.Span) (types.MessageAttributeValue, error) {
+ carrier := tracer.TextMapCarrier{}
+ err := tracer.Inject(span.Context(), carrier)
+ if err != nil {
+ return types.MessageAttributeValue{}, err
+ }
+
+ jsonBytes, err := json.Marshal(carrier)
+ if err != nil {
+ return types.MessageAttributeValue{}, err
+ }
+
+ attribute := types.MessageAttributeValue{
+ DataType: aws.String("String"),
+ StringValue: aws.String(string(jsonBytes)),
+ }
+
+ return attribute, nil
+}
+
+func injectTraceContext(traceContext types.MessageAttributeValue, messageAttributes map[string]types.MessageAttributeValue) {
+ // SQS only allows a maximum of 10 message attributes.
+ // https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes
+ // Only inject if there's room.
+ if len(messageAttributes) >= maxMessageAttributes {
+ log.Info("Cannot inject trace context: message already has maximum allowed attributes")
+ return
+ }
+
+ messageAttributes[datadogKey] = traceContext
+}
diff --git a/contrib/aws/sqs/sqs_test.go b/contrib/aws/sqs/sqs_test.go
new file mode 100644
index 0000000000..03472c3f3e
--- /dev/null
+++ b/contrib/aws/sqs/sqs_test.go
@@ -0,0 +1,182 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package sqs
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go-v2/service/sqs/types"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sqs"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEnrichOperation(t *testing.T) {
+ tests := []struct {
+ name string
+ operation string
+ input middleware.InitializeInput
+ setup func(context.Context) *tracer.Span
+ check func(*testing.T, middleware.InitializeInput)
+ }{
+ {
+ name: "SendMessage",
+ operation: "SendMessage",
+ input: middleware.InitializeInput{
+ Parameters: &sqs.SendMessageInput{
+ MessageBody: aws.String("test message"),
+ QueueUrl: aws.String("https://sqs.us-east-1.amazonaws.com/1234567890/test-queue"),
+ },
+ },
+ setup: func(ctx context.Context) *tracer.Span {
+ span, _ := tracer.StartSpanFromContext(ctx, "test-span")
+ return span
+ },
+ check: func(t *testing.T, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sqs.SendMessageInput)
+ require.True(t, ok)
+ require.NotNil(t, params)
+ require.NotNil(t, params.MessageAttributes)
+ assert.Contains(t, params.MessageAttributes, datadogKey)
+ assert.NotNil(t, params.MessageAttributes[datadogKey].DataType)
+ assert.Equal(t, "String", *params.MessageAttributes[datadogKey].DataType)
+ assert.NotNil(t, params.MessageAttributes[datadogKey].StringValue)
+ assert.NotEmpty(t, *params.MessageAttributes[datadogKey].StringValue)
+ },
+ },
+ {
+ name: "SendMessageBatch",
+ operation: "SendMessageBatch",
+ input: middleware.InitializeInput{
+ Parameters: &sqs.SendMessageBatchInput{
+ QueueUrl: aws.String("https://sqs.us-east-1.amazonaws.com/1234567890/test-queue"),
+ Entries: []types.SendMessageBatchRequestEntry{
+ {
+ Id: aws.String("1"),
+ MessageBody: aws.String("test message 1"),
+ },
+ {
+ Id: aws.String("2"),
+ MessageBody: aws.String("test message 2"),
+ },
+ {
+ Id: aws.String("3"),
+ MessageBody: aws.String("test message 3"),
+ },
+ },
+ },
+ },
+ setup: func(ctx context.Context) *tracer.Span {
+ span, _ := tracer.StartSpanFromContext(ctx, "test-span")
+ return span
+ },
+ check: func(t *testing.T, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sqs.SendMessageBatchInput)
+ require.True(t, ok)
+ require.NotNil(t, params)
+ require.NotNil(t, params.Entries)
+ require.Len(t, params.Entries, 3)
+
+ for _, entry := range params.Entries {
+ require.NotNil(t, entry.MessageAttributes)
+ assert.Contains(t, entry.MessageAttributes, datadogKey)
+ assert.NotNil(t, entry.MessageAttributes[datadogKey].DataType)
+ assert.Equal(t, "String", *entry.MessageAttributes[datadogKey].DataType)
+ assert.NotNil(t, entry.MessageAttributes[datadogKey].StringValue)
+ assert.NotEmpty(t, *entry.MessageAttributes[datadogKey].StringValue)
+ }
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ ctx := context.Background()
+ span := tt.setup(ctx)
+
+ EnrichOperation(span, tt.input, tt.operation)
+
+ if tt.check != nil {
+ tt.check(t, tt.input)
+ }
+ })
+ }
+}
+
+func TestInjectTraceContext(t *testing.T) {
+ tests := []struct {
+ name string
+ existingAttributes int
+ expectInjection bool
+ }{
+ {
+ name: "Inject with no existing attributes",
+ existingAttributes: 0,
+ expectInjection: true,
+ },
+ {
+ name: "Inject with some existing attributes",
+ existingAttributes: 5,
+ expectInjection: true,
+ },
+ {
+ name: "No injection when at max attributes",
+ existingAttributes: maxMessageAttributes,
+ expectInjection: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ span := tracer.StartSpan("test-span")
+
+ messageAttributes := make(map[string]types.MessageAttributeValue)
+ for i := 0; i < tt.existingAttributes; i++ {
+ messageAttributes[fmt.Sprintf("attr%d", i)] = types.MessageAttributeValue{
+ DataType: aws.String("String"),
+ StringValue: aws.String("value"),
+ }
+ }
+
+ traceContext, err := getTraceContext(span)
+ assert.NoError(t, err)
+ injectTraceContext(traceContext, messageAttributes)
+
+ if tt.expectInjection {
+ assert.Contains(t, messageAttributes, datadogKey)
+ assert.NotNil(t, messageAttributes[datadogKey].DataType)
+ assert.Equal(t, "String", *messageAttributes[datadogKey].DataType)
+ assert.NotNil(t, messageAttributes[datadogKey].StringValue)
+ assert.NotEmpty(t, *messageAttributes[datadogKey].StringValue)
+
+ carrier := tracer.TextMapCarrier{}
+ err := json.Unmarshal([]byte(*messageAttributes[datadogKey].StringValue), &carrier)
+ assert.NoError(t, err)
+
+ extractedSpanContext, err := tracer.Extract(carrier)
+ assert.NoError(t, err)
+ assert.Equal(t, span.Context().TraceID(), extractedSpanContext.TraceID())
+ assert.Equal(t, span.Context().SpanID(), extractedSpanContext.SpanID())
+ } else {
+ assert.NotContains(t, messageAttributes, datadogKey)
+ }
+ })
+ }
+}
diff --git a/contrib/bradfitz/gomemcache/go.mod b/contrib/bradfitz/gomemcache/go.mod
index ac9870982a..d5b801579e 100644
--- a/contrib/bradfitz/gomemcache/go.mod
+++ b/contrib/bradfitz/gomemcache/go.mod
@@ -2,8 +2,6 @@ module github.com/DataDog/dd-trace-go/contrib/bradfitz/gomemcache/v2
go 1.22.0
-toolchain go1.23.1
-
require (
github.com/bradfitz/gomemcache v0.0.0-20230611145640-acc696258285
github.com/stretchr/testify v1.9.0
@@ -27,7 +25,7 @@ require (
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.5 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
diff --git a/contrib/cloud.google.com/go/pubsub.v1/go.mod b/contrib/cloud.google.com/go/pubsub.v1/go.mod
index 7ded960429..3904f3ef89 100644
--- a/contrib/cloud.google.com/go/pubsub.v1/go.mod
+++ b/contrib/cloud.google.com/go/pubsub.v1/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/cloud.google.com/go/pubsub.v1/v2
go 1.22.0
-toolchain go1.23.1
-
require (
cloud.google.com/go/pubsub v1.36.1
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
google.golang.org/api v0.169.0
google.golang.org/grpc v1.64.0
diff --git a/contrib/cloud.google.com/go/pubsub.v1/internal/tracing/tracing.go b/contrib/cloud.google.com/go/pubsub.v1/internal/tracing/tracing.go
index 38d9be9795..ce82caa401 100644
--- a/contrib/cloud.google.com/go/pubsub.v1/internal/tracing/tracing.go
+++ b/contrib/cloud.google.com/go/pubsub.v1/internal/tracing/tracing.go
@@ -3,6 +3,13 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2024 Datadog, Inc.
+// Package tracing contains tracing logic for the cloud.google.com/go/pubsub.v1 instrumentation.
+//
+// WARNING: this package SHOULD NOT import cloud.google.com/go/pubsub.
+//
+// The motivation of this package is to support orchestrion, which cannot use the main package because it imports
+// the cloud.google.com/go/pubsub package, and since orchestrion modifies the library code itself,
+// this would cause an import cycle.
package tracing
import (
diff --git a/contrib/confluentinc/confluent-kafka-go/consumer.go b/contrib/confluentinc/confluent-kafka-go/consumer.go
new file mode 100644
index 0000000000..2f250e8d25
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/consumer.go
@@ -0,0 +1,85 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import (
+ "math"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+)
+
+func WrapConsumeEventsChannel[E any, TE Event](tr *KafkaTracer, in chan E, consumer Consumer, translateFn func(E) TE) chan E {
+ // in will be nil when consuming via the events channel is not enabled
+ if in == nil {
+ return nil
+ }
+
+ out := make(chan E, 1)
+ go func() {
+ defer close(out)
+ for evt := range in {
+ tEvt := translateFn(evt)
+ var next *tracer.Span
+
+ // only trace messages
+ if msg, ok := tEvt.KafkaMessage(); ok {
+ next = tr.StartConsumeSpan(msg)
+ tr.SetConsumeCheckpoint(msg)
+ } else if offset, ok := tEvt.KafkaOffsetsCommitted(); ok {
+ tr.TrackCommitOffsets(offset.GetOffsets(), offset.GetError())
+ tr.TrackHighWatermarkOffset(offset.GetOffsets(), consumer)
+ }
+
+ out <- evt
+
+ if tr.PrevSpan != nil {
+ tr.PrevSpan.Finish()
+ }
+ tr.PrevSpan = next
+ }
+ // finish any remaining span
+ if tr.PrevSpan != nil {
+ tr.PrevSpan.Finish()
+ tr.PrevSpan = nil
+ }
+ }()
+ return out
+}
+
+func (tr *KafkaTracer) StartConsumeSpan(msg Message) *tracer.Span {
+ opts := []tracer.StartSpanOption{
+ tracer.ServiceName(tr.consumerServiceName),
+ tracer.ResourceName("Consume Topic " + msg.GetTopicPartition().GetTopic()),
+ tracer.SpanType(ext.SpanTypeMessageConsumer),
+ tracer.Tag(ext.MessagingKafkaPartition, msg.GetTopicPartition().GetPartition()),
+ tracer.Tag("offset", msg.GetTopicPartition().GetOffset()),
+ tracer.Tag(ext.Component, ComponentName(tr.ckgoVersion)),
+ tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
+ tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
+ tracer.Measured(),
+ }
+ if tr.bootstrapServers != "" {
+ opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, tr.bootstrapServers))
+ }
+ if tr.tagFns != nil {
+ for key, tagFn := range tr.tagFns {
+ opts = append(opts, tracer.Tag(key, tagFn(msg)))
+ }
+ }
+ if !math.IsNaN(tr.analyticsRate) {
+ opts = append(opts, tracer.Tag(ext.EventSampleRate, tr.analyticsRate))
+ }
+ // kafka supports headers, so try to extract a span context
+ carrier := MessageCarrier{msg: msg}
+ if spanctx, err := tracer.Extract(carrier); err == nil {
+ opts = append(opts, tracer.ChildOf(spanctx))
+ }
+ span, _ := tracer.StartSpanFromContext(tr.ctx, tr.consumerSpanName, opts...)
+ // reinject the span context so consumers can pick it up
+ tracer.Inject(span.Context(), carrier)
+ return span
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/dsm.go b/contrib/confluentinc/confluent-kafka-go/dsm.go
new file mode 100644
index 0000000000..755dfc7b14
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/dsm.go
@@ -0,0 +1,88 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import (
+ "context"
+
+ "github.com/DataDog/dd-trace-go/v2/datastreams"
+ "github.com/DataDog/dd-trace-go/v2/datastreams/options"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+)
+
+func (tr *KafkaTracer) TrackCommitOffsets(offsets []TopicPartition, err error) {
+ if err != nil || tr.groupID == "" || !tr.dsmEnabled {
+ return
+ }
+ for _, tp := range offsets {
+ tracer.TrackKafkaCommitOffset(tr.groupID, tp.GetTopic(), tp.GetPartition(), tp.GetOffset())
+ }
+}
+
+func (tr *KafkaTracer) TrackHighWatermarkOffset(offsets []TopicPartition, consumer Consumer) {
+ if !tr.dsmEnabled {
+ return
+ }
+ for _, tp := range offsets {
+ if _, high, err := consumer.GetWatermarkOffsets(tp.GetTopic(), tp.GetPartition()); err == nil {
+ tracer.TrackKafkaHighWatermarkOffset("", tp.GetTopic(), tp.GetPartition(), high)
+ }
+ }
+}
+
+func (tr *KafkaTracer) TrackProduceOffsets(msg Message) {
+ err := msg.GetTopicPartition().GetError()
+ if err != nil || !tr.dsmEnabled || msg.GetTopicPartition().GetTopic() == "" {
+ return
+ }
+ tp := msg.GetTopicPartition()
+ tracer.TrackKafkaProduceOffset(tp.GetTopic(), tp.GetPartition(), tp.GetOffset())
+}
+
+func (tr *KafkaTracer) SetConsumeCheckpoint(msg Message) {
+ if !tr.dsmEnabled || msg == nil {
+ return
+ }
+ edges := []string{"direction:in", "topic:" + msg.GetTopicPartition().GetTopic(), "type:kafka"}
+ if tr.groupID != "" {
+ edges = append(edges, "group:"+tr.groupID)
+ }
+ carrier := NewMessageCarrier(msg)
+ ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
+ datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
+ options.CheckpointParams{PayloadSize: getMsgSize(msg)},
+ edges...,
+ )
+ if !ok {
+ return
+ }
+ datastreams.InjectToBase64Carrier(ctx, carrier)
+}
+
+func (tr *KafkaTracer) SetProduceCheckpoint(msg Message) {
+ if !tr.dsmEnabled || msg == nil {
+ return
+ }
+ edges := []string{"direction:out", "topic:" + msg.GetTopicPartition().GetTopic(), "type:kafka"}
+ carrier := NewMessageCarrier(msg)
+ ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
+ datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
+ options.CheckpointParams{PayloadSize: getMsgSize(msg)},
+ edges...,
+ )
+ if !ok || tr.librdKafkaVersion < 0x000b0400 {
+	// message headers are only supported in librdkafka >= 0.11.4
+ return
+ }
+ datastreams.InjectToBase64Carrier(ctx, carrier)
+}
+
+func getMsgSize(msg Message) (size int64) {
+ for _, header := range msg.GetHeaders() {
+ size += int64(len(header.GetKey()) + len(header.GetValue()))
+ }
+ return size + int64(len(msg.GetValue())+len(msg.GetKey()))
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/go.mod b/contrib/confluentinc/confluent-kafka-go/kafka.v2/go.mod
index 96f8d54049..de49d68324 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/go.mod
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/go.mod
@@ -2,12 +2,11 @@ module github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/ka
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/confluentinc/confluent-kafka-go/v2 v2.2.0
github.com/stretchr/testify v1.9.0
+ go.uber.org/goleak v1.3.0
)
require (
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/headers.go b/contrib/confluentinc/confluent-kafka-go/kafka.v2/headers.go
index 990c1914d7..49e5425bb2 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/headers.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/headers.go
@@ -6,48 +6,15 @@
package kafka
import (
- "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
-
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
-)
-
-// A MessageCarrier injects and extracts traces from a sarama.ProducerMessage.
-type MessageCarrier struct {
- msg *kafka.Message
-}
-var _ interface {
- tracer.TextMapReader
- tracer.TextMapWriter
-} = (*MessageCarrier)(nil)
-
-// ForeachKey iterates over every header.
-func (c MessageCarrier) ForeachKey(handler func(key, val string) error) error {
- for _, h := range c.msg.Headers {
- err := handler(string(h.Key), string(h.Value))
- if err != nil {
- return err
- }
- }
- return nil
-}
+ tracing "github.com/DataDog/dd-trace-go/v2/contrib/confluentinc/confluent-kafka-go"
+)
-// Set sets a header.
-func (c MessageCarrier) Set(key, val string) {
- // ensure uniqueness of keys
- for i := 0; i < len(c.msg.Headers); i++ {
- if string(c.msg.Headers[i].Key) == key {
- c.msg.Headers = append(c.msg.Headers[:i], c.msg.Headers[i+1:]...)
- i--
- }
- }
- c.msg.Headers = append(c.msg.Headers, kafka.Header{
- Key: key,
- Value: []byte(val),
- })
-}
+// A MessageCarrier injects and extracts traces from a kafka.Message.
+type MessageCarrier = tracing.MessageCarrier
// NewMessageCarrier creates a new MessageCarrier.
func NewMessageCarrier(msg *kafka.Message) MessageCarrier {
- return MessageCarrier{msg}
+ return tracing.NewMessageCarrier(wrapMessage(msg))
}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka.go b/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka.go
index d8f3076abe..2bd892a52e 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka.go
@@ -7,15 +7,11 @@
package kafka // import "github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/kafka.v2/v2"
import (
- "context"
- "math"
"time"
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
- "github.com/DataDog/dd-trace-go/v2/datastreams"
- "github.com/DataDog/dd-trace-go/v2/datastreams/options"
- "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+ tracing "github.com/DataDog/dd-trace-go/v2/contrib/confluentinc/confluent-kafka-go"
"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation"
)
@@ -31,6 +27,11 @@ func init() {
instr = instrumentation.Load(instrumentation.PackageConfluentKafkaGoV2)
}
+func newKafkaTracer(opts ...Option) *tracing.KafkaTracer {
+ v, _ := kafka.LibraryVersion()
+ return tracing.NewKafkaTracer(tracing.CKGoVersion2, v, opts...)
+}
+
// NewConsumer calls kafka.NewConsumer and wraps the resulting Consumer.
func NewConsumer(conf *kafka.ConfigMap, opts ...Option) (*Consumer, error) {
c, err := kafka.NewConsumer(conf)
@@ -54,93 +55,21 @@ func NewProducer(conf *kafka.ConfigMap, opts ...Option) (*Producer, error) {
// A Consumer wraps a kafka.Consumer.
type Consumer struct {
*kafka.Consumer
- cfg *config
+ tracer *tracing.KafkaTracer
events chan kafka.Event
- prev *tracer.Span
}
// WrapConsumer wraps a kafka.Consumer so that any consumed events are traced.
func WrapConsumer(c *kafka.Consumer, opts ...Option) *Consumer {
wrapped := &Consumer{
Consumer: c,
- cfg: newConfig(opts...),
+ tracer: newKafkaTracer(opts...),
}
- instr.Logger().Debug("%s: Wrapping Consumer: %#v", pkgPath, wrapped.cfg)
- wrapped.events = wrapped.traceEventsChannel(c.Events())
+ instr.Logger().Debug("%s: Wrapping Consumer: %#v", pkgPath, wrapped.tracer)
+ wrapped.events = tracing.WrapConsumeEventsChannel(wrapped.tracer, c.Events(), c, wrapEvent)
return wrapped
}
-func (c *Consumer) traceEventsChannel(in chan kafka.Event) chan kafka.Event {
- // in will be nil when consuming via the events channel is not enabled
- if in == nil {
- return nil
- }
-
- out := make(chan kafka.Event, 1)
- go func() {
- defer close(out)
- for evt := range in {
- var next *tracer.Span
-
- // only trace messages
- if msg, ok := evt.(*kafka.Message); ok {
- next = c.startSpan(msg)
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- } else if offset, ok := evt.(kafka.OffsetsCommitted); ok {
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, offset.Offsets, offset.Error)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, offset.Offsets)
- }
-
- out <- evt
-
- if c.prev != nil {
- c.prev.Finish()
- }
- c.prev = next
- }
- // finish any remaining span
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
- }
- }()
- return out
-}
-
-func (c *Consumer) startSpan(msg *kafka.Message) *tracer.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(c.cfg.consumerServiceName),
- tracer.ResourceName("Consume Topic " + *msg.TopicPartition.Topic),
- tracer.SpanType(ext.SpanTypeMessageConsumer),
- tracer.Tag(ext.MessagingKafkaPartition, msg.TopicPartition.Partition),
- tracer.Tag("offset", msg.TopicPartition.Offset),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Measured(),
- }
- if c.cfg.bootstrapServers != "" {
- opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, c.cfg.bootstrapServers))
- }
- if c.cfg.tagFns != nil {
- for key, tagFn := range c.cfg.tagFns {
- opts = append(opts, tracer.Tag(key, tagFn(msg)))
- }
- }
- if !math.IsNaN(c.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, c.cfg.analyticsRate))
- }
- // kafka supports headers, so try to extract a span context
- carrier := NewMessageCarrier(msg)
- if spanctx, err := tracer.Extract(carrier); err == nil {
- opts = append(opts, tracer.ChildOf(spanctx))
- }
- span, _ := tracer.StartSpanFromContext(c.cfg.ctx, c.cfg.consumerSpanName, opts...)
- // reinject the span context so consumers can pick it up
- tracer.Inject(span.Context(), carrier)
- return span
-}
-
// Close calls the underlying Consumer.Close and if polling is enabled, finishes
// any remaining span.
func (c *Consumer) Close() error {
@@ -148,178 +77,111 @@ func (c *Consumer) Close() error {
// we only close the previous span if consuming via the events channel is
// not enabled, because otherwise there would be a data race from the
// consuming goroutine.
- if c.events == nil && c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.events == nil && c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
return err
}
-// Events returns the kafka Events channel (if enabled). Message events will be
+// Events returns the kafka Events channel (if enabled). Message events will be
// traced.
func (c *Consumer) Events() chan kafka.Event {
return c.events
}
-// Poll polls the consumer for messages or events. Message will be
+// Poll polls the consumer for messages or events. Message will be
// traced.
func (c *Consumer) Poll(timeoutMS int) (event kafka.Event) {
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
evt := c.Consumer.Poll(timeoutMS)
if msg, ok := evt.(*kafka.Message); ok {
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- c.prev = c.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ c.tracer.SetConsumeCheckpoint(tMsg)
+ c.tracer.PrevSpan = c.tracer.StartConsumeSpan(tMsg)
} else if offset, ok := evt.(kafka.OffsetsCommitted); ok {
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, offset.Offsets, offset.Error)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, offset.Offsets)
+ tOffsets := wrapTopicPartitions(offset.Offsets)
+ c.tracer.TrackCommitOffsets(tOffsets, offset.Error)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
}
return evt
}
-func (c *Consumer) trackHighWatermark(dataStreamsEnabled bool, offsets []kafka.TopicPartition) {
- if !dataStreamsEnabled {
- return
- }
- for _, tp := range offsets {
- if _, high, err := c.Consumer.GetWatermarkOffsets(*tp.Topic, tp.Partition); err == nil {
- tracer.TrackKafkaHighWatermarkOffset("", *tp.Topic, tp.Partition, high)
- }
- }
-}
-
-// ReadMessage polls the consumer for a message. Message will be traced.
+// ReadMessage polls the consumer for a message. Message will be traced.
func (c *Consumer) ReadMessage(timeout time.Duration) (*kafka.Message, error) {
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
msg, err := c.Consumer.ReadMessage(timeout)
if err != nil {
return nil, err
}
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- c.prev = c.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ c.tracer.SetConsumeCheckpoint(tMsg)
+ c.tracer.PrevSpan = c.tracer.StartConsumeSpan(tMsg)
return msg, nil
}
// Commit commits current offsets and tracks the commit offsets if data streams is enabled.
func (c *Consumer) Commit() ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.Commit()
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
// CommitMessage commits a message and tracks the commit offsets if data streams is enabled.
func (c *Consumer) CommitMessage(msg *kafka.Message) ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.CommitMessage(msg)
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
// CommitOffsets commits provided offsets and tracks the commit offsets if data streams is enabled.
func (c *Consumer) CommitOffsets(offsets []kafka.TopicPartition) ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.CommitOffsets(offsets)
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
-func commitOffsets(dataStreamsEnabled bool, groupID string, tps []kafka.TopicPartition, err error) {
- if err != nil || groupID == "" || !dataStreamsEnabled {
- return
- }
- for _, tp := range tps {
- tracer.TrackKafkaCommitOffset(groupID, *tp.Topic, tp.Partition, int64(tp.Offset))
- }
-}
-
-func trackProduceOffsets(dataStreamsEnabled bool, msg *kafka.Message, err error) {
- if err != nil || !dataStreamsEnabled || msg.TopicPartition.Topic == nil {
- return
- }
- tracer.TrackKafkaProduceOffset(*msg.TopicPartition.Topic, msg.TopicPartition.Partition, int64(msg.TopicPartition.Offset))
-}
-
// A Producer wraps a kafka.Producer.
type Producer struct {
*kafka.Producer
- cfg *config
+ tracer *tracing.KafkaTracer
produceChannel chan *kafka.Message
events chan kafka.Event
- libraryVersion int
}
// WrapProducer wraps a kafka.Producer so requests are traced.
func WrapProducer(p *kafka.Producer, opts ...Option) *Producer {
- version, _ := kafka.LibraryVersion()
wrapped := &Producer{
- Producer: p,
- cfg: newConfig(opts...),
- events: p.Events(),
- libraryVersion: version,
+ Producer: p,
+ tracer: newKafkaTracer(opts...),
+ events: p.Events(),
}
- instr.Logger().Debug("%s: Wrapping Producer: %#v", pkgPath, wrapped.cfg)
- wrapped.produceChannel = wrapped.traceProduceChannel(p.ProduceChannel())
- if wrapped.cfg.dataStreamsEnabled {
- wrapped.events = wrapped.traceEventsChannel(p.Events())
+ instr.Logger().Debug("%s: Wrapping Producer: %#v", pkgPath, wrapped.tracer)
+ wrapped.produceChannel = tracing.WrapProduceChannel(wrapped.tracer, p.ProduceChannel(), wrapMessage)
+ if wrapped.tracer.DSMEnabled() {
+ wrapped.events = tracing.WrapProduceEventsChannel(wrapped.tracer, p.Events(), wrapEvent)
}
return wrapped
}
-// Events returns the kafka Events channel (if enabled). Message events will be monitored
+// Events returns the kafka Events channel (if enabled). Message events will be monitored
// with data streams monitoring (if enabled)
func (p *Producer) Events() chan kafka.Event {
return p.events
}
-func (p *Producer) traceProduceChannel(out chan *kafka.Message) chan *kafka.Message {
- if out == nil {
- return out
- }
- in := make(chan *kafka.Message, 1)
- go func() {
- for msg := range in {
- span := p.startSpan(msg)
- setProduceCheckpoint(p.cfg.dataStreamsEnabled, p.libraryVersion, msg)
- out <- msg
- span.Finish()
- }
- }()
- return in
-}
-
-func (p *Producer) startSpan(msg *kafka.Message) *tracer.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(p.cfg.producerServiceName),
- tracer.ResourceName("Produce Topic " + *msg.TopicPartition.Topic),
- tracer.SpanType(ext.SpanTypeMessageProducer),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Tag(ext.MessagingKafkaPartition, msg.TopicPartition.Partition),
- }
- if p.cfg.bootstrapServers != "" {
- opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, p.cfg.bootstrapServers))
- }
- if !math.IsNaN(p.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, p.cfg.analyticsRate))
- }
- //if there's a span context in the headers, use that as the parent
- carrier := NewMessageCarrier(msg)
- if spanctx, err := tracer.Extract(carrier); err == nil {
- opts = append(opts, tracer.ChildOf(spanctx))
- }
- span, _ := tracer.StartSpanFromContext(p.cfg.ctx, p.cfg.producerSpanName, opts...)
- // inject the span context so consumers can pick it up
- tracer.Inject(span.Context(), carrier)
- return span
-}
-
// Close calls the underlying Producer.Close and also closes the internal
// wrapping producer channel.
func (p *Producer) Close() {
@@ -329,33 +191,23 @@ func (p *Producer) Close() {
// Produce calls the underlying Producer.Produce and traces the request.
func (p *Producer) Produce(msg *kafka.Message, deliveryChan chan kafka.Event) error {
- span := p.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ span := p.tracer.StartProduceSpan(tMsg)
- // if the user has selected a delivery channel, we will wrap it and
- // wait for the delivery event to finish the span
- if deliveryChan != nil {
- oldDeliveryChan := deliveryChan
- deliveryChan = make(chan kafka.Event)
- go func() {
- var err error
- evt := <-deliveryChan
- if msg, ok := evt.(*kafka.Message); ok {
- // delivery errors are returned via TopicPartition.Error
- err = msg.TopicPartition.Error
- trackProduceOffsets(p.cfg.dataStreamsEnabled, msg, err)
- }
- span.Finish(tracer.WithError(err))
- oldDeliveryChan <- evt
- }()
- }
+ var errChan chan error
+ deliveryChan, errChan = tracing.WrapDeliveryChannel(p.tracer, deliveryChan, span, wrapEvent)
+
+ p.tracer.SetProduceCheckpoint(tMsg)
- setProduceCheckpoint(p.cfg.dataStreamsEnabled, p.libraryVersion, msg)
err := p.Producer.Produce(msg, deliveryChan)
- // with no delivery channel or enqueue error, finish immediately
- if err != nil || deliveryChan == nil {
- span.Finish(tracer.WithError(err))
+ if err != nil {
+ if errChan != nil {
+ errChan <- err
+ } else {
+ // with no delivery channel or enqueue error, finish immediately
+ span.Finish(tracer.WithError(err))
+ }
}
-
return err
}
@@ -364,57 +216,3 @@ func (p *Producer) Produce(msg *kafka.Message, deliveryChan chan kafka.Event) er
func (p *Producer) ProduceChannel() chan *kafka.Message {
return p.produceChannel
}
-
-func (p *Producer) traceEventsChannel(in chan kafka.Event) chan kafka.Event {
- if in == nil {
- return nil
- }
- out := make(chan kafka.Event, 1)
- go func() {
- defer close(out)
- for evt := range in {
- if msg, ok := evt.(*kafka.Message); ok {
- trackProduceOffsets(p.cfg.dataStreamsEnabled, msg, msg.TopicPartition.Error)
- }
- out <- evt
- }
- }()
- return out
-}
-
-func setConsumeCheckpoint(dataStreamsEnabled bool, groupID string, msg *kafka.Message) {
- if !dataStreamsEnabled || msg == nil {
- return
- }
- edges := []string{"direction:in", "topic:" + *msg.TopicPartition.Topic, "type:kafka"}
- if groupID != "" {
- edges = append(edges, "group:"+groupID)
- }
- carrier := NewMessageCarrier(msg)
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(datastreams.ExtractFromBase64Carrier(context.Background(), carrier), options.CheckpointParams{PayloadSize: getMsgSize(msg)}, edges...)
- if !ok {
- return
- }
- datastreams.InjectToBase64Carrier(ctx, carrier)
-}
-
-func setProduceCheckpoint(dataStreamsEnabled bool, version int, msg *kafka.Message) {
- if !dataStreamsEnabled || msg == nil {
- return
- }
- edges := []string{"direction:out", "topic:" + *msg.TopicPartition.Topic, "type:kafka"}
- carrier := NewMessageCarrier(msg)
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(datastreams.ExtractFromBase64Carrier(context.Background(), carrier), options.CheckpointParams{PayloadSize: getMsgSize(msg)}, edges...)
- if !ok || version < 0x000b0400 {
- // headers not supported before librdkafka >=0.11.4
- return
- }
- datastreams.InjectToBase64Carrier(ctx, carrier)
-}
-
-func getMsgSize(msg *kafka.Message) (size int64) {
- for _, header := range msg.Headers {
- size += int64(len(header.Key) + len(header.Value))
- }
- return size + int64(len(msg.Value)+len(msg.Key))
-}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka_test.go b/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka_test.go
index 77305ab60b..aca781ddbd 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka_test.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka_test.go
@@ -9,7 +9,6 @@ import (
"context"
"errors"
"os"
- "strconv"
"strings"
"testing"
"time"
@@ -17,6 +16,7 @@ import (
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "go.uber.org/goleak"
"github.com/DataDog/dd-trace-go/v2/datastreams"
"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
@@ -29,82 +29,6 @@ var (
testTopic = "gotest"
)
-type consumerActionFn func(c *Consumer) (*kafka.Message, error)
-
-func produceThenConsume(t *testing.T, consumerAction consumerActionFn, producerOpts []Option, consumerOpts []Option) ([]*mocktracer.Span, *kafka.Message) {
- if _, ok := os.LookupEnv("INTEGRATION"); !ok {
- t.Skip("to enable integration test, set the INTEGRATION environment variable")
- }
- mt := mocktracer.Start()
- defer mt.Stop()
-
- // first write a message to the topic
- p, err := NewProducer(&kafka.ConfigMap{
- "bootstrap.servers": "127.0.0.1:9092",
- "go.delivery.reports": true,
- }, producerOpts...)
- require.NoError(t, err)
-
- delivery := make(chan kafka.Event, 1)
- err = p.Produce(&kafka.Message{
- TopicPartition: kafka.TopicPartition{
- Topic: &testTopic,
- Partition: 0,
- },
- Key: []byte("key2"),
- Value: []byte("value2"),
- }, delivery)
- require.NoError(t, err)
-
- msg1, _ := (<-delivery).(*kafka.Message)
- p.Close()
-
- // next attempt to consume the message
- c, err := NewConsumer(&kafka.ConfigMap{
- "group.id": testGroupID,
- "bootstrap.servers": "127.0.0.1:9092",
- "fetch.wait.max.ms": 500,
- "socket.timeout.ms": 1500,
- "session.timeout.ms": 1500,
- "enable.auto.offset.store": false,
- }, consumerOpts...)
- require.NoError(t, err)
-
- err = c.Assign([]kafka.TopicPartition{
- {Topic: &testTopic, Partition: 0, Offset: msg1.TopicPartition.Offset},
- })
- require.NoError(t, err)
-
- msg2, err := consumerAction(c)
- require.NoError(t, err)
- _, err = c.CommitMessage(msg2)
- require.NoError(t, err)
- assert.Equal(t, msg1.String(), msg2.String())
- err = c.Close()
- require.NoError(t, err)
-
- spans := mt.FinishedSpans()
- require.Len(t, spans, 2)
- // they should be linked via headers
- assert.Equal(t, spans[0].TraceID(), spans[1].TraceID())
-
- if c.cfg.dataStreamsEnabled {
- backlogs := mt.SentDSMBacklogs()
- toMap := func(b []mocktracer.DSMBacklog) map[string]struct{} {
- m := make(map[string]struct{})
- for _, b := range backlogs {
- m[strings.Join(b.Tags, "")] = struct{}{}
- }
- return m
- }
- backlogsMap := toMap(backlogs)
- require.Contains(t, backlogsMap, "consumer_group:"+testGroupID+"partition:0"+"topic:"+testTopic+"type:kafka_commit")
- require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_high_watermark")
- require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_produce")
- }
- return spans, msg2
-}
-
func TestConsumerChannel(t *testing.T) {
// we can test consuming via the Events channel by artifically sending
// messages. Testing .Poll is done via an integration test.
@@ -163,7 +87,7 @@ func TestConsumerChannel(t *testing.T) {
assert.Equal(t, "queue", s.Tag(ext.SpanType))
assert.Equal(t, float64(1), s.Tag(ext.MessagingKafkaPartition))
assert.Equal(t, 0.3, s.Tag(ext.EventSampleRate))
- assert.Equal(t, strconv.Itoa(i+1), s.Tag("offset"))
+ assert.EqualValues(t, kafka.Offset(i+1), s.Tag("offset"))
assert.Equal(t, "confluentinc/confluent-kafka-go/kafka.v2", s.Tag(ext.Component))
assert.Equal(t, ext.SpanKindConsumer, s.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s.Tag(ext.MessagingSystem))
@@ -178,30 +102,6 @@ func TestConsumerChannel(t *testing.T) {
}
}
-/*
-to run the integration test locally:
-
- docker network create confluent
-
- docker run --rm \
- --name zookeeper \
- --network confluent \
- -p 2181:2181 \
- -e ZOOKEEPER_CLIENT_PORT=2181 \
- confluentinc/cp-zookeeper:5.0.0
-
- docker run --rm \
- --name kafka \
- --network confluent \
- -p 9092:9092 \
- -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
- -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 \
- -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
- -e KAFKA_CREATE_TOPICS=gotest:1:1 \
- -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
- confluentinc/cp-kafka:5.0.0
-*/
-
func TestConsumerFunctional(t *testing.T) {
for _, tt := range []struct {
name string
@@ -265,6 +165,75 @@ func TestConsumerFunctional(t *testing.T) {
}
}
+// This tests the deprecated behavior of using cfg.context (set via WithContext) as the
+// parent span context for consumed messages, instead of the span context carried in the message headers.
+func TestDeprecatedContext(t *testing.T) {
+ if _, ok := os.LookupEnv("INTEGRATION"); !ok {
+ t.Skip("to enable integration test, set the INTEGRATION environment variable")
+ }
+
+ tracer.Start()
+ defer tracer.Stop()
+
+ // Create the span to be passed
+ parentSpan, ctx := tracer.StartSpanFromContext(context.Background(), "test_parent_context")
+
+ c, err := NewConsumer(&kafka.ConfigMap{
+ "go.events.channel.enable": true, // required for the events channel to be turned on
+ "group.id": testGroupID,
+ "socket.timeout.ms": 10,
+ "session.timeout.ms": 10,
+ "enable.auto.offset.store": false,
+ }, WithContext(ctx)) // Adds the parent context containing a span
+ assert.NoError(t, err)
+
+ err = c.Subscribe(testTopic, nil)
+ assert.NoError(t, err)
+
+ // This span context will be ignored
+ messageSpan, _ := tracer.StartSpanFromContext(context.Background(), "test_context_from_message")
+ messageSpanContext := messageSpan.Context()
+
+ /// Produce a message with a span
+ go func() {
+ msg := &kafka.Message{
+ TopicPartition: kafka.TopicPartition{
+ Topic: &testTopic,
+ Partition: 1,
+ Offset: 1,
+ },
+ Key: []byte("key1"),
+ Value: []byte("value1"),
+ }
+
+ // Inject the span context in the message to be produced
+ carrier := NewMessageCarrier(msg)
+ tracer.Inject(messageSpan.Context(), carrier)
+
+ c.Consumer.Events() <- msg
+
+ }()
+
+ msg := (<-c.Events()).(*kafka.Message)
+
+ // Extract the context from the message
+ carrier := NewMessageCarrier(msg)
+ spanContext, err := tracer.Extract(carrier)
+ assert.NoError(t, err)
+
+ parentContext := parentSpan.Context()
+
+ /// The context passed is the one from the parent context
+ assert.EqualValues(t, parentContext.TraceID(), spanContext.TraceID())
+ /// The context passed is not the one passed in the message
+ assert.NotEqualValues(t, messageSpanContext.TraceID(), spanContext.TraceID())
+
+ c.Close()
+ // wait for the events channel to be closed
+ <-c.Events()
+
+}
+
func TestCustomTags(t *testing.T) {
mt := mocktracer.Start()
defer mt.Stop()
@@ -275,10 +244,10 @@ func TestCustomTags(t *testing.T) {
"socket.timeout.ms": 10,
"session.timeout.ms": 10,
"enable.auto.offset.store": false,
- }, WithCustomTag("foo", func(msg *kafka.Message) interface{} {
+ }, WithCustomTag("foo", func(_ *kafka.Message) interface{} {
return "bar"
}), WithCustomTag("key", func(msg *kafka.Message) interface{} {
- return string(msg.Key)
+ return msg.Key
}))
assert.NoError(t, err)
@@ -310,3 +279,149 @@ func TestCustomTags(t *testing.T) {
assert.Equal(t, "bar", s.Tag("foo"))
assert.Equal(t, "key1", s.Tag("key"))
}
+
+type consumerActionFn func(c *Consumer) (*kafka.Message, error)
+
+// Test we don't leak goroutines and properly close the span when Produce returns an error.
+func TestProduceError(t *testing.T) {
+ defer func() {
+ err := goleak.Find()
+ if err != nil {
+ // if a goroutine is leaking, ensure it is not coming from this package
+ if strings.Contains(err.Error(), "contrib/confluentinc/confluent-kafka-go") {
+ assert.NoError(t, err, "found leaked goroutine(s) from this package")
+ }
+ }
+ }()
+
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // first write a message to the topic
+ p, err := NewProducer(&kafka.ConfigMap{
+ "bootstrap.servers": "127.0.0.1:9092",
+ "go.delivery.reports": true,
+ })
+ require.NoError(t, err)
+ defer p.Close()
+
+ // this empty message should cause an error in the Produce call.
+ topic := ""
+ msg := &kafka.Message{
+ TopicPartition: kafka.TopicPartition{
+ Topic: &topic,
+ },
+ }
+ deliveryChan := make(chan kafka.Event, 1)
+ err = p.Produce(msg, deliveryChan)
+ require.Error(t, err)
+ require.EqualError(t, err, "Local: Invalid argument or configuration")
+
+ select {
+ case <-deliveryChan:
+ assert.Fail(t, "there should be no events in the deliveryChan")
+ case <-time.After(1 * time.Second):
+ // assume there is no event
+ }
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 1)
+}
+
+func produceThenConsume(t *testing.T, consumerAction consumerActionFn, producerOpts []Option, consumerOpts []Option) ([]*mocktracer.Span, *kafka.Message) {
+ if _, ok := os.LookupEnv("INTEGRATION"); !ok {
+ t.Skip("to enable integration test, set the INTEGRATION environment variable")
+ }
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // first write a message to the topic
+ p, err := NewProducer(&kafka.ConfigMap{
+ "bootstrap.servers": "127.0.0.1:9092",
+ "go.delivery.reports": true,
+ }, producerOpts...)
+ require.NoError(t, err)
+
+ delivery := make(chan kafka.Event, 1)
+ err = p.Produce(&kafka.Message{
+ TopicPartition: kafka.TopicPartition{
+ Topic: &testTopic,
+ Partition: 0,
+ },
+ Key: []byte("key2"),
+ Value: []byte("value2"),
+ }, delivery)
+ require.NoError(t, err)
+
+ msg1, _ := (<-delivery).(*kafka.Message)
+ p.Close()
+
+ // next attempt to consume the message
+ c, err := NewConsumer(&kafka.ConfigMap{
+ "group.id": testGroupID,
+ "bootstrap.servers": "127.0.0.1:9092",
+ "fetch.wait.max.ms": 500,
+ "socket.timeout.ms": 1500,
+ "session.timeout.ms": 1500,
+ "enable.auto.offset.store": false,
+ }, consumerOpts...)
+ require.NoError(t, err)
+
+ err = c.Assign([]kafka.TopicPartition{
+ {Topic: &testTopic, Partition: 0, Offset: msg1.TopicPartition.Offset},
+ })
+ require.NoError(t, err)
+
+ msg2, err := consumerAction(c)
+ require.NoError(t, err)
+ _, err = c.CommitMessage(msg2)
+ require.NoError(t, err)
+ assert.Equal(t, msg1.String(), msg2.String())
+ err = c.Close()
+ require.NoError(t, err)
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 2)
+ // they should be linked via headers
+ assert.Equal(t, spans[0].TraceID(), spans[1].TraceID())
+
+ if c.tracer.DSMEnabled() {
+ backlogs := mt.SentDSMBacklogs()
+ toMap := func(_ []mocktracer.DSMBacklog) map[string]struct{} {
+ m := make(map[string]struct{})
+ for _, b := range backlogs {
+ m[strings.Join(b.Tags, "")] = struct{}{}
+ }
+ return m
+ }
+ backlogsMap := toMap(backlogs)
+ require.Contains(t, backlogsMap, "consumer_group:"+testGroupID+"partition:0"+"topic:"+testTopic+"type:kafka_commit")
+ require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_high_watermark")
+ require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_produce")
+ }
+ return spans, msg2
+}
+
+/*
+to run the integration test locally:
+
+ docker network create confluent
+
+ docker run --rm \
+ --name zookeeper \
+ --network confluent \
+ -p 2181:2181 \
+ -e ZOOKEEPER_CLIENT_PORT=2181 \
+ confluentinc/cp-zookeeper:5.0.0
+
+ docker run --rm \
+ --name kafka \
+ --network confluent \
+ -p 9092:9092 \
+ -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
+ -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 \
+ -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
+ -e KAFKA_CREATE_TOPICS=gotest:1:1 \
+ -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
+ confluentinc/cp-kafka:5.0.0
+*/
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/option.go b/contrib/confluentinc/confluent-kafka-go/kafka.v2/option.go
index 1ab1f228f3..0b2c3bbe11 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/option.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/option.go
@@ -6,125 +6,48 @@
package kafka
import (
- "context"
- "math"
- "net"
- "strings"
-
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
- "github.com/DataDog/dd-trace-go/v2/instrumentation"
+ tracing "github.com/DataDog/dd-trace-go/v2/contrib/confluentinc/confluent-kafka-go"
)
-type config struct {
- ctx context.Context
- consumerServiceName string
- producerServiceName string
- consumerSpanName string
- producerSpanName string
- analyticsRate float64
- bootstrapServers string
- groupID string
- tagFns map[string]func(msg *kafka.Message) interface{}
- dataStreamsEnabled bool
-}
-
// Option describes an option for the Kafka integration.
-type Option interface {
- apply(*config)
-}
+type Option = tracing.Option
// OptionFn represents options applicable to NewConsumer, NewProducer, WrapConsumer and WrapProducer.
-type OptionFn func(*config)
+type OptionFn = tracing.OptionFn
-func (fn OptionFn) apply(cfg *config) {
- fn(cfg)
-}
-
-func newConfig(opts ...Option) *config {
- cfg := &config{
- ctx: context.Background(),
- analyticsRate: instr.AnalyticsRate(false),
- }
- cfg.dataStreamsEnabled = instr.DataStreamsEnabled()
-
- cfg.consumerServiceName = instr.ServiceName(instrumentation.ComponentConsumer, nil)
- cfg.producerServiceName = instr.ServiceName(instrumentation.ComponentProducer, nil)
- cfg.consumerSpanName = instr.OperationName(instrumentation.ComponentConsumer, nil)
- cfg.producerSpanName = instr.OperationName(instrumentation.ComponentProducer, nil)
-
- for _, opt := range opts {
- if opt == nil {
- continue
- }
- opt.apply(cfg)
- }
- return cfg
-}
+// WithContext sets the config context to ctx.
+// Deprecated: This is deprecated in favor of passing the context
+// via the message headers
+var WithContext = tracing.WithContext
// WithService sets the config service name to serviceName.
-func WithService(serviceName string) OptionFn {
- return func(cfg *config) {
- cfg.consumerServiceName = serviceName
- cfg.producerServiceName = serviceName
- }
-}
+var WithService = tracing.WithService
// WithAnalytics enables Trace Analytics for all started spans.
-func WithAnalytics(on bool) OptionFn {
- return func(cfg *config) {
- if on {
- cfg.analyticsRate = 1.0
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalytics = tracing.WithAnalytics
// WithAnalyticsRate sets the sampling rate for Trace Analytics events
// correlated to started spans.
-func WithAnalyticsRate(rate float64) OptionFn {
- return func(cfg *config) {
- if rate >= 0.0 && rate <= 1.0 {
- cfg.analyticsRate = rate
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalyticsRate = tracing.WithAnalyticsRate
// WithCustomTag will cause the given tagFn to be evaluated after executing
// a query and attach the result to the span tagged by the key.
-func WithCustomTag(tag string, tagFn func(msg *kafka.Message) interface{}) OptionFn {
- return func(cfg *config) {
- if cfg.tagFns == nil {
- cfg.tagFns = make(map[string]func(msg *kafka.Message) interface{})
+func WithCustomTag(tag string, tagFn func(msg *kafka.Message) interface{}) Option {
+ wrapped := func(msg tracing.Message) interface{} {
+ if m, ok := msg.Unwrap().(*kafka.Message); ok {
+ return tagFn(m)
}
- cfg.tagFns[tag] = tagFn
+ return nil
}
+ return tracing.WithCustomTag(tag, wrapped)
}
// WithConfig extracts the config information for the client to be tagged
-func WithConfig(cg *kafka.ConfigMap) OptionFn {
- return func(cfg *config) {
- if groupID, err := cg.Get("group.id", ""); err == nil {
- cfg.groupID = groupID.(string)
- }
- if bs, err := cg.Get("bootstrap.servers", ""); err == nil && bs != "" {
- for _, addr := range strings.Split(bs.(string), ",") {
- host, _, err := net.SplitHostPort(addr)
- if err == nil {
- cfg.bootstrapServers = host
- return
- }
- }
- }
- }
+func WithConfig(cm *kafka.ConfigMap) Option {
+ return tracing.WithConfig(wrapConfigMap(cm))
}
// WithDataStreams enables the Data Streams monitoring product features: https://www.datadoghq.com/product/data-streams-monitoring/
-func WithDataStreams() OptionFn {
- return func(cfg *config) {
- cfg.dataStreamsEnabled = true
- }
-}
+var WithDataStreams = tracing.WithDataStreams
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/tracing.go b/contrib/confluentinc/confluent-kafka-go/kafka.v2/tracing.go
new file mode 100644
index 0000000000..26c22e009c
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/tracing.go
@@ -0,0 +1,163 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package kafka
+
+import (
+ "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+
+ tracing "github.com/DataDog/dd-trace-go/v2/contrib/confluentinc/confluent-kafka-go"
+)
+
+type wMessage struct {
+ *kafka.Message
+}
+
+func wrapMessage(msg *kafka.Message) tracing.Message {
+ if msg == nil {
+ return nil
+ }
+ return &wMessage{msg}
+}
+
+func (w *wMessage) Unwrap() any {
+ return w.Message
+}
+
+func (w *wMessage) GetValue() []byte {
+ return w.Message.Value
+}
+
+func (w *wMessage) GetKey() []byte {
+ return w.Message.Key
+}
+
+func (w *wMessage) GetHeaders() []tracing.Header {
+ hs := make([]tracing.Header, 0, len(w.Headers))
+ for _, h := range w.Headers {
+ hs = append(hs, wrapHeader(h))
+ }
+ return hs
+}
+
+func (w *wMessage) SetHeaders(headers []tracing.Header) {
+ hs := make([]kafka.Header, 0, len(headers))
+ for _, h := range headers {
+ hs = append(hs, kafka.Header{
+ Key: h.GetKey(),
+ Value: h.GetValue(),
+ })
+ }
+ w.Message.Headers = hs
+}
+
+func (w *wMessage) GetTopicPartition() tracing.TopicPartition {
+ return wrapTopicPartition(w.Message.TopicPartition)
+}
+
+type wHeader struct {
+ kafka.Header
+}
+
+func wrapHeader(h kafka.Header) tracing.Header {
+ return &wHeader{h}
+}
+
+func (w wHeader) GetKey() string {
+ return w.Header.Key
+}
+
+func (w wHeader) GetValue() []byte {
+ return w.Header.Value
+}
+
+type wTopicPartition struct {
+ kafka.TopicPartition
+}
+
+func wrapTopicPartition(tp kafka.TopicPartition) tracing.TopicPartition {
+ return wTopicPartition{tp}
+}
+
+func wrapTopicPartitions(tps []kafka.TopicPartition) []tracing.TopicPartition {
+ wtps := make([]tracing.TopicPartition, 0, len(tps))
+ for _, tp := range tps {
+ wtps = append(wtps, wTopicPartition{tp})
+ }
+ return wtps
+}
+
+func (w wTopicPartition) GetTopic() string {
+ if w.Topic == nil {
+ return ""
+ }
+ return *w.Topic
+}
+
+func (w wTopicPartition) GetPartition() int32 {
+ return w.Partition
+}
+
+func (w wTopicPartition) GetOffset() int64 {
+ return int64(w.Offset)
+}
+
+func (w wTopicPartition) GetError() error {
+ return w.Error
+}
+
+type wEvent struct {
+ kafka.Event
+}
+
+func wrapEvent(event kafka.Event) tracing.Event {
+ return wEvent{event}
+}
+
+func (w wEvent) KafkaMessage() (tracing.Message, bool) {
+ if m, ok := w.Event.(*kafka.Message); ok {
+ return wrapMessage(m), true
+ }
+ return nil, false
+}
+
+func (w wEvent) KafkaOffsetsCommitted() (tracing.OffsetsCommitted, bool) {
+ if oc, ok := w.Event.(kafka.OffsetsCommitted); ok {
+ return wrapOffsetsCommitted(oc), true
+ }
+ return nil, false
+}
+
+type wOffsetsCommitted struct {
+ kafka.OffsetsCommitted
+}
+
+func wrapOffsetsCommitted(oc kafka.OffsetsCommitted) tracing.OffsetsCommitted {
+ return wOffsetsCommitted{oc}
+}
+
+func (w wOffsetsCommitted) GetError() error {
+ return w.Error
+}
+
+func (w wOffsetsCommitted) GetOffsets() []tracing.TopicPartition {
+ ttps := make([]tracing.TopicPartition, 0, len(w.Offsets))
+ for _, tp := range w.Offsets {
+ ttps = append(ttps, wrapTopicPartition(tp))
+ }
+ return ttps
+}
+
+type wConfigMap struct {
+ cfg *kafka.ConfigMap
+}
+
+func wrapConfigMap(cm *kafka.ConfigMap) tracing.ConfigMap {
+ return &wConfigMap{cm}
+}
+
+func (w *wConfigMap) Get(key string, defVal any) (any, error) {
+ return w.cfg.Get(key, defVal)
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/go.mod b/contrib/confluentinc/confluent-kafka-go/kafka/go.mod
index 0057055a77..df6a6be8c0 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka/go.mod
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/go.mod
@@ -2,12 +2,11 @@ module github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/ka
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/confluentinc/confluent-kafka-go v1.9.2
github.com/stretchr/testify v1.9.0
+ go.uber.org/goleak v1.3.0
)
require (
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/headers.go b/contrib/confluentinc/confluent-kafka-go/kafka/headers.go
index 60d61cecb3..55896156ff 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka/headers.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/headers.go
@@ -6,48 +6,15 @@
package kafka
import (
- "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
-
"github.com/confluentinc/confluent-kafka-go/kafka"
-)
-
-// A MessageCarrier injects and extracts traces from a sarama.ProducerMessage.
-type MessageCarrier struct {
- msg *kafka.Message
-}
-var _ interface {
- tracer.TextMapReader
- tracer.TextMapWriter
-} = (*MessageCarrier)(nil)
-
-// ForeachKey iterates over every header.
-func (c MessageCarrier) ForeachKey(handler func(key, val string) error) error {
- for _, h := range c.msg.Headers {
- err := handler(string(h.Key), string(h.Value))
- if err != nil {
- return err
- }
- }
- return nil
-}
+ tracing "github.com/DataDog/dd-trace-go/v2/contrib/confluentinc/confluent-kafka-go"
+)
-// Set sets a header.
-func (c MessageCarrier) Set(key, val string) {
- // ensure uniqueness of keys
- for i := 0; i < len(c.msg.Headers); i++ {
- if string(c.msg.Headers[i].Key) == key {
- c.msg.Headers = append(c.msg.Headers[:i], c.msg.Headers[i+1:]...)
- i--
- }
- }
- c.msg.Headers = append(c.msg.Headers, kafka.Header{
- Key: key,
- Value: []byte(val),
- })
-}
+// A MessageCarrier injects and extracts traces from a kafka.Message.
+type MessageCarrier = tracing.MessageCarrier
// NewMessageCarrier creates a new MessageCarrier.
func NewMessageCarrier(msg *kafka.Message) MessageCarrier {
- return MessageCarrier{msg}
+ return tracing.NewMessageCarrier(wrapMessage(msg))
}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/kafka.go b/contrib/confluentinc/confluent-kafka-go/kafka/kafka.go
index 8793edb702..545ae2dc59 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka/kafka.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/kafka.go
@@ -7,27 +7,28 @@
package kafka // import "github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/kafka/v2"
import (
- "context"
- "math"
"time"
- "github.com/DataDog/dd-trace-go/v2/datastreams"
- "github.com/DataDog/dd-trace-go/v2/datastreams/options"
- "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+ tracing "github.com/DataDog/dd-trace-go/v2/contrib/confluentinc/confluent-kafka-go"
"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation"
"github.com/confluentinc/confluent-kafka-go/kafka"
)
const (
- componentName = instrumentation.PackageConfluentKafkaGo
- pkgPath = "contrib/confluentinc/confluent-kafka-go/kafka"
+ ckgoVersion = tracing.CKGoVersion1
+ pkgPath = "contrib/confluentinc/confluent-kafka-go/kafka"
)
var instr *instrumentation.Instrumentation
func init() {
- instr = instrumentation.Load(instrumentation.PackageConfluentKafkaGo)
+ instr = tracing.Package(ckgoVersion)
+}
+
+func newKafkaTracer(opts ...Option) *tracing.KafkaTracer {
+ v, _ := kafka.LibraryVersion()
+ return tracing.NewKafkaTracer(tracing.CKGoVersion1, v, opts...)
}
// NewConsumer calls kafka.NewConsumer and wraps the resulting Consumer.
@@ -53,93 +54,21 @@ func NewProducer(conf *kafka.ConfigMap, opts ...Option) (*Producer, error) {
// A Consumer wraps a kafka.Consumer.
type Consumer struct {
*kafka.Consumer
- cfg *config
+ tracer *tracing.KafkaTracer
events chan kafka.Event
- prev *tracer.Span
}
// WrapConsumer wraps a kafka.Consumer so that any consumed events are traced.
func WrapConsumer(c *kafka.Consumer, opts ...Option) *Consumer {
wrapped := &Consumer{
Consumer: c,
- cfg: newConfig(opts...),
+ tracer: newKafkaTracer(opts...),
}
- instr.Logger().Debug("%s: Wrapping Consumer: %#v", pkgPath, wrapped.cfg)
- wrapped.events = wrapped.traceEventsChannel(c.Events())
+ instr.Logger().Debug("%s: Wrapping Consumer: %#v", pkgPath, wrapped.tracer)
+ wrapped.events = tracing.WrapConsumeEventsChannel(wrapped.tracer, c.Events(), c, wrapEvent)
return wrapped
}
-func (c *Consumer) traceEventsChannel(in chan kafka.Event) chan kafka.Event {
- // in will be nil when consuming via the events channel is not enabled
- if in == nil {
- return nil
- }
-
- out := make(chan kafka.Event, 1)
- go func() {
- defer close(out)
- for evt := range in {
- var next *tracer.Span
-
- // only trace messages
- if msg, ok := evt.(*kafka.Message); ok {
- next = c.startSpan(msg)
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- } else if offset, ok := evt.(kafka.OffsetsCommitted); ok {
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, offset.Offsets, offset.Error)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, offset.Offsets)
- }
-
- out <- evt
-
- if c.prev != nil {
- c.prev.Finish()
- }
- c.prev = next
- }
- // finish any remaining span
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
- }
- }()
- return out
-}
-
-func (c *Consumer) startSpan(msg *kafka.Message) *tracer.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(c.cfg.consumerServiceName),
- tracer.ResourceName("Consume Topic " + *msg.TopicPartition.Topic),
- tracer.SpanType(ext.SpanTypeMessageConsumer),
- tracer.Tag(ext.MessagingKafkaPartition, msg.TopicPartition.Partition),
- tracer.Tag("offset", msg.TopicPartition.Offset),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Measured(),
- }
- if c.cfg.bootstrapServers != "" {
- opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, c.cfg.bootstrapServers))
- }
- if c.cfg.tagFns != nil {
- for key, tagFn := range c.cfg.tagFns {
- opts = append(opts, tracer.Tag(key, tagFn(msg)))
- }
- }
- if !math.IsNaN(c.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, c.cfg.analyticsRate))
- }
- // kafka supports headers, so try to extract a span context
- carrier := NewMessageCarrier(msg)
- if spanctx, err := tracer.Extract(carrier); err == nil {
- opts = append(opts, tracer.ChildOf(spanctx))
- }
- span, _ := tracer.StartSpanFromContext(c.cfg.ctx, c.cfg.consumerSpanName, opts...)
- // reinject the span context so consumers can pick it up
- tracer.Inject(span.Context(), carrier)
- return span
-}
-
// Close calls the underlying Consumer.Close and if polling is enabled, finishes
// any remaining span.
func (c *Consumer) Close() error {
@@ -147,178 +76,111 @@ func (c *Consumer) Close() error {
// we only close the previous span if consuming via the events channel is
// not enabled, because otherwise there would be a data race from the
// consuming goroutine.
- if c.events == nil && c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.events == nil && c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
return err
}
-// Events returns the kafka Events channel (if enabled). Message events will be
+// Events returns the kafka Events channel (if enabled). msg events will be
// traced.
func (c *Consumer) Events() chan kafka.Event {
return c.events
}
-// Poll polls the consumer for messages or events. Message will be
+// Poll polls the consumer for messages or events. msg will be
// traced.
func (c *Consumer) Poll(timeoutMS int) (event kafka.Event) {
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
evt := c.Consumer.Poll(timeoutMS)
if msg, ok := evt.(*kafka.Message); ok {
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- c.prev = c.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ c.tracer.SetConsumeCheckpoint(tMsg)
+ c.tracer.PrevSpan = c.tracer.StartConsumeSpan(tMsg)
} else if offset, ok := evt.(kafka.OffsetsCommitted); ok {
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, offset.Offsets, offset.Error)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, offset.Offsets)
+ tOffsets := wrapTopicPartitions(offset.Offsets)
+ c.tracer.TrackCommitOffsets(tOffsets, offset.Error)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
}
return evt
}
-func (c *Consumer) trackHighWatermark(dataStreamsEnabled bool, offsets []kafka.TopicPartition) {
- if !dataStreamsEnabled {
- return
- }
- for _, tp := range offsets {
- if _, high, err := c.Consumer.GetWatermarkOffsets(*tp.Topic, tp.Partition); err == nil {
- tracer.TrackKafkaHighWatermarkOffset("", *tp.Topic, tp.Partition, high)
- }
- }
-}
-
-// ReadMessage polls the consumer for a message. Message will be traced.
+// ReadMessage polls the consumer for a message. msg will be traced.
func (c *Consumer) ReadMessage(timeout time.Duration) (*kafka.Message, error) {
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
msg, err := c.Consumer.ReadMessage(timeout)
if err != nil {
return nil, err
}
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- c.prev = c.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ c.tracer.SetConsumeCheckpoint(tMsg)
+ c.tracer.PrevSpan = c.tracer.StartConsumeSpan(tMsg)
return msg, nil
}
// Commit commits current offsets and tracks the commit offsets if data streams is enabled.
func (c *Consumer) Commit() ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.Commit()
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
// CommitMessage commits a message and tracks the commit offsets if data streams is enabled.
func (c *Consumer) CommitMessage(msg *kafka.Message) ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.CommitMessage(msg)
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
// CommitOffsets commits provided offsets and tracks the commit offsets if data streams is enabled.
func (c *Consumer) CommitOffsets(offsets []kafka.TopicPartition) ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.CommitOffsets(offsets)
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
-func commitOffsets(dataStreamsEnabled bool, groupID string, tps []kafka.TopicPartition, err error) {
- if err != nil || groupID == "" || !dataStreamsEnabled {
- return
- }
- for _, tp := range tps {
- tracer.TrackKafkaCommitOffset(groupID, *tp.Topic, tp.Partition, int64(tp.Offset))
- }
-}
-
-func trackProduceOffsets(dataStreamsEnabled bool, msg *kafka.Message, err error) {
- if err != nil || !dataStreamsEnabled || msg.TopicPartition.Topic == nil {
- return
- }
- tracer.TrackKafkaProduceOffset(*msg.TopicPartition.Topic, msg.TopicPartition.Partition, int64(msg.TopicPartition.Offset))
-}
-
// A Producer wraps a kafka.Producer.
type Producer struct {
*kafka.Producer
- cfg *config
+ tracer *tracing.KafkaTracer
produceChannel chan *kafka.Message
events chan kafka.Event
- libraryVersion int
}
// WrapProducer wraps a kafka.Producer so requests are traced.
func WrapProducer(p *kafka.Producer, opts ...Option) *Producer {
- version, _ := kafka.LibraryVersion()
wrapped := &Producer{
- Producer: p,
- cfg: newConfig(opts...),
- events: p.Events(),
- libraryVersion: version,
+ Producer: p,
+ tracer: newKafkaTracer(opts...),
+ events: p.Events(),
}
- instr.Logger().Debug("%s: Wrapping Producer: %#v", pkgPath, wrapped.cfg)
- wrapped.produceChannel = wrapped.traceProduceChannel(p.ProduceChannel())
- if wrapped.cfg.dataStreamsEnabled {
- wrapped.events = wrapped.traceEventsChannel(p.Events())
+ instr.Logger().Debug("%s: Wrapping Producer: %#v", pkgPath, wrapped.tracer)
+ wrapped.produceChannel = tracing.WrapProduceChannel(wrapped.tracer, p.ProduceChannel(), wrapMessage)
+ if wrapped.tracer.DSMEnabled() {
+ wrapped.events = tracing.WrapProduceEventsChannel(wrapped.tracer, p.Events(), wrapEvent)
}
return wrapped
}
-// Events returns the kafka Events channel (if enabled). Message events will be monitored
+// Events returns the kafka Events channel (if enabled). msg events will be monitored
// with data streams monitoring (if enabled)
func (p *Producer) Events() chan kafka.Event {
return p.events
}
-func (p *Producer) traceProduceChannel(out chan *kafka.Message) chan *kafka.Message {
- if out == nil {
- return out
- }
- in := make(chan *kafka.Message, 1)
- go func() {
- for msg := range in {
- span := p.startSpan(msg)
- setProduceCheckpoint(p.cfg.dataStreamsEnabled, p.libraryVersion, msg)
- out <- msg
- span.Finish()
- }
- }()
- return in
-}
-
-func (p *Producer) startSpan(msg *kafka.Message) *tracer.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(p.cfg.producerServiceName),
- tracer.ResourceName("Produce Topic " + *msg.TopicPartition.Topic),
- tracer.SpanType(ext.SpanTypeMessageProducer),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Tag(ext.MessagingKafkaPartition, msg.TopicPartition.Partition),
- }
- if p.cfg.bootstrapServers != "" {
- opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, p.cfg.bootstrapServers))
- }
- if !math.IsNaN(p.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, p.cfg.analyticsRate))
- }
- // if there's a span context in the headers, use that as the parent
- carrier := NewMessageCarrier(msg)
- if spanctx, err := tracer.Extract(carrier); err == nil {
- opts = append(opts, tracer.ChildOf(spanctx))
- }
- span, _ := tracer.StartSpanFromContext(p.cfg.ctx, p.cfg.producerSpanName, opts...)
- // inject the span context so consumers can pick it up
- tracer.Inject(span.Context(), carrier)
- return span
-}
-
// Close calls the underlying Producer.Close and also closes the internal
// wrapping producer channel.
func (p *Producer) Close() {
@@ -328,33 +190,23 @@ func (p *Producer) Close() {
// Produce calls the underlying Producer.Produce and traces the request.
func (p *Producer) Produce(msg *kafka.Message, deliveryChan chan kafka.Event) error {
- span := p.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ span := p.tracer.StartProduceSpan(tMsg)
- // if the user has selected a delivery channel, we will wrap it and
- // wait for the delivery event to finish the span
- if deliveryChan != nil {
- oldDeliveryChan := deliveryChan
- deliveryChan = make(chan kafka.Event)
- go func() {
- var err error
- evt := <-deliveryChan
- if msg, ok := evt.(*kafka.Message); ok {
- // delivery errors are returned via TopicPartition.Error
- err = msg.TopicPartition.Error
- trackProduceOffsets(p.cfg.dataStreamsEnabled, msg, err)
- }
- span.Finish(tracer.WithError(err))
- oldDeliveryChan <- evt
- }()
- }
+ var errChan chan error
+ deliveryChan, errChan = tracing.WrapDeliveryChannel(p.tracer, deliveryChan, span, wrapEvent)
+
+ p.tracer.SetProduceCheckpoint(tMsg)
- setProduceCheckpoint(p.cfg.dataStreamsEnabled, p.libraryVersion, msg)
err := p.Producer.Produce(msg, deliveryChan)
- // with no delivery channel or enqueue error, finish immediately
- if err != nil || deliveryChan == nil {
- span.Finish(tracer.WithError(err))
+ if err != nil {
+ if errChan != nil {
+ errChan <- err
+ } else {
+ // with no delivery channel or enqueue error, finish immediately
+ span.Finish(tracer.WithError(err))
+ }
}
-
return err
}
@@ -363,57 +215,3 @@ func (p *Producer) Produce(msg *kafka.Message, deliveryChan chan kafka.Event) er
func (p *Producer) ProduceChannel() chan *kafka.Message {
return p.produceChannel
}
-
-func (p *Producer) traceEventsChannel(in chan kafka.Event) chan kafka.Event {
- if in == nil {
- return nil
- }
- out := make(chan kafka.Event, 1)
- go func() {
- defer close(out)
- for evt := range in {
- if msg, ok := evt.(*kafka.Message); ok {
- trackProduceOffsets(p.cfg.dataStreamsEnabled, msg, msg.TopicPartition.Error)
- }
- out <- evt
- }
- }()
- return out
-}
-
-func setConsumeCheckpoint(dataStreamsEnabled bool, groupID string, msg *kafka.Message) {
- if !dataStreamsEnabled || msg == nil {
- return
- }
- edges := []string{"direction:in", "topic:" + *msg.TopicPartition.Topic, "type:kafka"}
- if groupID != "" {
- edges = append(edges, "group:"+groupID)
- }
- carrier := NewMessageCarrier(msg)
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(datastreams.ExtractFromBase64Carrier(context.Background(), carrier), options.CheckpointParams{PayloadSize: getMsgSize(msg)}, edges...)
- if !ok {
- return
- }
- datastreams.InjectToBase64Carrier(ctx, carrier)
-}
-
-func setProduceCheckpoint(dataStreamsEnabled bool, version int, msg *kafka.Message) {
- if !dataStreamsEnabled || msg == nil {
- return
- }
- edges := []string{"direction:out", "topic:" + *msg.TopicPartition.Topic, "type:kafka"}
- carrier := NewMessageCarrier(msg)
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(datastreams.ExtractFromBase64Carrier(context.Background(), carrier), options.CheckpointParams{PayloadSize: getMsgSize(msg)}, edges...)
- if !ok || version < 0x000b0400 {
- // headers not supported before librdkafka >=0.11.4
- return
- }
- datastreams.InjectToBase64Carrier(ctx, carrier)
-}
-
-func getMsgSize(msg *kafka.Message) (size int64) {
- for _, header := range msg.Headers {
- size += int64(len(header.Key) + len(header.Value))
- }
- return size + int64(len(msg.Value)+len(msg.Key))
-}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/kafka_test.go b/contrib/confluentinc/confluent-kafka-go/kafka/kafka_test.go
index 5fe0659997..a9a5df9662 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka/kafka_test.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/kafka_test.go
@@ -9,7 +9,6 @@ import (
"context"
"errors"
"os"
- "strconv"
"strings"
"testing"
"time"
@@ -21,6 +20,7 @@ import (
"github.com/confluentinc/confluent-kafka-go/kafka"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "go.uber.org/goleak"
)
var (
@@ -28,82 +28,6 @@ var (
testTopic = "gotest"
)
-type consumerActionFn func(c *Consumer) (*kafka.Message, error)
-
-func produceThenConsume(t *testing.T, consumerAction consumerActionFn, producerOpts []Option, consumerOpts []Option) ([]*mocktracer.Span, *kafka.Message) {
- if _, ok := os.LookupEnv("INTEGRATION"); !ok {
- t.Skip("to enable integration test, set the INTEGRATION environment variable")
- }
- mt := mocktracer.Start()
- defer mt.Stop()
-
- // first write a message to the topic
- p, err := NewProducer(&kafka.ConfigMap{
- "bootstrap.servers": "127.0.0.1:9092",
- "go.delivery.reports": true,
- }, producerOpts...)
- require.NoError(t, err)
-
- delivery := make(chan kafka.Event, 1)
- err = p.Produce(&kafka.Message{
- TopicPartition: kafka.TopicPartition{
- Topic: &testTopic,
- Partition: 0,
- },
- Key: []byte("key2"),
- Value: []byte("value2"),
- }, delivery)
- require.NoError(t, err)
-
- msg1, _ := (<-delivery).(*kafka.Message)
- p.Close()
-
- // next attempt to consume the message
- c, err := NewConsumer(&kafka.ConfigMap{
- "group.id": testGroupID,
- "bootstrap.servers": "127.0.0.1:9092",
- "fetch.wait.max.ms": 500,
- "socket.timeout.ms": 1500,
- "session.timeout.ms": 1500,
- "enable.auto.offset.store": false,
- }, consumerOpts...)
- require.NoError(t, err)
-
- err = c.Assign([]kafka.TopicPartition{
- {Topic: &testTopic, Partition: 0, Offset: msg1.TopicPartition.Offset},
- })
- require.NoError(t, err)
-
- msg2, err := consumerAction(c)
- require.NoError(t, err)
- _, err = c.CommitMessage(msg2)
- require.NoError(t, err)
- assert.Equal(t, msg1.String(), msg2.String())
- err = c.Close()
- require.NoError(t, err)
-
- spans := mt.FinishedSpans()
- require.Len(t, spans, 2)
- // they should be linked via headers
- assert.Equal(t, spans[0].TraceID(), spans[1].TraceID())
-
- if c.cfg.dataStreamsEnabled {
- backlogs := mt.SentDSMBacklogs()
- toMap := func(b []mocktracer.DSMBacklog) map[string]struct{} {
- m := make(map[string]struct{})
- for _, b := range backlogs {
- m[strings.Join(b.Tags, "")] = struct{}{}
- }
- return m
- }
- backlogsMap := toMap(backlogs)
- require.Contains(t, backlogsMap, "consumer_group:"+testGroupID+"partition:0"+"topic:"+testTopic+"type:kafka_commit")
- require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_high_watermark")
- require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_produce")
- }
- return spans, msg2
-}
-
func TestConsumerChannel(t *testing.T) {
// we can test consuming via the Events channel by artifically sending
// messages. Testing .Poll is done via an integration test.
@@ -162,7 +86,7 @@ func TestConsumerChannel(t *testing.T) {
assert.Equal(t, "queue", s.Tag(ext.SpanType))
assert.Equal(t, float64(1), s.Tag(ext.MessagingKafkaPartition))
assert.Equal(t, 0.3, s.Tag(ext.EventSampleRate))
- assert.Equal(t, strconv.Itoa(i+1), s.Tag("offset"))
+ assert.EqualValues(t, kafka.Offset(i+1), s.Tag("offset"))
assert.Equal(t, "confluentinc/confluent-kafka-go/kafka", s.Tag(ext.Component))
assert.Equal(t, ext.SpanKindConsumer, s.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s.Tag(ext.MessagingSystem))
@@ -177,30 +101,6 @@ func TestConsumerChannel(t *testing.T) {
}
}
-/*
-to run the integration test locally:
-
- docker network create confluent
-
- docker run --rm \
- --name zookeeper \
- --network confluent \
- -p 2181:2181 \
- -e ZOOKEEPER_CLIENT_PORT=2181 \
- confluentinc/cp-zookeeper:5.0.0
-
- docker run --rm \
- --name kafka \
- --network confluent \
- -p 9092:9092 \
- -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
- -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 \
- -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
- -e KAFKA_CREATE_TOPICS=gotest:1:1 \
- -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
- confluentinc/cp-kafka:5.0.0
-*/
-
func TestConsumerFunctional(t *testing.T) {
for _, tt := range []struct {
name string
@@ -264,6 +164,75 @@ func TestConsumerFunctional(t *testing.T) {
}
}
+// This tests the deprecated behavior of using cfg.context as the context passed via kafka messages
+// instead of the one passed in the message.
+func TestDeprecatedContext(t *testing.T) {
+ if _, ok := os.LookupEnv("INTEGRATION"); !ok {
+ t.Skip("to enable integration test, set the INTEGRATION environment variable")
+ }
+
+ tracer.Start()
+ defer tracer.Stop()
+
+ // Create the span to be passed
+ parentSpan, ctx := tracer.StartSpanFromContext(context.Background(), "test_parent_context")
+
+ c, err := NewConsumer(&kafka.ConfigMap{
+ "go.events.channel.enable": true, // required for the events channel to be turned on
+ "group.id": testGroupID,
+ "socket.timeout.ms": 10,
+ "session.timeout.ms": 10,
+ "enable.auto.offset.store": false,
+ }, WithContext(ctx)) // Adds the parent context containing a span
+ assert.NoError(t, err)
+
+ err = c.Subscribe(testTopic, nil)
+ assert.NoError(t, err)
+
+ // This span context will be ignored
+ messageSpan, _ := tracer.StartSpanFromContext(context.Background(), "test_context_from_message")
+ messageSpanContext := messageSpan.Context()
+
+ /// Produce a message with a span
+ go func() {
+ msg := &kafka.Message{
+ TopicPartition: kafka.TopicPartition{
+ Topic: &testTopic,
+ Partition: 1,
+ Offset: 1,
+ },
+ Key: []byte("key1"),
+ Value: []byte("value1"),
+ }
+
+ // Inject the span context in the message to be produced
+ carrier := NewMessageCarrier(msg)
+ tracer.Inject(messageSpan.Context(), carrier)
+
+ c.Consumer.Events() <- msg
+
+ }()
+
+ msg := (<-c.Events()).(*kafka.Message)
+
+ // Extract the context from the message
+ carrier := NewMessageCarrier(msg)
+ spanContext, err := tracer.Extract(carrier)
+ assert.NoError(t, err)
+
+ parentContext := parentSpan.Context()
+
+ /// The context passed is the one from the parent context
+ assert.EqualValues(t, parentContext.TraceID(), spanContext.TraceID())
+ /// The context passed is not the one passed in the message
+ assert.NotEqualValues(t, messageSpanContext.TraceID(), spanContext.TraceID())
+
+ c.Close()
+ // wait for the events channel to be closed
+ <-c.Events()
+
+}
+
func TestCustomTags(t *testing.T) {
mt := mocktracer.Start()
defer mt.Stop()
@@ -274,10 +243,10 @@ func TestCustomTags(t *testing.T) {
"socket.timeout.ms": 10,
"session.timeout.ms": 10,
"enable.auto.offset.store": false,
- }, WithCustomTag("foo", func(msg *kafka.Message) interface{} {
+ }, WithCustomTag("foo", func(_ *kafka.Message) interface{} {
return "bar"
}), WithCustomTag("key", func(msg *kafka.Message) interface{} {
- return string(msg.Key)
+ return msg.Key
}))
assert.NoError(t, err)
@@ -309,3 +278,148 @@ func TestCustomTags(t *testing.T) {
assert.Equal(t, "bar", s.Tag("foo"))
assert.Equal(t, "key1", s.Tag("key"))
}
+
+// Test we don't leak goroutines and properly close the span when Produce returns an error
+func TestProduceError(t *testing.T) {
+ defer func() {
+ err := goleak.Find()
+ if err != nil {
+ // if a goroutine is leaking, ensure it is not coming from this package
+ if strings.Contains(err.Error(), "contrib/confluentinc/confluent-kafka-go") {
+ assert.NoError(t, err, "found leaked goroutine(s) from this package")
+ }
+ }
+ }()
+
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // first write a message to the topic
+ p, err := NewProducer(&kafka.ConfigMap{
+ "bootstrap.servers": "127.0.0.1:9092",
+ "go.delivery.reports": true,
+ })
+ require.NoError(t, err)
+ defer p.Close()
+
+ topic := ""
+ msg := &kafka.Message{
+ TopicPartition: kafka.TopicPartition{
+ Topic: &topic,
+ },
+ }
+ deliveryChan := make(chan kafka.Event, 1)
+ err = p.Produce(msg, deliveryChan)
+ require.Error(t, err)
+ require.EqualError(t, err, "Local: Invalid argument or configuration")
+
+ select {
+ case <-deliveryChan:
+ assert.Fail(t, "there should be no events in the deliveryChan")
+ case <-time.After(1 * time.Second):
+ // assume there is no event
+ }
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 1)
+}
+
+type consumerActionFn func(c *Consumer) (*kafka.Message, error)
+
+func produceThenConsume(t *testing.T, consumerAction consumerActionFn, producerOpts []Option, consumerOpts []Option) ([]*mocktracer.Span, *kafka.Message) {
+ if _, ok := os.LookupEnv("INTEGRATION"); !ok {
+ t.Skip("to enable integration test, set the INTEGRATION environment variable")
+ }
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // first write a message to the topic
+ p, err := NewProducer(&kafka.ConfigMap{
+ "bootstrap.servers": "127.0.0.1:9092",
+ "go.delivery.reports": true,
+ }, producerOpts...)
+ require.NoError(t, err)
+
+ delivery := make(chan kafka.Event, 1)
+ err = p.Produce(&kafka.Message{
+ TopicPartition: kafka.TopicPartition{
+ Topic: &testTopic,
+ Partition: 0,
+ },
+ Key: []byte("key2"),
+ Value: []byte("value2"),
+ }, delivery)
+ require.NoError(t, err)
+
+ msg1, _ := (<-delivery).(*kafka.Message)
+ p.Close()
+
+ // next attempt to consume the message
+ c, err := NewConsumer(&kafka.ConfigMap{
+ "group.id": testGroupID,
+ "bootstrap.servers": "127.0.0.1:9092",
+ "fetch.wait.max.ms": 500,
+ "socket.timeout.ms": 1500,
+ "session.timeout.ms": 1500,
+ "enable.auto.offset.store": false,
+ }, consumerOpts...)
+ require.NoError(t, err)
+
+ err = c.Assign([]kafka.TopicPartition{
+ {Topic: &testTopic, Partition: 0, Offset: msg1.TopicPartition.Offset},
+ })
+ require.NoError(t, err)
+
+ msg2, err := consumerAction(c)
+ require.NoError(t, err)
+ _, err = c.CommitMessage(msg2)
+ require.NoError(t, err)
+ assert.Equal(t, msg1.String(), msg2.String())
+ err = c.Close()
+ require.NoError(t, err)
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 2)
+ // they should be linked via headers
+ assert.Equal(t, spans[0].TraceID(), spans[1].TraceID())
+
+ if c.tracer.DSMEnabled() {
+ backlogs := mt.SentDSMBacklogs()
+ toMap := func(_ []mocktracer.DSMBacklog) map[string]struct{} {
+ m := make(map[string]struct{})
+ for _, b := range backlogs {
+ m[strings.Join(b.Tags, "")] = struct{}{}
+ }
+ return m
+ }
+ backlogsMap := toMap(backlogs)
+ require.Contains(t, backlogsMap, "consumer_group:"+testGroupID+"partition:0"+"topic:"+testTopic+"type:kafka_commit")
+ require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_high_watermark")
+ require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_produce")
+ }
+ return spans, msg2
+}
+
+/*
+to run the integration test locally:
+
+ docker network create confluent
+
+ docker run --rm \
+ --name zookeeper \
+ --network confluent \
+ -p 2181:2181 \
+ -e ZOOKEEPER_CLIENT_PORT=2181 \
+ confluentinc/cp-zookeeper:5.0.0
+
+ docker run --rm \
+ --name kafka \
+ --network confluent \
+ -p 9092:9092 \
+ -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
+ -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 \
+ -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
+ -e KAFKA_CREATE_TOPICS=gotest:1:1 \
+ -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
+ confluentinc/cp-kafka:5.0.0
+*/
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/option.go b/contrib/confluentinc/confluent-kafka-go/kafka/option.go
index 38039c4e3f..08ed0dbd01 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka/option.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/option.go
@@ -6,125 +6,47 @@
package kafka
import (
- "context"
- "math"
- "net"
- "strings"
-
"github.com/confluentinc/confluent-kafka-go/kafka"
- "github.com/DataDog/dd-trace-go/v2/instrumentation"
+ tracing "github.com/DataDog/dd-trace-go/v2/contrib/confluentinc/confluent-kafka-go"
)
-type config struct {
- ctx context.Context
- consumerServiceName string
- producerServiceName string
- consumerSpanName string
- producerSpanName string
- analyticsRate float64
- bootstrapServers string
- groupID string
- tagFns map[string]func(msg *kafka.Message) interface{}
- dataStreamsEnabled bool
-}
-
-// Option describes options for the Kafka integration.
-type Option interface {
- apply(*config)
-}
+// An Option customizes the config.
+type Option = tracing.Option
-// OptionFn represents options applicable to NewConsumer, NewProducer, WrapConsumer and WrapProducer.
-type OptionFn func(*config)
+type OptionFn = tracing.OptionFn
-func (fn OptionFn) apply(cfg *config) {
- fn(cfg)
-}
-
-func newConfig(opts ...Option) *config {
- cfg := &config{
- ctx: context.Background(),
- analyticsRate: instr.AnalyticsRate(false),
- }
- cfg.dataStreamsEnabled = instr.DataStreamsEnabled()
-
- cfg.consumerServiceName = instr.ServiceName(instrumentation.ComponentConsumer, nil)
- cfg.producerServiceName = instr.ServiceName(instrumentation.ComponentProducer, nil)
- cfg.consumerSpanName = instr.OperationName(instrumentation.ComponentConsumer, nil)
- cfg.producerSpanName = instr.OperationName(instrumentation.ComponentProducer, nil)
-
- for _, opt := range opts {
- if opt == nil {
- continue
- }
- opt.apply(cfg)
- }
- return cfg
-}
+// WithContext sets the config context to ctx.
+// Deprecated: This is deprecated in favor of passing the context
+// via the message headers
+var WithContext = tracing.WithContext
// WithService sets the config service name to serviceName.
-func WithService(serviceName string) OptionFn {
- return func(cfg *config) {
- cfg.consumerServiceName = serviceName
- cfg.producerServiceName = serviceName
- }
-}
+var WithService = tracing.WithService
// WithAnalytics enables Trace Analytics for all started spans.
-func WithAnalytics(on bool) OptionFn {
- return func(cfg *config) {
- if on {
- cfg.analyticsRate = 1.0
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalytics = tracing.WithAnalytics
// WithAnalyticsRate sets the sampling rate for Trace Analytics events
// correlated to started spans.
-func WithAnalyticsRate(rate float64) OptionFn {
- return func(cfg *config) {
- if rate >= 0.0 && rate <= 1.0 {
- cfg.analyticsRate = rate
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalyticsRate = tracing.WithAnalyticsRate
// WithCustomTag will cause the given tagFn to be evaluated after executing
// a query and attach the result to the span tagged by the key.
-func WithCustomTag(tag string, tagFn func(msg *kafka.Message) interface{}) OptionFn {
- return func(cfg *config) {
- if cfg.tagFns == nil {
- cfg.tagFns = make(map[string]func(msg *kafka.Message) interface{})
+func WithCustomTag(tag string, tagFn func(msg *kafka.Message) interface{}) tracing.OptionFn {
+ wrapped := func(msg tracing.Message) interface{} {
+ if m, ok := msg.Unwrap().(*kafka.Message); ok {
+ return tagFn(m)
}
- cfg.tagFns[tag] = tagFn
+ return nil
}
+ return tracing.WithCustomTag(tag, wrapped)
}
// WithConfig extracts the config information for the client to be tagged
-func WithConfig(cg *kafka.ConfigMap) OptionFn {
- return func(cfg *config) {
- if groupID, err := cg.Get("group.id", ""); err == nil {
- cfg.groupID = groupID.(string)
- }
- if bs, err := cg.Get("bootstrap.servers", ""); err == nil && bs != "" {
- for _, addr := range strings.Split(bs.(string), ",") {
- host, _, err := net.SplitHostPort(addr)
- if err == nil {
- cfg.bootstrapServers = host
- return
- }
- }
- }
- }
+func WithConfig(cm *kafka.ConfigMap) tracing.OptionFn {
+ return tracing.WithConfig(wrapConfigMap(cm))
}
// WithDataStreams enables the Data Streams monitoring product features: https://www.datadoghq.com/product/data-streams-monitoring/
-func WithDataStreams() OptionFn {
- return func(cfg *config) {
- cfg.dataStreamsEnabled = true
- }
-}
+var WithDataStreams = tracing.WithDataStreams
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/tracing.go b/contrib/confluentinc/confluent-kafka-go/kafka/tracing.go
new file mode 100644
index 0000000000..9b0eec52a6
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/tracing.go
@@ -0,0 +1,163 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package kafka
+
+import (
+ "github.com/confluentinc/confluent-kafka-go/kafka"
+
+ tracing "github.com/DataDog/dd-trace-go/v2/contrib/confluentinc/confluent-kafka-go"
+)
+
+type wMessage struct {
+ *kafka.Message
+}
+
+func wrapMessage(msg *kafka.Message) tracing.Message {
+ if msg == nil {
+ return nil
+ }
+ return &wMessage{msg}
+}
+
+func (w *wMessage) Unwrap() any {
+ return w.Message
+}
+
+func (w *wMessage) GetValue() []byte {
+ return w.Message.Value
+}
+
+func (w *wMessage) GetKey() []byte {
+ return w.Message.Key
+}
+
+func (w *wMessage) GetHeaders() []tracing.Header {
+ hs := make([]tracing.Header, 0, len(w.Headers))
+ for _, h := range w.Headers {
+ hs = append(hs, wrapHeader(h))
+ }
+ return hs
+}
+
+func (w *wMessage) SetHeaders(headers []tracing.Header) {
+ hs := make([]kafka.Header, 0, len(headers))
+ for _, h := range headers {
+ hs = append(hs, kafka.Header{
+ Key: h.GetKey(),
+ Value: h.GetValue(),
+ })
+ }
+ w.Message.Headers = hs
+}
+
+func (w *wMessage) GetTopicPartition() tracing.TopicPartition {
+ return wrapTopicPartition(w.Message.TopicPartition)
+}
+
+type wHeader struct {
+ kafka.Header
+}
+
+func wrapHeader(h kafka.Header) tracing.Header {
+ return &wHeader{h}
+}
+
+func (w wHeader) GetKey() string {
+ return w.Header.Key
+}
+
+func (w wHeader) GetValue() []byte {
+ return w.Header.Value
+}
+
+type wTopicPartition struct {
+ kafka.TopicPartition
+}
+
+func wrapTopicPartition(tp kafka.TopicPartition) tracing.TopicPartition {
+ return wTopicPartition{tp}
+}
+
+func wrapTopicPartitions(tps []kafka.TopicPartition) []tracing.TopicPartition {
+ wtps := make([]tracing.TopicPartition, 0, len(tps))
+ for _, tp := range tps {
+ wtps = append(wtps, wTopicPartition{tp})
+ }
+ return wtps
+}
+
+func (w wTopicPartition) GetTopic() string {
+ if w.Topic == nil {
+ return ""
+ }
+ return *w.Topic
+}
+
+func (w wTopicPartition) GetPartition() int32 {
+ return w.Partition
+}
+
+func (w wTopicPartition) GetOffset() int64 {
+ return int64(w.Offset)
+}
+
+func (w wTopicPartition) GetError() error {
+ return w.Error
+}
+
+type wEvent struct {
+ kafka.Event
+}
+
+func wrapEvent(event kafka.Event) tracing.Event {
+ return wEvent{event}
+}
+
+func (w wEvent) KafkaMessage() (tracing.Message, bool) {
+ if m, ok := w.Event.(*kafka.Message); ok {
+ return wrapMessage(m), true
+ }
+ return nil, false
+}
+
+func (w wEvent) KafkaOffsetsCommitted() (tracing.OffsetsCommitted, bool) {
+ if oc, ok := w.Event.(kafka.OffsetsCommitted); ok {
+ return wrapOffsetsCommitted(oc), true
+ }
+ return nil, false
+}
+
+type wOffsetsCommitted struct {
+ kafka.OffsetsCommitted
+}
+
+func wrapOffsetsCommitted(oc kafka.OffsetsCommitted) tracing.OffsetsCommitted {
+ return wOffsetsCommitted{oc}
+}
+
+func (w wOffsetsCommitted) GetError() error {
+ return w.Error
+}
+
+func (w wOffsetsCommitted) GetOffsets() []tracing.TopicPartition {
+ ttps := make([]tracing.TopicPartition, 0, len(w.Offsets))
+ for _, tp := range w.Offsets {
+ ttps = append(ttps, wrapTopicPartition(tp))
+ }
+ return ttps
+}
+
+type wConfigMap struct {
+ cfg *kafka.ConfigMap
+}
+
+func wrapConfigMap(cm *kafka.ConfigMap) tracing.ConfigMap {
+ return &wConfigMap{cm}
+}
+
+func (w *wConfigMap) Get(key string, defVal any) (any, error) {
+ return w.cfg.Get(key, defVal)
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka_tracer.go b/contrib/confluentinc/confluent-kafka-go/kafka_tracer.go
new file mode 100644
index 0000000000..76d3d3d0ca
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/kafka_tracer.go
@@ -0,0 +1,156 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import (
+ "context"
+ "math"
+ "net"
+ "strings"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/DataDog/dd-trace-go/v2/instrumentation"
+ "github.com/DataDog/dd-trace-go/v2/internal"
+)
+
+type KafkaTracer struct {
+ PrevSpan *tracer.Span
+ ctx context.Context
+ consumerServiceName string
+ producerServiceName string
+ consumerSpanName string
+ producerSpanName string
+ analyticsRate float64
+ bootstrapServers string
+ groupID string
+ tagFns map[string]func(msg Message) interface{}
+ dsmEnabled bool
+ ckgoVersion CKGoVersion
+ librdKafkaVersion int
+}
+
+var instr *instrumentation.Instrumentation
+
+func (tr *KafkaTracer) DSMEnabled() bool {
+ return tr.dsmEnabled
+}
+
+type Option interface {
+ apply(*KafkaTracer)
+}
+
+// OptionFn represents options applicable to NewConsumer, NewProducer, WrapConsumer and WrapProducer.
+type OptionFn func(*KafkaTracer)
+
+func (fn OptionFn) apply(cfg *KafkaTracer) {
+ fn(cfg)
+}
+
+func NewKafkaTracer(ckgoVersion CKGoVersion, librdKafkaVersion int, opts ...Option) *KafkaTracer {
+ if instr == nil {
+ instr = Package(ckgoVersion)
+ }
+ tr := &KafkaTracer{
+ ctx: context.Background(),
+ analyticsRate: instr.AnalyticsRate(false),
+ ckgoVersion: ckgoVersion,
+ librdKafkaVersion: librdKafkaVersion,
+ }
+ if internal.BoolEnv("DD_TRACE_KAFKA_ANALYTICS_ENABLED", false) {
+ tr.analyticsRate = 1.0
+ }
+
+ tr.dsmEnabled = instr.DataStreamsEnabled()
+
+ tr.consumerServiceName = instr.ServiceName(instrumentation.ComponentConsumer, nil)
+ tr.producerServiceName = instr.ServiceName(instrumentation.ComponentProducer, nil)
+ tr.consumerSpanName = instr.OperationName(instrumentation.ComponentConsumer, nil)
+ tr.producerSpanName = instr.OperationName(instrumentation.ComponentProducer, nil)
+
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+ opt.apply(tr)
+ }
+ return tr
+}
+
+// WithContext sets the config context to ctx.
+// Deprecated: This is deprecated in favor of passing the context
+// via the message headers
+func WithContext(ctx context.Context) OptionFn {
+ return func(tr *KafkaTracer) {
+ tr.ctx = ctx
+ }
+}
+
+// WithService sets the config service name to serviceName.
+func WithService(serviceName string) OptionFn {
+ return func(cfg *KafkaTracer) {
+ cfg.consumerServiceName = serviceName
+ cfg.producerServiceName = serviceName
+ }
+}
+
+// WithAnalytics enables Trace Analytics for all started spans.
+func WithAnalytics(on bool) OptionFn {
+ return func(cfg *KafkaTracer) {
+ if on {
+ cfg.analyticsRate = 1.0
+ } else {
+ cfg.analyticsRate = math.NaN()
+ }
+ }
+}
+
+// WithAnalyticsRate sets the sampling rate for Trace Analytics events
+// correlated to started spans.
+func WithAnalyticsRate(rate float64) OptionFn {
+ return func(cfg *KafkaTracer) {
+ if rate >= 0.0 && rate <= 1.0 {
+ cfg.analyticsRate = rate
+ } else {
+ cfg.analyticsRate = math.NaN()
+ }
+ }
+}
+
+// WithCustomTag will cause the given tagFn to be evaluated for each traced
+// message and attach the result to the span under the given tag key.
+func WithCustomTag(tag string, tagFn func(msg Message) interface{}) OptionFn {
+ return func(cfg *KafkaTracer) {
+ if cfg.tagFns == nil {
+ cfg.tagFns = make(map[string]func(msg Message) interface{})
+ }
+ cfg.tagFns[tag] = tagFn
+ }
+}
+
+// WithConfig extracts the consumer group ID and bootstrap servers from the given config so they can be tagged on spans.
+func WithConfig(cg ConfigMap) OptionFn {
+ return func(tr *KafkaTracer) {
+ if groupID, err := cg.Get("group.id", ""); err == nil {
+ tr.groupID = groupID.(string)
+ }
+ if bs, err := cg.Get("bootstrap.servers", ""); err == nil && bs != "" {
+ for _, addr := range strings.Split(bs.(string), ",") {
+ host, _, err := net.SplitHostPort(addr)
+ if err == nil {
+ tr.bootstrapServers = host
+ return
+ }
+ }
+ }
+ }
+}
+
+// WithDataStreams enables the Data Streams monitoring product features: https://www.datadoghq.com/product/data-streams-monitoring/
+func WithDataStreams() OptionFn {
+ return func(tr *KafkaTracer) {
+ tr.dsmEnabled = true
+ }
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/option_test.go b/contrib/confluentinc/confluent-kafka-go/kafka_tracer_test.go
similarity index 62%
rename from contrib/confluentinc/confluent-kafka-go/kafka.v2/option_test.go
rename to contrib/confluentinc/confluent-kafka-go/kafka_tracer_test.go
index 7c4f40450f..bc82a465e2 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/option_test.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka_tracer_test.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016 Datadog, Inc.
-package kafka
+package tracing
import (
"math"
@@ -16,48 +16,48 @@ import (
func TestDataStreamsActivation(t *testing.T) {
t.Run("default", func(t *testing.T) {
- cfg := newConfig()
- assert.False(t, cfg.dataStreamsEnabled)
+ tr := NewKafkaTracer(0, 0)
+ assert.False(t, tr.DSMEnabled())
})
t.Run("withOption", func(t *testing.T) {
- cfg := newConfig(WithDataStreams())
- assert.True(t, cfg.dataStreamsEnabled)
+ tr := NewKafkaTracer(0, 0, WithDataStreams())
+ assert.True(t, tr.DSMEnabled())
})
t.Run("withEnv", func(t *testing.T) {
t.Setenv("DD_DATA_STREAMS_ENABLED", "true")
- cfg := newConfig()
- assert.True(t, cfg.dataStreamsEnabled)
+ tr := NewKafkaTracer(0, 0)
+ assert.True(t, tr.DSMEnabled())
})
t.Run("optionOverridesEnv", func(t *testing.T) {
t.Setenv("DD_DATA_STREAMS_ENABLED", "false")
- cfg := newConfig(WithDataStreams())
- assert.True(t, cfg.dataStreamsEnabled)
+ tr := NewKafkaTracer(0, 0, WithDataStreams())
+ assert.True(t, tr.DSMEnabled())
})
}
func TestAnalyticsSettings(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
- cfg := newConfig()
- assert.True(t, math.IsNaN(cfg.analyticsRate))
+ tr := NewKafkaTracer(0, 0)
+ assert.True(t, math.IsNaN(tr.analyticsRate))
})
t.Run("global", func(t *testing.T) {
t.Skip("global flag disabled")
testutils.SetGlobalAnalyticsRate(t, 0.4)
- cfg := newConfig()
- assert.Equal(t, 0.4, cfg.analyticsRate)
+ tr := NewKafkaTracer(0, 0)
+ assert.Equal(t, 0.4, tr.analyticsRate)
})
t.Run("enabled", func(t *testing.T) {
- cfg := newConfig(WithAnalytics(true))
- assert.Equal(t, 1.0, cfg.analyticsRate)
+ tr := NewKafkaTracer(0, 0, WithAnalytics(true))
+ assert.Equal(t, 1.0, tr.analyticsRate)
})
t.Run("override", func(t *testing.T) {
testutils.SetGlobalAnalyticsRate(t, 0.4)
- cfg := newConfig(WithAnalyticsRate(0.2))
- assert.Equal(t, 0.2, cfg.analyticsRate)
+ tr := NewKafkaTracer(0, 0, WithAnalyticsRate(0.2))
+ assert.Equal(t, 0.2, tr.analyticsRate)
})
}
diff --git a/contrib/confluentinc/confluent-kafka-go/message_carrier.go b/contrib/confluentinc/confluent-kafka-go/message_carrier.go
new file mode 100644
index 0000000000..3a78ccc4d9
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/message_carrier.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+
+// A MessageCarrier implements TextMapReader/TextMapWriter for extracting/injecting traces on a Kafka message.
+type MessageCarrier struct {
+ msg Message
+}
+
+var _ interface {
+ tracer.TextMapReader
+ tracer.TextMapWriter
+} = (*MessageCarrier)(nil)
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c MessageCarrier) ForeachKey(handler func(key, val string) error) error {
+ for _, h := range c.msg.GetHeaders() {
+ err := handler(h.GetKey(), string(h.GetValue()))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set implements TextMapWriter
+func (c MessageCarrier) Set(key, val string) {
+ headers := c.msg.GetHeaders()
+ // ensure uniqueness of keys
+ for i := 0; i < len(headers); i++ {
+ if headers[i].GetKey() == key {
+ headers = append(headers[:i], headers[i+1:]...)
+ i--
+ }
+ }
+ headers = append(headers, KafkaHeader{
+ Key: key,
+ Value: []byte(val),
+ })
+ c.msg.SetHeaders(headers)
+}
+
+func NewMessageCarrier(msg Message) MessageCarrier {
+ return MessageCarrier{msg: msg}
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/producer.go b/contrib/confluentinc/confluent-kafka-go/producer.go
new file mode 100644
index 0000000000..48ed21c7a0
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/producer.go
@@ -0,0 +1,102 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import (
+ "math"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+)
+
+func WrapProduceChannel[M any, TM Message](tr *KafkaTracer, out chan M, translateFn func(M) TM) chan M {
+ if out == nil {
+ return out
+ }
+ in := make(chan M, 1)
+ go func() {
+ for msg := range in {
+ tMsg := translateFn(msg)
+ span := tr.StartProduceSpan(tMsg)
+ tr.SetProduceCheckpoint(tMsg)
+ out <- msg
+ span.Finish()
+ }
+ }()
+ return in
+}
+
+func WrapProduceEventsChannel[E any, TE Event](tr *KafkaTracer, in chan E, translateFn func(E) TE) chan E {
+ if in == nil {
+ return nil
+ }
+ out := make(chan E, 1)
+ go func() {
+ defer close(out)
+ for evt := range in {
+ tEvt := translateFn(evt)
+ if msg, ok := tEvt.KafkaMessage(); ok {
+ tr.TrackProduceOffsets(msg)
+ }
+ out <- evt
+ }
+ }()
+ return out
+}
+
+func (tr *KafkaTracer) StartProduceSpan(msg Message) *tracer.Span {
+ opts := []tracer.StartSpanOption{
+ tracer.ServiceName(tr.producerServiceName),
+ tracer.ResourceName("Produce Topic " + msg.GetTopicPartition().GetTopic()),
+ tracer.SpanType(ext.SpanTypeMessageProducer),
+ tracer.Tag(ext.Component, ComponentName(tr.ckgoVersion)),
+ tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
+ tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
+ tracer.Tag(ext.MessagingKafkaPartition, msg.GetTopicPartition().GetPartition()),
+ }
+ if tr.bootstrapServers != "" {
+ opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, tr.bootstrapServers))
+ }
+ if !math.IsNaN(tr.analyticsRate) {
+ opts = append(opts, tracer.Tag(ext.EventSampleRate, tr.analyticsRate))
+ }
+ // if there's a span context in the headers, use that as the parent
+ carrier := NewMessageCarrier(msg)
+ if spanctx, err := tracer.Extract(carrier); err == nil {
+ opts = append(opts, tracer.ChildOf(spanctx))
+ }
+ span, _ := tracer.StartSpanFromContext(tr.ctx, tr.producerSpanName, opts...)
+ // inject the span context so consumers can pick it up
+ tracer.Inject(span.Context(), carrier)
+ return span
+}
+
+func WrapDeliveryChannel[E any, TE Event](tr *KafkaTracer, deliveryChan chan E, span *tracer.Span, translateFn func(E) TE) (chan E, chan error) {
+ // if the user has selected a delivery channel, we will wrap it and
+ // wait for the delivery event to finish the span
+ if deliveryChan == nil {
+ return nil, nil
+ }
+ wrapped := make(chan E)
+ errChan := make(chan error, 1)
+ go func() {
+ var err error
+ select {
+ case evt := <-wrapped:
+ tEvt := translateFn(evt)
+ if msg, ok := tEvt.KafkaMessage(); ok {
+ // delivery errors are returned via TopicPartition.Error
+ err = msg.GetTopicPartition().GetError()
+ tr.TrackProduceOffsets(msg)
+ }
+ deliveryChan <- evt
+ case e := <-errChan:
+ err = e
+ }
+ span.Finish(tracer.WithError(err))
+ }()
+ return wrapped, errChan
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/tracing.go b/contrib/confluentinc/confluent-kafka-go/tracing.go
new file mode 100644
index 0000000000..dae0ac6440
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/tracing.go
@@ -0,0 +1,46 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import "github.com/DataDog/dd-trace-go/v2/instrumentation"
+
+type CKGoVersion int32
+
+const (
+ CKGoVersion1 CKGoVersion = 1
+ CKGoVersion2 CKGoVersion = 2
+)
+
+func ComponentName(v CKGoVersion) string {
+ switch v {
+ case CKGoVersion1:
+ return "confluentinc/confluent-kafka-go/kafka"
+ case CKGoVersion2:
+ return "confluentinc/confluent-kafka-go/kafka.v2"
+ default:
+ return ""
+ }
+}
+
+func IntegrationName(v CKGoVersion) string {
+ switch v {
+ case CKGoVersion1:
+ return "github.com/confluentinc/confluent-kafka-go"
+ case CKGoVersion2:
+ return "github.com/confluentinc/confluent-kafka-go/v2"
+ default:
+ return ""
+ }
+}
+
+func Package(v CKGoVersion) *instrumentation.Instrumentation {
+ switch v {
+ case CKGoVersion2:
+ return instrumentation.Load(instrumentation.PackageConfluentKafkaGoV2)
+ default:
+ return instrumentation.Load(instrumentation.PackageConfluentKafkaGo)
+ }
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/types.go b/contrib/confluentinc/confluent-kafka-go/types.go
new file mode 100644
index 0000000000..35bf44eb30
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/types.go
@@ -0,0 +1,64 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+
+type Message interface {
+ GetValue() []byte
+ GetKey() []byte
+ GetHeaders() []Header
+ SetHeaders([]Header)
+ GetTopicPartition() TopicPartition
+ Unwrap() any
+}
+
+type Header interface {
+ GetKey() string
+ GetValue() []byte
+}
+
+type KafkaHeader struct {
+ Key string
+ Value []byte
+}
+
+func (h KafkaHeader) GetKey() string {
+ return h.Key
+}
+
+func (h KafkaHeader) GetValue() []byte {
+ return h.Value
+}
+
+type OffsetsCommitted interface {
+ GetError() error
+ GetOffsets() []TopicPartition
+}
+
+type TopicPartition interface {
+ GetTopic() string
+ GetPartition() int32
+ GetOffset() int64
+ GetError() error
+}
+
+type Event interface {
+ KafkaMessage() (Message, bool)
+ KafkaOffsetsCommitted() (OffsetsCommitted, bool)
+}
+
+type Consumer interface {
+ GetWatermarkOffsets(topic string, partition int32) (low int64, high int64, err error)
+}
+
+type ConfigMap interface {
+ Get(key string, defval any) (any, error)
+}
+
+type SpanStore struct {
+ Prev *tracer.Span
+}
diff --git a/contrib/database/sql/go.mod b/contrib/database/sql/go.mod
index 7b48b7326c..45d09e43d2 100644
--- a/contrib/database/sql/go.mod
+++ b/contrib/database/sql/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/database/sql/v2
go 1.22.0
-toolchain go1.23.1
-
require (
github.com/DataDog/datadog-go/v5 v5.5.0
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/denisenkom/go-mssqldb v0.11.0
github.com/go-sql-driver/mysql v1.6.0
github.com/lib/pq v1.10.2
diff --git a/contrib/dimfeld/httptreemux.v5/go.mod b/contrib/dimfeld/httptreemux.v5/go.mod
index f8d8fbf0b8..175918bf2c 100644
--- a/contrib/dimfeld/httptreemux.v5/go.mod
+++ b/contrib/dimfeld/httptreemux.v5/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/dimfeld/httptreemux.v5/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/dimfeld/httptreemux/v5 v5.5.0
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/dimfeld/httptreemux.v5/httptreemux.go b/contrib/dimfeld/httptreemux.v5/httptreemux.go
index f207fc85fb..401e929c4a 100644
--- a/contrib/dimfeld/httptreemux.v5/httptreemux.go
+++ b/contrib/dimfeld/httptreemux.v5/httptreemux.go
@@ -145,7 +145,10 @@ func getRoute(router *httptreemux.TreeMux, w http.ResponseWriter, req *http.Requ
// replace parameter at end of the path, i.e. "../:param"
oldP = "/" + v
newP = "/:" + k
- route = strings.Replace(route, oldP, newP, 1)
+ if strings.HasSuffix(route, oldP) {
+ endPos := strings.LastIndex(route, oldP)
+ route = route[:endPos] + newP
+ }
}
return route, true
}
diff --git a/contrib/dimfeld/httptreemux.v5/httptreemux_test.go b/contrib/dimfeld/httptreemux.v5/httptreemux_test.go
index e9e0a2a12c..74b4fdd857 100644
--- a/contrib/dimfeld/httptreemux.v5/httptreemux_test.go
+++ b/contrib/dimfeld/httptreemux.v5/httptreemux_test.go
@@ -649,6 +649,44 @@ func TestTrailingSlashRoutesWithBehaviorUseHandler(t *testing.T) {
})
}
+func TestDuplicateWordsParamsHandler(t *testing.T) {
+ tests := []struct {
+ name string
+ route string
+ url string
+ }{
+ {
+ name: "Test minimal case",
+ route: "/1a/:n",
+ url: "/1a/1",
+ },
+ {
+ name: "Test string with separators",
+ route: "/foo/2by4/bar/:n",
+ url: "/foo/2by4/bar/2",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ router := New()
+ router.GET(tt.route, handler200)
+
+ r := httptest.NewRequest("GET", tt.url, nil)
+ w := httptest.NewRecorder()
+ router.ServeHTTP(w, r)
+
+ spans := mt.FinishedSpans()
+ assert.Equal(1, len(spans))
+ assert.Equal("GET "+tt.route, spans[0].Tag(ext.ResourceName))
+ })
+ }
+}
+
func TestIsSupportedRedirectStatus(t *testing.T) {
tests := []struct {
name string
diff --git a/contrib/elastic/go-elasticsearch.v6/go.mod b/contrib/elastic/go-elasticsearch.v6/go.mod
index 8e43680b55..346ee7c018 100644
--- a/contrib/elastic/go-elasticsearch.v6/go.mod
+++ b/contrib/elastic/go-elasticsearch.v6/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/elastic/go-elasticsearch.v6/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/elastic/go-elasticsearch/v6 v6.8.5
github.com/elastic/go-elasticsearch/v7 v7.17.1
github.com/elastic/go-elasticsearch/v8 v8.4.0
diff --git a/contrib/emicklei/go-restful.v3/go.mod b/contrib/emicklei/go-restful.v3/go.mod
index f09dcbaed6..45dbc993f3 100644
--- a/contrib/emicklei/go-restful.v3/go.mod
+++ b/contrib/emicklei/go-restful.v3/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/emicklei/go-restful.v3/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/emicklei/go-restful/v3 v3.11.0
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/emicklei/go-restful.v3/restful_test.go b/contrib/emicklei/go-restful.v3/restful_test.go
index c6fced0647..083d249957 100644
--- a/contrib/emicklei/go-restful.v3/restful_test.go
+++ b/contrib/emicklei/go-restful.v3/restful_test.go
@@ -182,7 +182,7 @@ func TestError(t *testing.T) {
span := spans[0]
assert.Equal("http.request", span.OperationName())
assert.Equal("500", span.Tag(ext.HTTPCode))
- assert.Equal("500: Internal Server Error", span.Tag(ext.ErrorMsg))
+ assert.Equal(wantErr.Error(), span.Tag(ext.ErrorMsg))
assert.Equal(ext.SpanKindServer, span.Tag(ext.SpanKind))
assert.Equal("emicklei/go-restful.v3", span.Tag(ext.Component))
}
diff --git a/contrib/gin-gonic/gin/appsec.go b/contrib/gin-gonic/gin/appsec.go
index 1129cb23f7..882bdadab4 100644
--- a/contrib/gin-gonic/gin/appsec.go
+++ b/contrib/gin-gonic/gin/appsec.go
@@ -8,14 +8,14 @@ package gin
import (
"net/http"
- "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec"
+ "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace"
"github.com/gin-gonic/gin"
)
// useAppSec executes the AppSec logic related to the operation start
-func useAppSec(c *gin.Context, span *tracer.Span) {
+func useAppSec(c *gin.Context, span trace.TagSetter) {
var params map[string]string
if l := len(c.Params); l > 0 {
params = make(map[string]string, l)
diff --git a/contrib/gin-gonic/gin/go.mod b/contrib/gin-gonic/gin/go.mod
index 1b83f8a1b9..d944a149ac 100644
--- a/contrib/gin-gonic/gin/go.mod
+++ b/contrib/gin-gonic/gin/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/gin-gonic/gin/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/gin-gonic/gin v1.9.1
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/globalsign/mgo/go.mod b/contrib/globalsign/mgo/go.mod
index 31ee17d783..16b636812c 100644
--- a/contrib/globalsign/mgo/go.mod
+++ b/contrib/globalsign/mgo/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/globalsign/mgo/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/go-chi/chi.v5/go.mod b/contrib/go-chi/chi.v5/go.mod
index cadc5b4ed8..46528aca2a 100644
--- a/contrib/go-chi/chi.v5/go.mod
+++ b/contrib/go-chi/chi.v5/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/go-chi/chi.v5/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/go-chi/chi/v5 v5.0.10
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/go-chi/chi/go.mod b/contrib/go-chi/chi/go.mod
index 441ac2271f..d63d249bd5 100644
--- a/contrib/go-chi/chi/go.mod
+++ b/contrib/go-chi/chi/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/go-chi/chi v1.5.4
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/go-pg/pg.v10/go.mod b/contrib/go-pg/pg.v10/go.mod
index 3fe3b56379..2c9414a36b 100644
--- a/contrib/go-pg/pg.v10/go.mod
+++ b/contrib/go-pg/pg.v10/go.mod
@@ -2,8 +2,6 @@ module github.com/DataDog/dd-trace-go/contrib/go-pg/pg.v10/v2
go 1.22.0
-toolchain go1.23.1
-
require (
github.com/go-pg/pg/v10 v10.11.1
github.com/stretchr/testify v1.9.0
@@ -30,7 +28,7 @@ require (
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.5 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
diff --git a/contrib/go-redis/redis.v7/go.mod b/contrib/go-redis/redis.v7/go.mod
index 968e5bc5c5..c8403b0b3d 100644
--- a/contrib/go-redis/redis.v7/go.mod
+++ b/contrib/go-redis/redis.v7/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/go-redis/redis.v7/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/go-redis/redis/v7 v7.4.1
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/go-redis/redis.v8/go.mod b/contrib/go-redis/redis.v8/go.mod
index 45218635bd..8fe189f226 100644
--- a/contrib/go-redis/redis.v8/go.mod
+++ b/contrib/go-redis/redis.v8/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/go-redis/redis.v8/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/go-redis/redis/v8 v8.11.5
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/go-redis/redis/go.mod b/contrib/go-redis/redis/go.mod
index 1117cfbdfb..6d3d77f081 100644
--- a/contrib/go-redis/redis/go.mod
+++ b/contrib/go-redis/redis/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/go-redis/redis/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/go-redis/redis v6.15.9+incompatible
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/go.mongodb.org/mongo-driver/go.mod b/contrib/go.mongodb.org/mongo-driver/go.mod
index 0a677eb2ad..35cab55047 100644
--- a/contrib/go.mongodb.org/mongo-driver/go.mod
+++ b/contrib/go.mongodb.org/mongo-driver/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/go.mongodb.org/mongo-driver/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
go.mongodb.org/mongo-driver v1.12.1
)
diff --git a/contrib/gocql/gocql/go.mod b/contrib/gocql/gocql/go.mod
index 5b4971cc73..e54cee4b48 100644
--- a/contrib/gocql/gocql/go.mod
+++ b/contrib/gocql/gocql/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/gocql/gocql/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/gocql/gocql v0.0.0-20220224095938-0eacd3183625
github.com/stretchr/testify v1.9.0
golang.org/x/mod v0.18.0
diff --git a/contrib/gofiber/fiber.v2/go.mod b/contrib/gofiber/fiber.v2/go.mod
index c63d1ebf3b..4747d2b9ec 100644
--- a/contrib/gofiber/fiber.v2/go.mod
+++ b/contrib/gofiber/fiber.v2/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/gofiber/fiber.v2/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/gofiber/fiber/v2 v2.50.0
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/gomodule/redigo/go.mod b/contrib/gomodule/redigo/go.mod
index f0aaf9186c..4ef6d15e3e 100644
--- a/contrib/gomodule/redigo/go.mod
+++ b/contrib/gomodule/redigo/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/gomodule/redigo/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/gomodule/redigo v1.8.9
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/google.golang.org/api/go.mod b/contrib/google.golang.org/api/go.mod
index 34bc3cbee0..c497137c8a 100644
--- a/contrib/google.golang.org/api/go.mod
+++ b/contrib/google.golang.org/api/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/google.golang.org/api/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
golang.org/x/oauth2 v0.18.0
google.golang.org/api v0.169.0
diff --git a/contrib/google.golang.org/api/internal/gen_endpoints/go.mod b/contrib/google.golang.org/api/internal/gen_endpoints/go.mod
index f3d7dd928d..dd68a78e72 100644
--- a/contrib/google.golang.org/api/internal/gen_endpoints/go.mod
+++ b/contrib/google.golang.org/api/internal/gen_endpoints/go.mod
@@ -2,6 +2,4 @@ module github.com/DataDog/dd-trace-go/contrib/google.golang.org/api/internal/gen
go 1.22.0
-toolchain go1.23.1
-
require github.com/yosida95/uritemplate/v3 v3.0.2
diff --git a/contrib/google.golang.org/grpc/go.mod b/contrib/google.golang.org/grpc/go.mod
index 0526107e3b..2bf7e345f5 100644
--- a/contrib/google.golang.org/grpc/go.mod
+++ b/contrib/google.golang.org/grpc/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/google.golang.org/grpc/v2
go 1.22.0
-toolchain go1.23.1
-
require (
github.com/DataDog/dd-trace-go/instrumentation/testutils/grpc/v2 v2.0.0-20240827110213-c6fc4fe2047a
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
github.com/tinylib/msgp v1.2.1
google.golang.org/grpc v1.65.0
diff --git a/contrib/gorilla/mux/go.mod b/contrib/gorilla/mux/go.mod
index 6affe5ca4b..c717f40120 100644
--- a/contrib/gorilla/mux/go.mod
+++ b/contrib/gorilla/mux/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/gorilla/mux v1.8.0
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/gorilla/mux/mux_test.go b/contrib/gorilla/mux/mux_test.go
index 6c4352df22..3e68261f5e 100644
--- a/contrib/gorilla/mux/mux_test.go
+++ b/contrib/gorilla/mux/mux_test.go
@@ -249,7 +249,7 @@ func TestNoDebugStack(t *testing.T) {
assert.Equal(1, len(spans))
s := spans[0]
assert.Equal(s.Tags()[ext.ErrorMsg], "500: Internal Server Error")
- assert.Empty(spans[0].Tags()[ext.ErrorStack])
+ assert.Nil(spans[0].Tags()[ext.ErrorStack])
}
// TestImplementingMethods is a regression tests asserting that all the mux.Router methods
diff --git a/contrib/gorm.io/gorm.v1/go.mod b/contrib/gorm.io/gorm.v1/go.mod
index 70030b74ea..b11e2ab8fa 100644
--- a/contrib/gorm.io/gorm.v1/go.mod
+++ b/contrib/gorm.io/gorm.v1/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/gorm.io/gorm.v1/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/database/sql/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/database/sql/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/go-sql-driver/mysql v1.6.0
github.com/jackc/pgx/v5 v5.4.2
github.com/lib/pq v1.10.2
diff --git a/contrib/graph-gophers/graphql-go/appsec_test.go b/contrib/graph-gophers/graphql-go/appsec_test.go
index 33b64d3424..3d14eb9787 100644
--- a/contrib/graph-gophers/graphql-go/appsec_test.go
+++ b/contrib/graph-gophers/graphql-go/appsec_test.go
@@ -94,7 +94,7 @@ func TestAppSec(t *testing.T) {
// The last finished span (which is GraphQL entry) should have the "_dd.appsec.enabled" tag.
span := spans[len(spans)-1]
- require.Equal(t, 1, span.Tag("_dd.appsec.enabled"))
+ require.Equal(t, float64(1), span.Tag("_dd.appsec.enabled"))
type ddAppsecJSON struct {
Triggers []struct {
Rule struct {
diff --git a/contrib/graph-gophers/graphql-go/go.mod b/contrib/graph-gophers/graphql-go/go.mod
index b0641a4a03..35e5a8e90d 100644
--- a/contrib/graph-gophers/graphql-go/go.mod
+++ b/contrib/graph-gophers/graphql-go/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/graph-gophers/graphql-go/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/graph-gophers/graphql-go v1.5.0
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/graphql-go/graphql/appsec_test.go b/contrib/graphql-go/graphql/appsec_test.go
index f6ea2c4abc..74215f0e3e 100644
--- a/contrib/graphql-go/graphql/appsec_test.go
+++ b/contrib/graphql-go/graphql/appsec_test.go
@@ -140,7 +140,7 @@ func TestAppSec(t *testing.T) {
require.NotEmpty(t, spans)
// The last finished span (which is GraphQL entry) should have the "_dd.appsec.enabled" tag.
span := spans[len(spans)-1]
- require.Equal(t, 1, span.Tag("_dd.appsec.enabled"))
+ require.Equal(t, float64(1), span.Tag("_dd.appsec.enabled"))
type ddAppsecJSON struct {
Triggers []struct {
Rule struct {
diff --git a/contrib/graphql-go/graphql/go.mod b/contrib/graphql-go/graphql/go.mod
index f8e1089e5c..bfee1fd05c 100644
--- a/contrib/graphql-go/graphql/go.mod
+++ b/contrib/graphql-go/graphql/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/graphql-go/graphql/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/graphql-go/graphql v0.8.1
github.com/graphql-go/handler v0.2.3
github.com/hashicorp/go-multierror v1.1.1
diff --git a/contrib/hashicorp/consul/go.mod b/contrib/hashicorp/consul/go.mod
index 652880bc48..769957eb4b 100644
--- a/contrib/hashicorp/consul/go.mod
+++ b/contrib/hashicorp/consul/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/hashicorp/consul/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/hashicorp/consul/api v1.24.0
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/hashicorp/vault/go.mod b/contrib/hashicorp/vault/go.mod
index 6eabb494a7..b72573b910 100644
--- a/contrib/hashicorp/vault/go.mod
+++ b/contrib/hashicorp/vault/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/hashicorp/vault/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/hashicorp/vault/api v1.9.2
github.com/hashicorp/vault/sdk v0.9.2
github.com/stretchr/testify v1.9.0
diff --git a/contrib/internal/telemetrytest/telemetry_test.go b/contrib/internal/telemetrytest/telemetry_test.go
deleted file mode 100644
index 303b8dc43b..0000000000
--- a/contrib/internal/telemetrytest/telemetry_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2023 Datadog, Inc.
-package telemetrytest
-
-import (
- "encoding/json"
- "os"
- "os/exec"
- "strings"
- "testing"
-
- mux "github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2"
- "github.com/DataDog/dd-trace-go/v2/internal/telemetry"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-// TestIntegrationInfo verifies that an integration leveraging instrumentation telemetry
-// sends the correct data to the telemetry client.
-func TestIntegrationInfo(t *testing.T) {
- // mux.NewRouter() uses the net/http and gorilla/mux integration
- mux.NewRouter()
- integrations := telemetry.Integrations()
- require.Len(t, integrations, 2)
- assert.Equal(t, integrations[0].Name, "net/http")
- assert.True(t, integrations[0].Enabled)
- assert.Equal(t, integrations[1].Name, "gorilla/mux")
- assert.True(t, integrations[1].Enabled)
-}
-
-type contribPkg struct {
- ImportPath string
- Name string
- Imports []string
- Dir string
-}
-
-var TelemetryImport = "github.com/DataDog/dd-trace-go/v2/internal/telemetry"
-
-func readPackage(t *testing.T, path string) contribPkg {
- cmd := exec.Command("go", "list", "-json", path)
- cmd.Stderr = os.Stderr
- output, err := cmd.Output()
- require.NoError(t, err)
- p := contribPkg{}
- err = json.Unmarshal(output, &p)
- require.NoError(t, err)
- return p
-}
-
-func (p *contribPkg) hasTelemetryImport(t *testing.T) bool {
- for _, imp := range p.Imports {
- if imp == TelemetryImport {
- return true
- }
- }
- // if we didn't find it imported directly, it might be imported in one of sub-package imports
- for _, imp := range p.Imports {
- if strings.HasPrefix(imp, p.ImportPath) {
- p := readPackage(t, imp)
- if p.hasTelemetryImport(t) {
- return true
- }
- }
- }
- return false
-}
-
-// TestTelemetryEnabled verifies that the expected contrib packages leverage instrumentation telemetry
-func TestTelemetryEnabled(t *testing.T) {
- body, err := exec.Command("go", "list", "-json", "../../...").Output()
- require.NoError(t, err)
-
- var packages []contribPkg
- stream := json.NewDecoder(strings.NewReader(string(body)))
- for stream.More() {
- var out contribPkg
- err := stream.Decode(&out)
- require.NoError(t, err)
- packages = append(packages, out)
- }
- for _, pkg := range packages {
- if strings.Contains(pkg.ImportPath, "/test") || strings.Contains(pkg.ImportPath, "/internal") {
- continue
- }
- if !pkg.hasTelemetryImport(t) {
- t.Fatalf(`package %q is expected use instrumentation telemetry. For more info see https://github.com/DataDog/dd-trace-go/blob/main/contrib/README.md#instrumentation-telemetry`, pkg.ImportPath)
- }
- }
-}
diff --git a/contrib/jackc/pgx.v5/go.mod b/contrib/jackc/pgx.v5/go.mod
index bfedadfb03..119a6ff016 100644
--- a/contrib/jackc/pgx.v5/go.mod
+++ b/contrib/jackc/pgx.v5/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/jackc/pgx.v5/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/jackc/pgx/v5 v5.7.1
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/jackc/pgx.v5/pgx.go b/contrib/jackc/pgx.v5/pgx.go
index fa4c631253..8107fa1e2e 100644
--- a/contrib/jackc/pgx.v5/pgx.go
+++ b/contrib/jackc/pgx.v5/pgx.go
@@ -23,22 +23,33 @@ func init() {
instr = instrumentation.Load(instrumentation.PackageJackcPGXV5)
}
+// Deprecated: this type is unused internally and will be removed in a future release; use pgx.Batch instead.
type Batch = pgx.Batch
+// Connect is equivalent to pgx.Connect providing a connection augmented with tracing.
func Connect(ctx context.Context, connString string, opts ...Option) (*pgx.Conn, error) {
connConfig, err := pgx.ParseConfig(connString)
if err != nil {
return nil, err
}
-
return ConnectConfig(ctx, connConfig, opts...)
}
+// ConnectConfig is equivalent to pgx.ConnectConfig providing a connection augmented with tracing.
func ConnectConfig(ctx context.Context, connConfig *pgx.ConnConfig, opts ...Option) (*pgx.Conn, error) {
// The tracer must be set in the config before calling connect
// as pgx takes ownership of the config. QueryTracer traces
// may work, but none of the others will, as they're set in
// unexported fields in the config in the pgx.connect function.
- connConfig.Tracer = newPgxTracer(opts...)
+ connConfig.Tracer = wrapPgxTracer(connConfig.Tracer, opts...)
return pgx.ConnectConfig(ctx, connConfig)
}
+
+// ConnectWithOptions is equivalent to pgx.ConnectWithOptions providing a connection augmented with tracing.
+func ConnectWithOptions(ctx context.Context, connString string, options pgx.ParseConfigOptions, tracerOpts ...Option) (*pgx.Conn, error) {
+ connConfig, err := pgx.ParseConfigWithOptions(connString, options)
+ if err != nil {
+ return nil, err
+ }
+ return ConnectConfig(ctx, connConfig, tracerOpts...)
+}
diff --git a/contrib/jackc/pgx.v5/pgx_tracer.go b/contrib/jackc/pgx.v5/pgx_tracer.go
index 3d1697a73a..5a815a9ddb 100644
--- a/contrib/jackc/pgx.v5/pgx_tracer.go
+++ b/contrib/jackc/pgx.v5/pgx_tracer.go
@@ -44,42 +44,82 @@ func (tb *tracedBatchQuery) finish() {
tb.span.Finish(tracer.WithError(tb.data.Err))
}
+type allPgxTracers interface {
+ pgx.QueryTracer
+ pgx.BatchTracer
+ pgx.ConnectTracer
+ pgx.PrepareTracer
+ pgx.CopyFromTracer
+ pgxpool.AcquireTracer
+}
+
+type wrappedPgxTracer struct {
+ query pgx.QueryTracer
+ batch pgx.BatchTracer
+ connect pgx.ConnectTracer
+ prepare pgx.PrepareTracer
+ copyFrom pgx.CopyFromTracer
+ poolAcquire pgxpool.AcquireTracer
+}
+
type pgxTracer struct {
cfg *config
prevBatchQuery *tracedBatchQuery
+ wrapped wrappedPgxTracer
}
var (
- _ pgx.QueryTracer = (*pgxTracer)(nil)
- _ pgx.BatchTracer = (*pgxTracer)(nil)
- _ pgx.ConnectTracer = (*pgxTracer)(nil)
- _ pgx.PrepareTracer = (*pgxTracer)(nil)
- _ pgx.CopyFromTracer = (*pgxTracer)(nil)
- _ pgxpool.AcquireTracer = (*pgxTracer)(nil)
+ _ allPgxTracers = (*pgxTracer)(nil)
)
-func newPgxTracer(opts ...Option) *pgxTracer {
+func wrapPgxTracer(prev pgx.QueryTracer, opts ...Option) *pgxTracer {
cfg := defaultConfig()
for _, opt := range opts {
opt(cfg)
}
cfg.checkStatsdRequired()
- return &pgxTracer{cfg: cfg}
+ tr := &pgxTracer{cfg: cfg}
+ if prev != nil {
+ tr.wrapped.query = prev
+ if batchTr, ok := prev.(pgx.BatchTracer); ok {
+ tr.wrapped.batch = batchTr
+ }
+ if connTr, ok := prev.(pgx.ConnectTracer); ok {
+ tr.wrapped.connect = connTr
+ }
+ if prepareTr, ok := prev.(pgx.PrepareTracer); ok {
+ tr.wrapped.prepare = prepareTr
+ }
+ if copyFromTr, ok := prev.(pgx.CopyFromTracer); ok {
+ tr.wrapped.copyFrom = copyFromTr
+ }
+ if poolAcquireTr, ok := prev.(pgxpool.AcquireTracer); ok {
+ tr.wrapped.poolAcquire = poolAcquireTr
+ }
+ }
+
+ return tr
}
func (t *pgxTracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
if !t.cfg.traceQuery {
return ctx
}
+ if t.wrapped.query != nil {
+ ctx = t.wrapped.query.TraceQueryStart(ctx, conn, data)
+ }
opts := t.spanOptions(conn.Config(), operationTypeQuery, data.SQL)
_, ctx = tracer.StartSpanFromContext(ctx, "pgx.query", opts...)
return ctx
}
-func (t *pgxTracer) TraceQueryEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryEndData) {
+func (t *pgxTracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
if !t.cfg.traceQuery {
return
}
+ if t.wrapped.query != nil {
+ t.wrapped.query.TraceQueryEnd(ctx, conn, data)
+ }
span, ok := tracer.SpanFromContext(ctx)
if ok {
span.SetTag(tagRowsAffected, data.CommandTag.RowsAffected())
@@ -91,6 +131,9 @@ func (t *pgxTracer) TraceBatchStart(ctx context.Context, conn *pgx.Conn, data pg
if !t.cfg.traceBatch {
return ctx
}
+ if t.wrapped.batch != nil {
+ ctx = t.wrapped.batch.TraceBatchStart(ctx, conn, data)
+ }
opts := t.spanOptions(conn.Config(), operationTypeBatch, "",
tracer.Tag(tagBatchNumQueries, data.Batch.Len()),
)
@@ -102,6 +145,9 @@ func (t *pgxTracer) TraceBatchQuery(ctx context.Context, conn *pgx.Conn, data pg
if !t.cfg.traceBatch {
return
}
+ if t.wrapped.batch != nil {
+ t.wrapped.batch.TraceBatchQuery(ctx, conn, data)
+ }
// Finish the previous batch query span before starting the next one, since pgx doesn't provide hooks or timestamp
// information about when the actual operation started or finished.
// pgx.Batch* types don't support concurrency. This function doesn't support it either.
@@ -118,10 +164,13 @@ func (t *pgxTracer) TraceBatchQuery(ctx context.Context, conn *pgx.Conn, data pg
}
}
-func (t *pgxTracer) TraceBatchEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceBatchEndData) {
+func (t *pgxTracer) TraceBatchEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchEndData) {
if !t.cfg.traceBatch {
return
}
+ if t.wrapped.batch != nil {
+ t.wrapped.batch.TraceBatchEnd(ctx, conn, data)
+ }
if t.prevBatchQuery != nil {
t.prevBatchQuery.finish()
t.prevBatchQuery = nil
@@ -133,6 +182,9 @@ func (t *pgxTracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data
if !t.cfg.traceCopyFrom {
return ctx
}
+ if t.wrapped.copyFrom != nil {
+ ctx = t.wrapped.copyFrom.TraceCopyFromStart(ctx, conn, data)
+ }
opts := t.spanOptions(conn.Config(), operationTypeCopyFrom, "",
tracer.Tag(tagCopyFromTables, data.TableName),
tracer.Tag(tagCopyFromColumns, data.ColumnNames),
@@ -141,10 +193,13 @@ func (t *pgxTracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data
return ctx
}
-func (t *pgxTracer) TraceCopyFromEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceCopyFromEndData) {
+func (t *pgxTracer) TraceCopyFromEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromEndData) {
if !t.cfg.traceCopyFrom {
return
}
+ if t.wrapped.copyFrom != nil {
+ t.wrapped.copyFrom.TraceCopyFromEnd(ctx, conn, data)
+ }
finishSpan(ctx, data.Err)
}
@@ -152,15 +207,21 @@ func (t *pgxTracer) TracePrepareStart(ctx context.Context, conn *pgx.Conn, data
if !t.cfg.tracePrepare {
return ctx
}
+ if t.wrapped.prepare != nil {
+ ctx = t.wrapped.prepare.TracePrepareStart(ctx, conn, data)
+ }
opts := t.spanOptions(conn.Config(), operationTypePrepare, data.SQL)
_, ctx = tracer.StartSpanFromContext(ctx, "pgx.prepare", opts...)
return ctx
}
-func (t *pgxTracer) TracePrepareEnd(ctx context.Context, _ *pgx.Conn, data pgx.TracePrepareEndData) {
+func (t *pgxTracer) TracePrepareEnd(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareEndData) {
if !t.cfg.tracePrepare {
return
}
+ if t.wrapped.prepare != nil {
+ t.wrapped.prepare.TracePrepareEnd(ctx, conn, data)
+ }
finishSpan(ctx, data.Err)
}
@@ -168,6 +229,9 @@ func (t *pgxTracer) TraceConnectStart(ctx context.Context, data pgx.TraceConnect
if !t.cfg.traceConnect {
return ctx
}
+ if t.wrapped.connect != nil {
+ ctx = t.wrapped.connect.TraceConnectStart(ctx, data)
+ }
opts := t.spanOptions(data.ConnConfig, operationTypeConnect, "")
_, ctx = tracer.StartSpanFromContext(ctx, "pgx.connect", opts...)
return ctx
@@ -177,23 +241,31 @@ func (t *pgxTracer) TraceConnectEnd(ctx context.Context, data pgx.TraceConnectEn
if !t.cfg.traceConnect {
return
}
+ if t.wrapped.connect != nil {
+ t.wrapped.connect.TraceConnectEnd(ctx, data)
+ }
finishSpan(ctx, data.Err)
}
-func (t *pgxTracer) TraceAcquireStart(ctx context.Context, pool *pgxpool.Pool, _ pgxpool.TraceAcquireStartData) context.Context {
+func (t *pgxTracer) TraceAcquireStart(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireStartData) context.Context {
if !t.cfg.traceAcquire {
return ctx
}
+ if t.wrapped.poolAcquire != nil {
+ ctx = t.wrapped.poolAcquire.TraceAcquireStart(ctx, pool, data)
+ }
opts := t.spanOptions(pool.Config().ConnConfig, operationTypeAcquire, "")
_, ctx = tracer.StartSpanFromContext(ctx, "pgx.pool.acquire", opts...)
return ctx
}
-func (t *pgxTracer) TraceAcquireEnd(ctx context.Context, _ *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
+func (t *pgxTracer) TraceAcquireEnd(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
if !t.cfg.traceAcquire {
return
}
-
+ if t.wrapped.poolAcquire != nil {
+ t.wrapped.poolAcquire.TraceAcquireEnd(ctx, pool, data)
+ }
finishSpan(ctx, data.Err)
}
diff --git a/contrib/jackc/pgx.v5/pgx_tracer_test.go b/contrib/jackc/pgx.v5/pgx_tracer_test.go
index 8236c7bbd5..561854e76f 100644
--- a/contrib/jackc/pgx.v5/pgx_tracer_test.go
+++ b/contrib/jackc/pgx.v5/pgx_tracer_test.go
@@ -8,7 +8,6 @@ package pgx
import (
"context"
"fmt"
-
"log"
"os"
"testing"
@@ -20,6 +19,8 @@ import (
"github.com/DataDog/dd-trace-go/v2/instrumentation"
"github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/pgx/v5/pgxpool"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -70,26 +71,56 @@ func TestMain(m *testing.M) {
}
func TestConnect(t *testing.T) {
- mt := mocktracer.Start()
- defer mt.Stop()
-
- opts := append(tracingAllDisabled(), WithTraceConnect(true))
- runAllOperations(t, opts...)
-
- spans := mt.FinishedSpans()
- require.Len(t, spans, 2)
-
- ps := spans[1]
- assert.Equal(t, "parent", ps.OperationName())
- assert.Equal(t, "parent", ps.Tag(ext.ResourceName))
-
- s := spans[0]
- assertCommonTags(t, s)
- assert.Equal(t, "pgx.connect", s.OperationName())
- assert.Equal(t, "Connect", s.Tag(ext.ResourceName))
- assert.Equal(t, "Connect", s.Tag("db.operation"))
- assert.Equal(t, nil, s.Tag(ext.DBStatement))
- assert.Equal(t, ps.SpanID(), s.ParentID())
+ testCases := []struct {
+ name string
+ newConnCreator func(t *testing.T, prev *pgxMockTracer) createConnFn
+ }{
+ {
+ name: "pool",
+ newConnCreator: func(t *testing.T, prev *pgxMockTracer) createConnFn {
+ opts := append(tracingAllDisabled(), WithTraceConnect(true))
+ return newPoolCreator(nil, opts...)
+ },
+ },
+ {
+ name: "conn",
+ newConnCreator: func(t *testing.T, prev *pgxMockTracer) createConnFn {
+ opts := append(tracingAllDisabled(), WithTraceConnect(true))
+ return newConnCreator(nil, nil, opts...)
+ },
+ },
+ {
+ name: "conn_with_options",
+ newConnCreator: func(t *testing.T, prev *pgxMockTracer) createConnFn {
+ opts := append(tracingAllDisabled(), WithTraceConnect(true))
+ return newConnCreator(nil, &pgx.ParseConfigOptions{}, opts...)
+ },
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ opts := append(tracingAllDisabled(), WithTraceConnect(true))
+ runAllOperations(t, newPoolCreator(nil, opts...))
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 2)
+
+ ps := spans[1]
+ assert.Equal(t, "parent", ps.OperationName())
+ assert.Equal(t, "parent", ps.Tag(ext.ResourceName))
+
+ s := spans[0]
+ assertCommonTags(t, s)
+ assert.Equal(t, "pgx.connect", s.OperationName())
+ assert.Equal(t, "Connect", s.Tag(ext.ResourceName))
+ assert.Equal(t, "Connect", s.Tag("db.operation"))
+ assert.Equal(t, nil, s.Tag(ext.DBStatement))
+ assert.Equal(t, ps.SpanID(), s.ParentID())
+ })
+ }
}
func TestQuery(t *testing.T) {
@@ -97,7 +128,7 @@ func TestQuery(t *testing.T) {
defer mt.Stop()
opts := append(tracingAllDisabled(), WithTraceQuery(true))
- runAllOperations(t, opts...)
+ runAllOperations(t, newPoolCreator(nil, opts...))
spans := mt.FinishedSpans()
require.Len(t, spans, 3)
@@ -130,7 +161,7 @@ func TestPrepare(t *testing.T) {
defer mt.Stop()
opts := append(tracingAllDisabled(), WithTracePrepare(true))
- runAllOperations(t, opts...)
+ runAllOperations(t, newPoolCreator(nil, opts...))
spans := mt.FinishedSpans()
require.Len(t, spans, 3)
@@ -163,7 +194,7 @@ func TestBatch(t *testing.T) {
defer mt.Stop()
opts := append(tracingAllDisabled(), WithTraceBatch(true))
- runAllOperations(t, opts...)
+ runAllOperations(t, newPoolCreator(nil, opts...))
spans := mt.FinishedSpans()
require.Len(t, spans, 5)
@@ -212,7 +243,7 @@ func TestCopyFrom(t *testing.T) {
defer mt.Stop()
opts := append(tracingAllDisabled(), WithTraceCopyFrom(true))
- runAllOperations(t, opts...)
+ runAllOperations(t, newPoolCreator(nil, opts...))
spans := mt.FinishedSpans()
require.Len(t, spans, 2)
@@ -237,7 +268,7 @@ func TestAcquire(t *testing.T) {
defer mt.Stop()
opts := append(tracingAllDisabled(), WithTraceAcquire(true))
- runAllOperations(t, opts...)
+ runAllOperations(t, newPoolCreator(nil, opts...))
spans := mt.FinishedSpans()
require.Len(t, spans, 5)
@@ -255,6 +286,54 @@ func TestAcquire(t *testing.T) {
assert.Equal(t, ps.SpanID(), s.ParentID())
}
+// https://github.com/DataDog/dd-trace-go/issues/2908
+func TestWrapTracer(t *testing.T) {
+ testCases := []struct {
+ name string
+ newConnCreator func(t *testing.T, prev *pgxMockTracer) createConnFn
+ wantSpans int
+ wantHooks int
+ }{
+ {
+ name: "pool",
+ newConnCreator: func(t *testing.T, prev *pgxMockTracer) createConnFn {
+ cfg, err := pgxpool.ParseConfig(postgresDSN)
+ require.NoError(t, err)
+ cfg.ConnConfig.Tracer = prev
+ return newPoolCreator(cfg)
+ },
+ wantSpans: 15,
+ wantHooks: 13,
+ },
+ {
+ name: "conn",
+ newConnCreator: func(t *testing.T, prev *pgxMockTracer) createConnFn {
+ cfg, err := pgx.ParseConfig(postgresDSN)
+ require.NoError(t, err)
+ cfg.Tracer = prev
+ return newConnCreator(cfg, nil)
+ },
+ wantSpans: 11,
+ wantHooks: 11, // 13 - 2 pool tracer hooks
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ prevTracer := &pgxMockTracer{
+ called: make(map[string]bool),
+ }
+ runAllOperations(t, tc.newConnCreator(t, prevTracer))
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, tc.wantSpans)
+ assert.Len(t, prevTracer.called, tc.wantHooks, "some hook(s) on the previous tracer were not called")
+ })
+ }
+}
+
func tracingAllDisabled() []Option {
return []Option{
WithTraceConnect(false),
@@ -266,18 +345,65 @@ func tracingAllDisabled() []Option {
}
}
-func runAllOperations(t *testing.T, opts ...Option) {
+type pgxConn interface {
+ QueryRow(ctx context.Context, sql string, args ...any) pgx.Row
+ SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults
+ Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error)
+ CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error)
+}
+
+type createConnFn func(t *testing.T, ctx context.Context) pgxConn
+
+func newPoolCreator(cfg *pgxpool.Config, opts ...Option) createConnFn {
+ return func(t *testing.T, ctx context.Context) pgxConn {
+ var (
+ pool *pgxpool.Pool
+ err error
+ )
+ if cfg == nil {
+ pool, err = NewPool(ctx, postgresDSN, opts...)
+ } else {
+ pool, err = NewPoolWithConfig(ctx, cfg, opts...)
+ }
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ pool.Close()
+ })
+ return pool
+ }
+}
+
+func newConnCreator(cfg *pgx.ConnConfig, connOpts *pgx.ParseConfigOptions, opts ...Option) createConnFn {
+ return func(t *testing.T, ctx context.Context) pgxConn {
+ var (
+ conn *pgx.Conn
+ err error
+ )
+ if cfg != nil {
+ conn, err = ConnectConfig(ctx, cfg, opts...)
+ } else if connOpts != nil {
+ conn, err = ConnectWithOptions(ctx, postgresDSN, *connOpts, opts...)
+ } else {
+ conn, err = Connect(ctx, postgresDSN, opts...)
+ }
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ assert.NoError(t, conn.Close(ctx))
+ })
+ return conn
+ }
+}
+
+func runAllOperations(t *testing.T, createConn createConnFn) {
parent, ctx := tracer.StartSpanFromContext(context.Background(), "parent")
defer parent.Finish()
// Connect
- conn, err := NewPool(ctx, postgresDSN, opts...)
- require.NoError(t, err)
- defer conn.Close()
+ conn := createConn(t, ctx)
// Query
var x int
- err = conn.QueryRow(ctx, `SELECT 1`).Scan(&x)
+ err := conn.QueryRow(ctx, `SELECT 1`).Scan(&x)
require.NoError(t, err)
require.Equal(t, 1, x)
@@ -328,3 +454,69 @@ func assertCommonTags(t *testing.T, s *mocktracer.Span) {
assert.Equal(t, "postgres", s.Tag(ext.DBName))
assert.Equal(t, "postgres", s.Tag(ext.DBUser))
}
+
+type pgxMockTracer struct {
+ called map[string]bool
+}
+
+var (
+ _ allPgxTracers = (*pgxMockTracer)(nil)
+)
+
+func (p *pgxMockTracer) TraceQueryStart(ctx context.Context, _ *pgx.Conn, _ pgx.TraceQueryStartData) context.Context {
+ p.called["query.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TraceQueryEnd(_ context.Context, _ *pgx.Conn, _ pgx.TraceQueryEndData) {
+ p.called["query.end"] = true
+}
+
+func (p *pgxMockTracer) TraceBatchStart(ctx context.Context, _ *pgx.Conn, _ pgx.TraceBatchStartData) context.Context {
+ p.called["batch.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TraceBatchQuery(_ context.Context, _ *pgx.Conn, _ pgx.TraceBatchQueryData) {
+ p.called["batch.query"] = true
+}
+
+func (p *pgxMockTracer) TraceBatchEnd(_ context.Context, _ *pgx.Conn, _ pgx.TraceBatchEndData) {
+ p.called["batch.end"] = true
+}
+
+func (p *pgxMockTracer) TraceConnectStart(ctx context.Context, _ pgx.TraceConnectStartData) context.Context {
+ p.called["connect.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TraceConnectEnd(_ context.Context, _ pgx.TraceConnectEndData) {
+ p.called["connect.end"] = true
+}
+
+func (p *pgxMockTracer) TracePrepareStart(ctx context.Context, _ *pgx.Conn, _ pgx.TracePrepareStartData) context.Context {
+ p.called["prepare.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TracePrepareEnd(_ context.Context, _ *pgx.Conn, _ pgx.TracePrepareEndData) {
+ p.called["prepare.end"] = true
+}
+
+func (p *pgxMockTracer) TraceCopyFromStart(ctx context.Context, _ *pgx.Conn, _ pgx.TraceCopyFromStartData) context.Context {
+ p.called["copyfrom.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TraceCopyFromEnd(_ context.Context, _ *pgx.Conn, _ pgx.TraceCopyFromEndData) {
+ p.called["copyfrom.end"] = true
+}
+
+func (p *pgxMockTracer) TraceAcquireStart(ctx context.Context, _ *pgxpool.Pool, _ pgxpool.TraceAcquireStartData) context.Context {
+ p.called["pool.acquire.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TraceAcquireEnd(_ context.Context, _ *pgxpool.Pool, _ pgxpool.TraceAcquireEndData) {
+ p.called["pool.acquire.end"] = true
+}
diff --git a/contrib/jackc/pgx.v5/pgxpool.go b/contrib/jackc/pgx.v5/pgxpool.go
index f4a8cb7ed0..11d93b0859 100644
--- a/contrib/jackc/pgx.v5/pgxpool.go
+++ b/contrib/jackc/pgx.v5/pgxpool.go
@@ -20,7 +20,10 @@ func NewPool(ctx context.Context, connString string, opts ...Option) (*pgxpool.P
}
func NewPoolWithConfig(ctx context.Context, config *pgxpool.Config, opts ...Option) (*pgxpool.Pool, error) {
- tracer := newPgxTracer(opts...)
+ // pgxpool.NewWithConfig panics if the config was not created using pgxpool.ParseConfig, which should ensure everything
+ // is properly initialized, so it doesn't make sense to check for a nil config here.
+
+ tracer := wrapPgxTracer(config.ConnConfig.Tracer, opts...)
config.ConnConfig.Tracer = tracer
pool, err := pgxpool.NewWithConfig(ctx, config)
if err != nil {
diff --git a/contrib/jmoiron/sqlx/go.mod b/contrib/jmoiron/sqlx/go.mod
index 68229400cd..e87c55326e 100644
--- a/contrib/jmoiron/sqlx/go.mod
+++ b/contrib/jmoiron/sqlx/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/jmoiron/sqlx/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/database/sql/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/database/sql/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/denisenkom/go-mssqldb v0.11.0
github.com/go-sql-driver/mysql v1.6.0
github.com/jmoiron/sqlx v1.3.5
diff --git a/contrib/jmoiron/sqlx/go.sum b/contrib/jmoiron/sqlx/go.sum
index fdc41d7303..d70d4497db 100644
--- a/contrib/jmoiron/sqlx/go.sum
+++ b/contrib/jmoiron/sqlx/go.sum
@@ -46,8 +46,8 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2V
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo=
diff --git a/contrib/julienschmidt/httprouter/go.mod b/contrib/julienschmidt/httprouter/go.mod
index 4e4f214e54..7b45afdb3a 100644
--- a/contrib/julienschmidt/httprouter/go.mod
+++ b/contrib/julienschmidt/httprouter/go.mod
@@ -2,11 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/julienschmidt/httprouter/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/julienschmidt/httprouter v1.3.0
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/julienschmidt/httprouter/httprouter.go b/contrib/julienschmidt/httprouter/httprouter.go
index 7a25b48bb7..368f4fc64b 100644
--- a/contrib/julienschmidt/httprouter/httprouter.go
+++ b/contrib/julienschmidt/httprouter/httprouter.go
@@ -7,18 +7,13 @@
package httprouter // import "github.com/DataDog/dd-trace-go/contrib/julienschmidt/httprouter/v2"
import (
- "math"
"net/http"
- "strings"
- httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2"
- "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
- "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation"
- httptraceinstr "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace"
- "github.com/DataDog/dd-trace-go/v2/instrumentation/options"
"github.com/julienschmidt/httprouter"
+
+ "github.com/DataDog/dd-trace-go/contrib/julienschmidt/httprouter/v2/internal/tracing"
)
var instr *instrumentation.Instrumentation
@@ -30,43 +25,55 @@ func init() {
// Router is a traced version of httprouter.Router.
type Router struct {
*httprouter.Router
- config *routerConfig
+ config *tracing.Config
}
// New returns a new router augmented with tracing.
func New(opts ...RouterOption) *Router {
- cfg := new(routerConfig)
- defaults(cfg)
- for _, fn := range opts {
- fn.apply(cfg)
- }
- if !math.IsNaN(cfg.analyticsRate) {
- cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.EventSampleRate, cfg.analyticsRate))
- }
-
- cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.SpanKind, ext.SpanKindServer))
- cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.Component, instrumentation.PackageJulienschmidtHTTPRouter))
-
+ cfg := tracing.NewConfig(opts...)
instr.Logger().Debug("contrib/julienschmidt/httprouter: Configuring Router: %#v", cfg)
return &Router{httprouter.New(), cfg}
}
// ServeHTTP implements http.Handler.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- // get the resource associated to this request
- route := req.URL.Path
- _, ps, _ := r.Router.Lookup(req.Method, route)
- for _, param := range ps {
- route = strings.Replace(route, param.Value, ":"+param.Key, 1)
+ tw, treq, afterHandle, handled := tracing.BeforeHandle(r.config, r.Router, wrapRouter, w, req)
+ defer afterHandle()
+ if handled {
+ return
}
- resource := req.Method + " " + route
- spanOpts := options.Expand(r.config.spanOpts, 0, 1) // spanOpts must be a copy of r.config.spanOpts, locally scoped, to avoid races.
- spanOpts = append(spanOpts, httptraceinstr.HeaderTagsFromRequest(req, r.config.headerTags))
-
- httptrace.TraceAndServe(r.Router, w, req, &httptrace.ServeConfig{
- Service: r.config.serviceName,
- Resource: resource,
- SpanOpts: spanOpts,
- Route: route,
- })
+ r.Router.ServeHTTP(tw, treq)
+}
+
+type wRouter struct {
+ *httprouter.Router
+}
+
+func wrapRouter(r *httprouter.Router) tracing.Router {
+ return &wRouter{r}
+}
+
+func (w wRouter) Lookup(method string, path string) (any, []tracing.Param, bool) {
+ h, params, ok := w.Router.Lookup(method, path)
+ return h, wrapParams(params), ok
+}
+
+type wParam struct {
+ httprouter.Param
+}
+
+func wrapParams(params httprouter.Params) []tracing.Param {
+ wParams := make([]tracing.Param, len(params))
+ for i, p := range params {
+ wParams[i] = wParam{p}
+ }
+ return wParams
+}
+
+func (w wParam) GetKey() string {
+ return w.Key
+}
+
+func (w wParam) GetValue() string {
+ return w.Value
}
diff --git a/contrib/julienschmidt/httprouter/internal/tracing/config.go b/contrib/julienschmidt/httprouter/internal/tracing/config.go
new file mode 100644
index 0000000000..7628f25ed7
--- /dev/null
+++ b/contrib/julienschmidt/httprouter/internal/tracing/config.go
@@ -0,0 +1,92 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package tracing
+
+import (
+ "math"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/DataDog/dd-trace-go/v2/instrumentation"
+ "github.com/DataDog/dd-trace-go/v2/instrumentation/options"
+)
+
+type Config struct {
+ headerTags instrumentation.HeaderTags
+ spanOpts []tracer.StartSpanOption
+ serviceName string
+ analyticsRate float64
+}
+
+func NewConfig(opts ...Option) *Config {
+ cfg := new(Config)
+ if options.GetBoolEnv("DD_TRACE_HTTPROUTER_ANALYTICS_ENABLED", false) {
+ cfg.analyticsRate = 1.0
+ } else {
+ cfg.analyticsRate = instr.AnalyticsRate(true)
+ }
+ cfg.serviceName = instr.ServiceName(instrumentation.ComponentDefault, nil)
+ cfg.headerTags = instr.HTTPHeadersAsTags()
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ if !math.IsNaN(cfg.analyticsRate) {
+ cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.EventSampleRate, cfg.analyticsRate))
+ }
+
+ cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.SpanKind, ext.SpanKindServer))
+ cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.Component, componentName))
+ return cfg
+}
+
+type Option func(*Config)
+
+// WithService sets the given service name for the returned router.
+func WithService(name string) Option {
+ return func(cfg *Config) {
+ cfg.serviceName = name
+ }
+}
+
+// WithSpanOptions applies the given set of options to the span started by the router.
+func WithSpanOptions(opts ...tracer.StartSpanOption) Option {
+ return func(cfg *Config) {
+ cfg.spanOpts = opts
+ }
+}
+
+// WithAnalytics enables Trace Analytics for all started spans.
+func WithAnalytics(on bool) Option {
+ return func(cfg *Config) {
+ if on {
+ cfg.analyticsRate = 1.0
+ } else {
+ cfg.analyticsRate = math.NaN()
+ }
+ }
+}
+
+// WithAnalyticsRate sets the sampling rate for Trace Analytics events
+// correlated to started spans.
+func WithAnalyticsRate(rate float64) Option {
+ return func(cfg *Config) {
+ if rate >= 0.0 && rate <= 1.0 {
+ cfg.analyticsRate = rate
+ } else {
+ cfg.analyticsRate = math.NaN()
+ }
+ }
+}
+
+// WithHeaderTags enables the integration to attach HTTP request headers as span tags.
+// Warning:
+// Using this feature can risk exposing sensitive data such as authorization tokens to Datadog.
+// Special headers cannot be sub-selected. E.g., an entire Cookie header would be transmitted, without the ability to choose specific Cookies.
+func WithHeaderTags(headers []string) Option {
+ return func(cfg *Config) {
+ cfg.headerTags = instrumentation.NewHeaderTags(headers)
+ }
+}
diff --git a/contrib/julienschmidt/httprouter/internal/tracing/tracing.go b/contrib/julienschmidt/httprouter/internal/tracing/tracing.go
new file mode 100644
index 0000000000..1ad4bda9ba
--- /dev/null
+++ b/contrib/julienschmidt/httprouter/internal/tracing/tracing.go
@@ -0,0 +1,62 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package tracing
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/DataDog/dd-trace-go/v2/instrumentation"
+ "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace"
+)
+
+var instr *instrumentation.Instrumentation
+
+const componentName = "julienschmidt/httprouter"
+
+func init() {
+ instr = instrumentation.Load(instrumentation.PackageJulienschmidtHTTPRouter)
+}
+
+type Router interface {
+ Lookup(method string, path string) (any, []Param, bool)
+}
+
+type Param interface {
+ GetKey() string
+ GetValue() string
+}
+
+// BeforeHandle is an adapter of httptrace.BeforeHandle for julienschmidt/httprouter types.
+func BeforeHandle[T any, WT Router](
+ cfg *Config,
+ router T,
+ wrapRouter func(T) WT,
+ w http.ResponseWriter,
+ req *http.Request,
+) (http.ResponseWriter, *http.Request, func(), bool) {
+ wRouter := wrapRouter(router)
+ // get the resource associated to this request
+ route := req.URL.Path
+ _, ps, _ := wRouter.Lookup(req.Method, route)
+ for _, param := range ps {
+ route = strings.Replace(route, param.GetValue(), ":"+param.GetKey(), 1)
+ }
+
+ resource := req.Method + " " + route
+ spanOpts := make([]tracer.StartSpanOption, len(cfg.spanOpts))
+	copy(spanOpts, cfg.spanOpts) // spanOpts must be a copy of cfg.spanOpts, locally scoped, to avoid races.
+ spanOpts = append(spanOpts, httptrace.HeaderTagsFromRequest(req, cfg.headerTags))
+
+ serveCfg := &httptrace.ServeConfig{
+ Service: cfg.serviceName,
+ Resource: resource,
+ SpanOpts: spanOpts,
+ Route: route,
+ }
+ return httptrace.BeforeHandle(serveCfg, w, req)
+}
diff --git a/contrib/julienschmidt/httprouter/option.go b/contrib/julienschmidt/httprouter/option.go
index b776b56102..b42ae72dc8 100644
--- a/contrib/julienschmidt/httprouter/option.go
+++ b/contrib/julienschmidt/httprouter/option.go
@@ -6,14 +6,11 @@
package httprouter
import (
- "math"
-
+ "github.com/DataDog/dd-trace-go/contrib/julienschmidt/httprouter/v2/internal/tracing"
"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation"
)
-const defaultServiceName = "http.router"
-
type routerConfig struct {
serviceName string
spanOpts []tracer.StartSpanOption
@@ -21,67 +18,24 @@ type routerConfig struct {
headerTags instrumentation.HeaderTags
}
-// RouterOption describes options for the HTTPRouter integration.
-type RouterOption interface {
- apply(*routerConfig)
-}
-
-// RouterOptionFn represents options applicable to New.
-type RouterOptionFn func(*routerConfig)
-
-func (fn RouterOptionFn) apply(cfg *routerConfig) {
- fn(cfg)
-}
-
-func defaults(cfg *routerConfig) {
- cfg.analyticsRate = instr.AnalyticsRate(true)
- cfg.serviceName = instr.ServiceName(instrumentation.ComponentServer, nil)
- cfg.headerTags = instr.HTTPHeadersAsTags()
-}
+// RouterOption represents an option that can be passed to New.
+type RouterOption = tracing.Option
// WithService sets the given service name for the returned router.
-func WithService(name string) RouterOptionFn {
- return func(cfg *routerConfig) {
- cfg.serviceName = name
- }
-}
+var WithService = tracing.WithService
// WithSpanOptions applies the given set of options to the span started by the router.
-func WithSpanOptions(opts ...tracer.StartSpanOption) RouterOptionFn {
- return func(cfg *routerConfig) {
- cfg.spanOpts = opts
- }
-}
+var WithSpanOptions = tracing.WithSpanOptions
// WithAnalytics enables Trace Analytics for all started spans.
-func WithAnalytics(on bool) RouterOptionFn {
- return func(cfg *routerConfig) {
- if on {
- cfg.analyticsRate = 1.0
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalytics = tracing.WithAnalytics
// WithAnalyticsRate sets the sampling rate for Trace Analytics events
// correlated to started spans.
-func WithAnalyticsRate(rate float64) RouterOptionFn {
- return func(cfg *routerConfig) {
- if rate >= 0.0 && rate <= 1.0 {
- cfg.analyticsRate = rate
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalyticsRate = tracing.WithAnalyticsRate
// WithHeaderTags enables the integration to attach HTTP request headers as span tags.
// Warning:
// Using this feature can risk exposing sensitive data such as authorization tokens to Datadog.
// Special headers can not be sub-selected. E.g., an entire Cookie header would be transmitted, without the ability to choose specific Cookies.
-func WithHeaderTags(headers []string) RouterOptionFn {
- return func(cfg *routerConfig) {
- cfg.headerTags = instrumentation.NewHeaderTags(headers)
- }
-}
+var WithHeaderTags = tracing.WithHeaderTags
diff --git a/contrib/k8s.io/client-go/go.mod b/contrib/k8s.io/client-go/go.mod
index 709de618fa..a28457572c 100644
--- a/contrib/k8s.io/client-go/go.mod
+++ b/contrib/k8s.io/client-go/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/k8s.io/client-go/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
k8s.io/apimachinery v0.23.17
k8s.io/client-go v0.23.17
diff --git a/contrib/labstack/echo.v4/echotrace_test.go b/contrib/labstack/echo.v4/echotrace_test.go
index adc992810e..cef0ed82c7 100644
--- a/contrib/labstack/echo.v4/echotrace_test.go
+++ b/contrib/labstack/echo.v4/echotrace_test.go
@@ -473,7 +473,7 @@ func TestNoDebugStack(t *testing.T) {
span := spans[0]
require.NotNil(t, span.Tag(ext.ErrorMsg))
assert.Equal(errWant.Error(), span.Tag(ext.ErrorMsg))
- assert.Empty(span.Tag(ext.ErrorStack))
+ assert.Nil(span.Tags()[ext.ErrorStack])
assert.Equal("labstack/echo.v4", span.Tag(ext.Component))
assert.Equal(ext.SpanKindServer, span.Tag(ext.SpanKind))
}
diff --git a/contrib/labstack/echo.v4/go.mod b/contrib/labstack/echo.v4/go.mod
index 57a98269ca..54c9f4cec9 100644
--- a/contrib/labstack/echo.v4/go.mod
+++ b/contrib/labstack/echo.v4/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/labstack/echo.v4/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/labstack/echo/v4 v4.11.1
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/log/slog/go.mod b/contrib/log/slog/go.mod
index eefc4d83aa..d69af309cd 100644
--- a/contrib/log/slog/go.mod
+++ b/contrib/log/slog/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/log/slog/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/log/slog/slog.go b/contrib/log/slog/slog.go
index c86a714536..a46e684530 100644
--- a/contrib/log/slog/slog.go
+++ b/contrib/log/slog/slog.go
@@ -10,16 +10,22 @@ import (
"context"
"io"
"log/slog"
+ "strconv"
"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation"
)
-var instr *instrumentation.Instrumentation
-
func init() {
- instr = instrumentation.Load(instrumentation.PackageLogSlog)
+ _ = instrumentation.Load(instrumentation.PackageLogSlog)
+}
+
+var _ slog.Handler = (*handler)(nil)
+
+type group struct {
+ name string
+ attrs []slog.Attr
}
// NewJSONHandler is a convenience function that returns a *slog.JSONHandler logger enhanced with
@@ -30,21 +36,71 @@ func NewJSONHandler(w io.Writer, opts *slog.HandlerOptions) slog.Handler {
// WrapHandler enhances the given logger handler attaching tracing information to logs.
func WrapHandler(h slog.Handler) slog.Handler {
- return &handler{h}
+ return &handler{wrapped: h}
}
type handler struct {
- slog.Handler
+ wrapped slog.Handler
+ groups []group
+}
+
+// Enabled calls the wrapped handler Enabled method.
+func (h *handler) Enabled(ctx context.Context, level slog.Level) bool {
+ return h.wrapped.Enabled(ctx, level)
}
// Handle handles the given Record, attaching tracing information if found.
func (h *handler) Handle(ctx context.Context, rec slog.Record) error {
+ reqHandler := h.wrapped
+
+ // We need to ensure the trace id and span id keys are set at the root level:
+ // https://docs.datadoghq.com/tracing/other_telemetry/connect_logs_and_traces/
+ // In case the user has created group loggers, we ignore those and
+ // set them at the root level.
span, ok := tracer.SpanFromContext(ctx)
if ok {
- rec.Add(
- slog.String(ext.LogKeyTraceID, span.Context().TraceID()),
- slog.Uint64(ext.LogKeySpanID, span.Context().SpanID()),
- )
+ traceID := span.Context().TraceID()
+ spanID := strconv.FormatUint(span.Context().SpanID(), 10)
+
+ attrs := []slog.Attr{
+ slog.String(ext.LogKeyTraceID, traceID),
+ slog.String(ext.LogKeySpanID, spanID),
+ }
+ reqHandler = reqHandler.WithAttrs(attrs)
+ }
+ for _, g := range h.groups {
+ reqHandler = reqHandler.WithGroup(g.name)
+ if len(g.attrs) > 0 {
+ reqHandler = reqHandler.WithAttrs(g.attrs)
+ }
+ }
+ return reqHandler.Handle(ctx, rec)
+}
+
+// WithAttrs saves the provided attributes associated with the current group.
+// If Group was not called for the logger, we just call WithAttrs for the wrapped handler.
+func (h *handler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ if len(h.groups) == 0 {
+ return &handler{
+ wrapped: h.wrapped.WithAttrs(attrs),
+ groups: h.groups,
+ }
+ }
+ groups := append([]group{}, h.groups...)
+ curGroup := groups[len(groups)-1]
+ curGroup.attrs = append(curGroup.attrs, attrs...)
+ groups[len(groups)-1] = curGroup
+
+ return &handler{
+ wrapped: h.wrapped,
+ groups: groups,
+ }
+}
+
+// WithGroup saves the provided group to be used later in the Handle method.
+func (h *handler) WithGroup(name string) slog.Handler {
+ return &handler{
+ wrapped: h.wrapped,
+ groups: append(h.groups, group{name: name}),
}
- return h.Handler.Handle(ctx, rec)
}
diff --git a/contrib/log/slog/slog_test.go b/contrib/log/slog/slog_test.go
index 09bd242cf7..7fc3140497 100644
--- a/contrib/log/slog/slog_test.go
+++ b/contrib/log/slog/slog_test.go
@@ -9,7 +9,9 @@ import (
"bytes"
"context"
"encoding/json"
+ "io"
"log/slog"
+ "strconv"
"strings"
"testing"
@@ -21,29 +23,37 @@ import (
"github.com/DataDog/dd-trace-go/v2/instrumentation/testutils"
)
-func assertLogEntry(t *testing.T, rawEntry, wantMsg, wantLevel string) {
+func assertLogEntry(t *testing.T, rawEntry, wantMsg, wantLevel string, span *tracer.Span, assertExtra func(t *testing.T, entry map[string]interface{})) {
t.Helper()
- var data map[string]interface{}
- err := json.Unmarshal([]byte(rawEntry), &data)
+ t.Log(rawEntry)
+
+ var entry map[string]interface{}
+ err := json.Unmarshal([]byte(rawEntry), &entry)
require.NoError(t, err)
- require.NotEmpty(t, data)
+ require.NotEmpty(t, entry)
+
+ assert.Equal(t, wantMsg, entry["msg"])
+ assert.Equal(t, wantLevel, entry["level"])
+ assert.NotEmpty(t, entry["time"])
- assert.Equal(t, wantMsg, data["msg"])
- assert.Equal(t, wantLevel, data["level"])
- assert.NotEmpty(t, data["time"])
- assert.NotEmpty(t, data[ext.LogKeyTraceID])
- assert.NotEmpty(t, data[ext.LogKeySpanID])
+ traceID := span.Context().TraceID()
+ spanID := strconv.FormatUint(span.Context().SpanID(), 10)
+ assert.Equal(t, traceID, entry[ext.LogKeyTraceID], "trace id not found")
+ assert.Equal(t, spanID, entry[ext.LogKeySpanID], "span id not found")
+
+ if assertExtra != nil {
+ assertExtra(t, entry)
+ }
}
-func testLogger(t *testing.T, createHandler func(b *bytes.Buffer) slog.Handler) {
+func testLogger(t *testing.T, createLogger func(b io.Writer) *slog.Logger, assertExtra func(t *testing.T, entry map[string]interface{})) {
tracer.Start(tracer.WithLogger(testutils.DiscardLogger()))
defer tracer.Stop()
// create the application logger
var b bytes.Buffer
- h := createHandler(&b)
- logger := slog.New(h)
+ logger := createLogger(&b)
// start a new span
span, ctx := tracer.StartSpanFromContext(context.Background(), "test")
@@ -59,18 +69,146 @@ func testLogger(t *testing.T, createHandler func(b *bytes.Buffer) slog.Handler)
)
// assert log entries contain trace information
require.Len(t, logs, 2)
- assertLogEntry(t, logs[0], "this is an info log with tracing information", "INFO")
- assertLogEntry(t, logs[1], "this is an error log with tracing information", "ERROR")
+ assertLogEntry(t, logs[0], "this is an info log with tracing information", "INFO", span, assertExtra)
+ assertLogEntry(t, logs[1], "this is an error log with tracing information", "ERROR", span, assertExtra)
}
func TestNewJSONHandler(t *testing.T) {
- testLogger(t, func(b *bytes.Buffer) slog.Handler {
- return NewJSONHandler(b, nil)
- })
+ testLogger(
+ t,
+ func(w io.Writer) *slog.Logger {
+ return slog.New(NewJSONHandler(w, nil))
+ },
+ nil,
+ )
}
func TestWrapHandler(t *testing.T) {
- testLogger(t, func(b *bytes.Buffer) slog.Handler {
- return WrapHandler(slog.NewJSONHandler(b, nil))
+ testLogger(
+ t,
+ func(w io.Writer) *slog.Logger {
+ return slog.New(WrapHandler(slog.NewJSONHandler(w, nil)))
+ },
+ nil,
+ )
+}
+
+func TestHandlerWithAttrs(t *testing.T) {
+ testLogger(
+ t,
+ func(w io.Writer) *slog.Logger {
+ return slog.New(NewJSONHandler(w, nil)).
+ With("key1", "val1").
+ With(ext.LogKeyTraceID, "trace-id").
+ With(ext.LogKeySpanID, "span-id")
+ },
+ nil,
+ )
+}
+
+func TestHandlerWithGroup(t *testing.T) {
+ t.Run("simple", func(t *testing.T) {
+ testLogger(
+ t,
+ func(w io.Writer) *slog.Logger {
+ return slog.New(NewJSONHandler(w, nil)).
+ WithGroup("some-group").
+ With("key1", "val1")
+ },
+ func(t *testing.T, entry map[string]interface{}) {
+ assert.Equal(t, map[string]interface{}{
+ "key1": "val1",
+ }, entry["some-group"], "group entry not found")
+ },
+ )
+ })
+
+ t.Run("nested groups", func(t *testing.T) {
+ testLogger(
+ t,
+ func(w io.Writer) *slog.Logger {
+ return slog.New(NewJSONHandler(w, nil)).
+ With("key0", "val0").
+ WithGroup("group1").
+ With("key1", "val1").
+ WithGroup("group1"). // repeat same key again
+ With("key1", "val1").
+ WithGroup("group2").
+ With("key2", "val2").
+ With("key3", "val3")
+ },
+ func(t *testing.T, entry map[string]interface{}) {
+ groupKeys := map[string]interface{}{
+ "key1": "val1",
+ "group1": map[string]interface{}{
+ "key1": "val1",
+ "group2": map[string]interface{}{
+ "key2": "val2",
+ "key3": "val3",
+ },
+ },
+ }
+ assert.Equal(t, "val0", entry["key0"], "root level key not found")
+ assert.Equal(t, groupKeys, entry["group1"], "nested group entries not found")
+ },
+ )
})
}
+
+// TestRecordClone is a regression test for https://github.com/DataDog/dd-trace-go/issues/2918.
+func TestRecordClone(t *testing.T) {
+ // start a new span
+ span, ctx := tracer.StartSpanFromContext(context.Background(), "test")
+ defer span.Finish()
+
+ r := slog.Record{}
+ gate := func() {
+ // Calling Handle below should not overwrite this value
+ r.Add("sentinel-key", "sentinel-value")
+ }
+ h := handlerGate{gate, WrapHandler(slog.NewJSONHandler(io.Discard, nil))}
+ // Up to slog.nAttrsInline (5) attributes are stored in the front array of
+	// the record. Make sure to add more attributes than that to trigger the bug.
+ for i := 0; i < 5*10; i++ {
+ r.Add("i", i)
+ }
+ h.Handle(ctx, r)
+
+ var foundSentinel bool
+ r.Attrs(func(a slog.Attr) bool {
+ if a.Key == "sentinel-key" {
+ foundSentinel = true
+ return false
+ }
+ return true
+ })
+ assert.True(t, foundSentinel)
+}
+
+func BenchmarkHandler(b *testing.B) {
+ span, ctx := tracer.StartSpanFromContext(context.Background(), "test")
+ defer span.Finish()
+
+ // create a logger with a bunch of nested groups and fields
+ logger := slog.New(NewJSONHandler(io.Discard, nil))
+ logger = logger.With("attr1", "val1").WithGroup("group1").With("attr2", "val2").WithGroup("group3").With("attr3", "val3")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ logger.InfoContext(ctx, "some message")
+ }
+}
+
+// handlerGate calls a gate function before calling the underlying handler. This
+// allows simulating a concurrent modification of the record that happens after
+// Handle is called (and the record has been copied), but before the back array
+// of the Record is written to.
+type handlerGate struct {
+ gate func()
+ slog.Handler
+}
+
+func (h handlerGate) Handle(ctx context.Context, r slog.Record) {
+ h.gate()
+ h.Handler.Handle(ctx, r)
+}
diff --git a/contrib/miekg/dns/go.mod b/contrib/miekg/dns/go.mod
index 2d6a30fcf0..150a88cdf3 100644
--- a/contrib/miekg/dns/go.mod
+++ b/contrib/miekg/dns/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/miekg/dns/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/miekg/dns v1.1.55
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/net/http/go.mod b/contrib/net/http/go.mod
index 34400b5e63..506cc8235b 100644
--- a/contrib/net/http/go.mod
+++ b/contrib/net/http/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/net/http/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/net/http/option.go b/contrib/net/http/option.go
index 30c4017c01..42513e6dd5 100644
--- a/contrib/net/http/option.go
+++ b/contrib/net/http/option.go
@@ -14,6 +14,7 @@ import (
"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation"
"github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace"
+ "github.com/DataDog/dd-trace-go/v2/instrumentation/options"
)
type commonConfig struct {
@@ -62,7 +63,7 @@ func (o HandlerOptionFn) apply(cfg *config) {
}
func defaults(cfg *config) {
- if httptrace.GetBoolEnv("DD_TRACE_HTTP_ANALYTICS_ENABLED", false) {
+ if options.GetBoolEnv("DD_TRACE_HTTP_ANALYTICS_ENABLED", false) {
cfg.analyticsRate = 1.0
} else {
cfg.analyticsRate = instr.AnalyticsRate(true)
@@ -200,7 +201,7 @@ func newRoundTripperConfig() *roundTripperConfig {
commonConfig: sharedCfg,
propagation: true,
spanNamer: defaultSpanNamer,
- queryString: httptrace.GetBoolEnv(envClientQueryStringEnabled, true),
+ queryString: options.GetBoolEnv(envClientQueryStringEnabled, true),
isStatusError: isClientError,
}
v := os.Getenv(envClientErrorStatuses)
diff --git a/contrib/net/http/roundtripper_test.go b/contrib/net/http/roundtripper_test.go
index b77cd949a1..69d53e2273 100644
--- a/contrib/net/http/roundtripper_test.go
+++ b/contrib/net/http/roundtripper_test.go
@@ -132,7 +132,7 @@ func TestRoundTripperErrors(t *testing.T) {
spans := mt.FinishedSpans()
assert.Len(t, spans, 3)
s := spans[0] // 400 is error
- assert.Equal(t, "400: Bad Request", s.Tag(ext.Error).(error).Error())
+ assert.Equal(t, "400: Bad Request", s.Tag(ext.ErrorMsg))
assert.Equal(t, "400", s.Tag(ext.HTTPCode))
s = spans[1] // 500 is not error
assert.Empty(t, s.Tag(ext.Error))
@@ -154,7 +154,7 @@ func TestRoundTripperErrors(t *testing.T) {
assert.Empty(t, s.Tag(ext.Error))
assert.Equal(t, "400", s.Tag(ext.HTTPCode))
s = spans[1] // 500 is error
- assert.Equal(t, "500: Internal Server Error", s.Tag(ext.Error).(error).Error())
+ assert.Equal(t, "500: Internal Server Error", s.Tag(ext.ErrorMsg))
assert.Equal(t, "500", s.Tag(ext.HTTPCode))
s = spans[2] // 200 is not error
assert.Empty(t, s.Tag(ext.Error))
diff --git a/contrib/net/http/trace.go b/contrib/net/http/trace.go
index 2519ea826c..93efa83e51 100644
--- a/contrib/net/http/trace.go
+++ b/contrib/net/http/trace.go
@@ -10,12 +10,8 @@ package http // import "github.com/DataDog/dd-trace-go/contrib/net/http/v2"
import (
"net/http"
- "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
- "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation"
- "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec"
"github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace"
- "github.com/DataDog/dd-trace-go/v2/instrumentation/options"
)
const componentName = instrumentation.PackageNetHTTP
@@ -27,93 +23,16 @@ func init() {
}
// ServeConfig specifies the tracing configuration when using TraceAndServe.
-type ServeConfig struct {
- // Service specifies the service name to use. If left blank, the global service name
- // will be inherited.
- Service string
- // Resource optionally specifies the resource name for this request.
- Resource string
- // QueryParams should be true in order to append the URL query values to the "http.url" tag.
- QueryParams bool
- // Route is the request matched route if any, or is empty otherwise
- Route string
- // RouteParams specifies framework-specific route parameters (e.g. for route /user/:id coming
- // in as /user/123 we'll have {"id": "123"}). This field is optional and is used for monitoring
- // by AppSec. It is only taken into account when AppSec is enabled.
- RouteParams map[string]string
- // FinishOpts specifies any options to be used when finishing the request span.
- FinishOpts []tracer.FinishOption
- // SpanOpts specifies any options to be applied to the request starting span.
- SpanOpts []tracer.StartSpanOption
-}
+type ServeConfig = httptrace.ServeConfig
// TraceAndServe serves the handler h using the given ResponseWriter and Request, applying tracing
// according to the specified config.
func TraceAndServe(h http.Handler, w http.ResponseWriter, r *http.Request, cfg *ServeConfig) {
- if cfg == nil {
- cfg = new(ServeConfig)
- }
- opts := options.Expand(cfg.SpanOpts, 2, 3) // make a copy of cfg.SpanOpts to avoid races.
- // Pre-append span.kind and component tags to the options so that they can be overridden.
- opts[0] = tracer.Tag(ext.SpanKind, ext.SpanKindServer)
- opts[1] = tracer.Tag(ext.Component, componentName)
- if cfg.Service != "" {
- opts = append(opts, tracer.ServiceName(cfg.Service))
- }
- if cfg.Resource != "" {
- opts = append(opts, tracer.ResourceName(cfg.Resource))
- }
- if cfg.Route != "" {
- opts = append(opts, tracer.Tag(ext.HTTPRoute, cfg.Route))
- }
- span, ctx := httptrace.StartRequestSpan(r, opts...)
- rw, ddrw := wrapResponseWriter(w)
- defer func() {
- httptrace.FinishRequestSpan(span, ddrw.status, cfg.FinishOpts...)
- }()
- if instr.AppSecEnabled() {
- h = httpsec.WrapHandler(h, span, cfg.RouteParams, nil)
- }
- h.ServeHTTP(rw, r.WithContext(ctx))
-}
-
-// responseWriter is a small wrapper around an http response writer that will
-// intercept and store the status of a request.
-type responseWriter struct {
- http.ResponseWriter
- status int
-}
+ tw, tr, afterHandle, handled := httptrace.BeforeHandle(cfg, w, r)
+ defer afterHandle()
-func newResponseWriter(w http.ResponseWriter) *responseWriter {
- return &responseWriter{w, 0}
-}
-
-// Status returns the status code that was monitored.
-func (w *responseWriter) Status() int {
- return w.status
-}
-
-// Write writes the data to the connection as part of an HTTP reply.
-// We explicitly call WriteHeader with the 200 status code
-// in order to get it reported into the span.
-func (w *responseWriter) Write(b []byte) (int, error) {
- if w.status == 0 {
- w.WriteHeader(http.StatusOK)
- }
- return w.ResponseWriter.Write(b)
-}
-
-// WriteHeader sends an HTTP response header with status code.
-// It also sets the status code to the span.
-func (w *responseWriter) WriteHeader(status int) {
- if w.status != 0 {
+ if handled {
return
}
- w.ResponseWriter.WriteHeader(status)
- w.status = status
-}
-
-// Unwrap returns the underlying wrapped http.ResponseWriter.
-func (w *responseWriter) Unwrap() http.ResponseWriter {
- return w.ResponseWriter
+ h.ServeHTTP(tw, tr)
}
diff --git a/contrib/net/http/trace_test.go b/contrib/net/http/trace_test.go
index c6214de766..dbd64572ab 100644
--- a/contrib/net/http/trace_test.go
+++ b/contrib/net/http/trace_test.go
@@ -149,26 +149,6 @@ func TestTraceAndServe(t *testing.T) {
assert.Equal("Hello, world!\n", string(slurp))
})
- // there doesn't appear to be an easy way to test http.Pusher support via an http request
- // so we'll just confirm wrapResponseWriter preserves it
- t.Run("Pusher", func(t *testing.T) {
- var i struct {
- http.ResponseWriter
- http.Pusher
- }
- var w http.ResponseWriter = i
- _, ok := w.(http.ResponseWriter)
- assert.True(t, ok)
- _, ok = w.(http.Pusher)
- assert.True(t, ok)
-
- w, _ = wrapResponseWriter(w)
- _, ok = w.(http.ResponseWriter)
- assert.True(t, ok)
- _, ok = w.(http.Pusher)
- assert.True(t, ok)
- })
-
t.Run("distributed", func(t *testing.T) {
mt := mocktracer.Start()
assert := assert.New(t)
diff --git a/contrib/olivere/elastic.v5/go.mod b/contrib/olivere/elastic.v5/go.mod
index f2b49af39c..874e6bb250 100644
--- a/contrib/olivere/elastic.v5/go.mod
+++ b/contrib/olivere/elastic.v5/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/olivere/elastic.v5/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
gopkg.in/olivere/elastic.v5 v5.0.84
)
diff --git a/contrib/redis/go-redis.v9/go.mod b/contrib/redis/go-redis.v9/go.mod
index ac96b073ab..69d0464e17 100644
--- a/contrib/redis/go-redis.v9/go.mod
+++ b/contrib/redis/go-redis.v9/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/redis/go-redis.v9/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/redis/go-redis/v9 v9.1.0
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/segmentio/kafka-go/dsm.go b/contrib/segmentio/kafka-go/dsm.go
new file mode 100644
index 0000000000..6a37869b8e
--- /dev/null
+++ b/contrib/segmentio/kafka-go/dsm.go
@@ -0,0 +1,86 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package kafka
+
+import (
+ "context"
+
+ "github.com/DataDog/dd-trace-go/v2/datastreams"
+ "github.com/DataDog/dd-trace-go/v2/datastreams/options"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+)
+
+func (tr *Tracer) SetConsumeDSMCheckpoint(msg Message) {
+ if !tr.cfg.dataStreamsEnabled || msg == nil {
+ return
+ }
+ edges := []string{"direction:in", "topic:" + msg.GetTopic(), "type:kafka"}
+ if tr.kafkaCfg.ConsumerGroupID != "" {
+ edges = append(edges, "group:"+tr.kafkaCfg.ConsumerGroupID)
+ }
+ carrier := NewMessageCarrier(msg)
+ ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
+ datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
+ options.CheckpointParams{PayloadSize: getConsumerMsgSize(msg)},
+ edges...,
+ )
+ if !ok {
+ return
+ }
+ datastreams.InjectToBase64Carrier(ctx, carrier)
+ if tr.kafkaCfg.ConsumerGroupID != "" {
+ // only track Kafka lag if a consumer group is set.
+ // since there is no ack mechanism, we consider that messages read are committed right away.
+ tracer.TrackKafkaCommitOffset(tr.kafkaCfg.ConsumerGroupID, msg.GetTopic(), int32(msg.GetPartition()), msg.GetOffset())
+ }
+}
+
+func (tr *Tracer) SetProduceDSMCheckpoint(msg Message, writer Writer) {
+ if !tr.cfg.dataStreamsEnabled || msg == nil {
+ return
+ }
+
+ var topic string
+ if writer.GetTopic() != "" {
+ topic = writer.GetTopic()
+ } else {
+ topic = msg.GetTopic()
+ }
+
+ edges := []string{"direction:out", "topic:" + topic, "type:kafka"}
+ carrier := MessageCarrier{msg}
+ ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
+ datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
+ options.CheckpointParams{PayloadSize: getProducerMsgSize(msg)},
+ edges...,
+ )
+ if !ok {
+ return
+ }
+
+ // Headers will be dropped if the current protocol does not support them
+ datastreams.InjectToBase64Carrier(ctx, carrier)
+}
+
+func getProducerMsgSize(msg Message) (size int64) {
+ for _, header := range msg.GetHeaders() {
+ size += int64(len(header.GetKey()) + len(header.GetValue()))
+ }
+ if msg.GetValue() != nil {
+ size += int64(len(msg.GetValue()))
+ }
+ if msg.GetKey() != nil {
+ size += int64(len(msg.GetKey()))
+ }
+ return size
+}
+
+func getConsumerMsgSize(msg Message) (size int64) {
+ for _, header := range msg.GetHeaders() {
+ size += int64(len(header.GetKey()) + len(header.GetValue()))
+ }
+ return size + int64(len(msg.GetValue())+len(msg.GetKey()))
+}
diff --git a/contrib/segmentio/kafka-go/example_test.go b/contrib/segmentio/kafka-go/example_test.go
index 777945d832..8c6d49e99e 100644
--- a/contrib/segmentio/kafka-go/example_test.go
+++ b/contrib/segmentio/kafka-go/example_test.go
@@ -13,7 +13,7 @@ import (
kafkatrace "github.com/DataDog/dd-trace-go/contrib/segmentio/kafka-go/v2"
"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
- kafka "github.com/segmentio/kafka-go"
+ "github.com/segmentio/kafka-go"
)
func ExampleWriter() {
diff --git a/contrib/segmentio/kafka-go/go.mod b/contrib/segmentio/kafka-go/go.mod
index b01bd2099e..0d0f30adf0 100644
--- a/contrib/segmentio/kafka-go/go.mod
+++ b/contrib/segmentio/kafka-go/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/segmentio/kafka-go/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/segmentio/kafka-go v0.4.47
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/segmentio/kafka-go/kafka.go b/contrib/segmentio/kafka-go/kafka.go
index 64a47c8775..106e657f87 100644
--- a/contrib/segmentio/kafka-go/kafka.go
+++ b/contrib/segmentio/kafka-go/kafka.go
@@ -7,12 +7,8 @@ package kafka // import "github.com/DataDog/dd-trace-go/contrib/segmentio/kafka-
import (
"context"
- "math"
"strings"
- "github.com/DataDog/dd-trace-go/v2/datastreams"
- "github.com/DataDog/dd-trace-go/v2/datastreams/options"
- "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation"
@@ -25,79 +21,35 @@ func init() {
instr = instrumentation.Load(instrumentation.PackageSegmentioKafkaGo)
}
+// A Reader wraps a kafka.Reader.
+type Reader struct {
+ *kafka.Reader
+ tracer *Tracer
+ prev *tracer.Span
+}
+
// NewReader calls kafka.NewReader and wraps the resulting Consumer.
func NewReader(conf kafka.ReaderConfig, opts ...Option) *Reader {
return WrapReader(kafka.NewReader(conf), opts...)
}
-// NewWriter calls kafka.NewWriter and wraps the resulting Producer.
-func NewWriter(conf kafka.WriterConfig, opts ...Option) *Writer {
- return WrapWriter(kafka.NewWriter(conf), opts...)
-}
-
// WrapReader wraps a kafka.Reader so that any consumed events are traced.
func WrapReader(c *kafka.Reader, opts ...Option) *Reader {
wrapped := &Reader{
Reader: c,
- cfg: newConfig(opts...),
}
-
+ cfg := KafkaConfig{}
if c.Config().Brokers != nil {
- wrapped.bootstrapServers = strings.Join(c.Config().Brokers, ",")
+ cfg.BootstrapServers = strings.Join(c.Config().Brokers, ",")
}
-
if c.Config().GroupID != "" {
- wrapped.groupID = c.Config().GroupID
+ cfg.ConsumerGroupID = c.Config().GroupID
}
-
- instr.Logger().Debug("contrib/segmentio/kafka-go.v0/kafka: Wrapping Reader: %#v", wrapped.cfg)
+ wrapped.tracer = NewTracer(cfg, opts...)
+ instr.Logger().Debug("contrib/segmentio/kafka-go/kafka: Wrapping Reader: %#v", wrapped.tracer.cfg)
return wrapped
}
-// A kafkaConfig struct holds information from the kafka config for span tags
-type kafkaConfig struct {
- bootstrapServers string
- groupID string
-}
-
-// A Reader wraps a kafka.Reader.
-type Reader struct {
- *kafka.Reader
- kafkaConfig
- cfg *config
- prev *tracer.Span
-}
-
-func (r *Reader) startSpan(ctx context.Context, msg *kafka.Message) *tracer.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(r.cfg.consumerServiceName),
- tracer.ResourceName("Consume Topic " + msg.Topic),
- tracer.SpanType(ext.SpanTypeMessageConsumer),
- tracer.Tag(ext.MessagingKafkaPartition, msg.Partition),
- tracer.Tag("offset", msg.Offset),
- tracer.Tag(ext.Component, instrumentation.PackageSegmentioKafkaGo),
- tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Tag(ext.KafkaBootstrapServers, r.bootstrapServers),
- tracer.Measured(),
- }
-
- if !math.IsNaN(r.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, r.cfg.analyticsRate))
- }
- // kafka supports headers, so try to extract a span context
- carrier := messageCarrier{msg}
- if spanctx, err := tracer.Extract(carrier); err == nil {
- opts = append(opts, tracer.ChildOf(spanctx))
- }
- span, _ := tracer.StartSpanFromContext(ctx, r.cfg.consumerSpanName, opts...)
- // reinject the span context so consumers can pick it up
- if err := tracer.Inject(span.Context(), carrier); err != nil {
- instr.Logger().Debug("contrib/segmentio/kafka-go: Failed to inject span context into carrier in reader, %v", err)
- }
- return span
-}
-
// Close calls the underlying Reader.Close and if polling is enabled, finishes
// any remaining span.
func (r *Reader) Close() error {
@@ -119,8 +71,9 @@ func (r *Reader) ReadMessage(ctx context.Context) (kafka.Message, error) {
if err != nil {
return kafka.Message{}, err
}
- r.prev = r.startSpan(ctx, &msg)
- setConsumeCheckpoint(r.cfg.dataStreamsEnabled, r.groupID, &msg)
+ tMsg := wrapMessage(&msg)
+ r.prev = r.tracer.StartConsumeSpan(ctx, tMsg)
+ r.tracer.SetConsumeDSMCheckpoint(tMsg)
return msg, nil
}
@@ -134,147 +87,51 @@ func (r *Reader) FetchMessage(ctx context.Context) (kafka.Message, error) {
if err != nil {
return msg, err
}
- r.prev = r.startSpan(ctx, &msg)
- setConsumeCheckpoint(r.cfg.dataStreamsEnabled, r.groupID, &msg)
+ tMsg := wrapMessage(&msg)
+ r.prev = r.tracer.StartConsumeSpan(ctx, tMsg)
+ r.tracer.SetConsumeDSMCheckpoint(tMsg)
return msg, nil
}
-func setConsumeCheckpoint(enabled bool, groupID string, msg *kafka.Message) {
- if !enabled || msg == nil {
- return
- }
- edges := []string{"direction:in", "topic:" + msg.Topic, "type:kafka"}
- if groupID != "" {
- edges = append(edges, "group:"+groupID)
- }
- carrier := messageCarrier{msg}
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
- datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
- options.CheckpointParams{PayloadSize: getConsumerMsgSize(msg)},
- edges...,
- )
- if !ok {
- return
- }
- datastreams.InjectToBase64Carrier(ctx, carrier)
- if groupID != "" {
- // only track Kafka lag if a consumer group is set.
- // since there is no ack mechanism, we consider that messages read are committed right away.
- tracer.TrackKafkaCommitOffset(groupID, msg.Topic, int32(msg.Partition), msg.Offset)
- }
+// KafkaWriter wraps a kafka.Writer with tracing config data
+type KafkaWriter struct {
+ *kafka.Writer
+ tracer *Tracer
+}
+
+// NewWriter calls kafka.NewWriter and wraps the resulting Producer.
+func NewWriter(conf kafka.WriterConfig, opts ...Option) *KafkaWriter {
+ return WrapWriter(kafka.NewWriter(conf), opts...)
}
// WrapWriter wraps a kafka.Writer so requests are traced.
-func WrapWriter(w *kafka.Writer, opts ...Option) *Writer {
- writer := &Writer{
+func WrapWriter(w *kafka.Writer, opts ...Option) *KafkaWriter {
+ writer := &KafkaWriter{
Writer: w,
- cfg: newConfig(opts...),
}
-
+ cfg := KafkaConfig{}
if w.Addr.String() != "" {
- writer.bootstrapServers = w.Addr.String()
+ cfg.BootstrapServers = w.Addr.String()
}
- instr.Logger().Debug("contrib/segmentio/kafka-go: Wrapping Writer: %#v", writer.cfg)
+ writer.tracer = NewTracer(cfg, opts...)
+ instr.Logger().Debug("contrib/segmentio/kafka-go: Wrapping Writer: %#v", writer.tracer.kafkaCfg)
return writer
}
-// Writer wraps a kafka.Writer with tracing config data
-type Writer struct {
- *kafka.Writer
- kafkaConfig
- cfg *config
-}
-
-func (w *Writer) startSpan(ctx context.Context, msg *kafka.Message) *tracer.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(w.cfg.producerServiceName),
- tracer.SpanType(ext.SpanTypeMessageProducer),
- tracer.Tag(ext.Component, instrumentation.PackageSegmentioKafkaGo),
- tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Tag(ext.KafkaBootstrapServers, w.bootstrapServers),
- }
- if w.Writer.Topic != "" {
- opts = append(opts, tracer.ResourceName("Produce Topic "+w.Writer.Topic))
- } else {
- opts = append(opts, tracer.ResourceName("Produce Topic "+msg.Topic))
- }
- if !math.IsNaN(w.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, w.cfg.analyticsRate))
- }
- carrier := messageCarrier{msg}
- span, _ := tracer.StartSpanFromContext(ctx, w.cfg.producerSpanName, opts...)
- if err := tracer.Inject(span.Context(), carrier); err != nil {
- instr.Logger().Debug("contrib/segmentio/kafka-go: Failed to inject span context into carrier in writer, %v", err)
- }
- return span
-}
-
-func finishSpan(span *tracer.Span, partition int, offset int64, err error) {
- span.SetTag(ext.MessagingKafkaPartition, partition)
- span.SetTag("offset", offset)
- span.Finish(tracer.WithError(err))
-}
-
// WriteMessages calls kafka-go.Writer.WriteMessages and traces the requests.
-func (w *Writer) WriteMessages(ctx context.Context, msgs ...kafka.Message) error {
+func (w *KafkaWriter) WriteMessages(ctx context.Context, msgs ...kafka.Message) error {
// although there's only one call made to the SyncProducer, the messages are
// treated individually, so we create a span for each one
spans := make([]*tracer.Span, len(msgs))
for i := range msgs {
- spans[i] = w.startSpan(ctx, &msgs[i])
- setProduceCheckpoint(w.cfg.dataStreamsEnabled, &msgs[i], w.Writer)
+ tMsg := wrapMessage(&msgs[i])
+ tWriter := wrapTracingWriter(w.Writer)
+ spans[i] = w.tracer.StartProduceSpan(ctx, tWriter, tMsg)
+ w.tracer.SetProduceDSMCheckpoint(tMsg, tWriter)
}
err := w.Writer.WriteMessages(ctx, msgs...)
for i, span := range spans {
- finishSpan(span, msgs[i].Partition, msgs[i].Offset, err)
+ w.tracer.FinishProduceSpan(span, msgs[i].Partition, msgs[i].Offset, err)
}
return err
}
-
-func setProduceCheckpoint(enabled bool, msg *kafka.Message, writer *kafka.Writer) {
- if !enabled || msg == nil {
- return
- }
-
- var topic string
- if writer.Topic != "" {
- topic = writer.Topic
- } else {
- topic = msg.Topic
- }
-
- edges := []string{"direction:out", "topic:" + topic, "type:kafka"}
- carrier := messageCarrier{msg}
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
- datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
- options.CheckpointParams{PayloadSize: getProducerMsgSize(msg)},
- edges...,
- )
- if !ok {
- return
- }
-
- // Headers will be dropped if the current protocol does not support them
- datastreams.InjectToBase64Carrier(ctx, carrier)
-}
-
-func getProducerMsgSize(msg *kafka.Message) (size int64) {
- for _, header := range msg.Headers {
- size += int64(len(header.Key) + len(header.Value))
- }
- if msg.Value != nil {
- size += int64(len(msg.Value))
- }
- if msg.Key != nil {
- size += int64(len(msg.Key))
- }
- return size
-}
-
-func getConsumerMsgSize(msg *kafka.Message) (size int64) {
- for _, header := range msg.Headers {
- size += int64(len(header.Key) + len(header.Value))
- }
- return size + int64(len(msg.Value)+len(msg.Key))
-}
diff --git a/contrib/segmentio/kafka-go/kafka_test.go b/contrib/segmentio/kafka-go/kafka_test.go
index 836ac02ef6..babe591672 100644
--- a/contrib/segmentio/kafka-go/kafka_test.go
+++ b/contrib/segmentio/kafka-go/kafka_test.go
@@ -7,7 +7,12 @@ package kafka
import (
"context"
+ "errors"
+ "fmt"
+ "log"
+ "net"
"os"
+ "strconv"
"testing"
"time"
@@ -27,44 +32,139 @@ const (
testReaderMaxWait = 10 * time.Millisecond
)
-func skipIntegrationTest(t *testing.T) {
- if _, ok := os.LookupEnv("INTEGRATION"); !ok {
- t.Skip("🚧 Skipping integration test (INTEGRATION environment variable is not set)")
+var (
+ // add some dummy values to broker/addr to test bootstrap servers.
+ kafkaBrokers = []string{"localhost:9092", "localhost:9093", "localhost:9094"}
+)
+
+func TestMain(m *testing.M) {
+ _, ok := os.LookupEnv("INTEGRATION")
+ if !ok {
+ log.Println("🚧 Skipping integration test (INTEGRATION environment variable is not set)")
+ os.Exit(0)
}
+ cleanup := createTopic()
+ exitCode := m.Run()
+ cleanup()
+ os.Exit(exitCode)
}
-/*
-to setup the integration test locally run:
- docker-compose -f local_testing.yaml up
-*/
+func testWriter() *kafka.Writer {
+ return &kafka.Writer{
+ Addr: kafka.TCP(kafkaBrokers...),
+ Topic: testTopic,
+ RequiredAcks: kafka.RequireOne,
+ Balancer: &kafka.LeastBytes{},
+ }
+}
-type readerOpFn func(t *testing.T, r *Reader)
+func testReader() *kafka.Reader {
+ return kafka.NewReader(kafka.ReaderConfig{
+ Brokers: kafkaBrokers,
+ GroupID: testGroupID,
+ Topic: testTopic,
+ MaxWait: testReaderMaxWait,
+ MaxBytes: 10e6, // 10MB
+ })
+}
+
+func createTopic() func() {
+ conn, err := kafka.Dial("tcp", "localhost:9092")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer conn.Close()
+
+ controller, err := conn.Controller()
+ if err != nil {
+ log.Fatal(err)
+ }
+ controllerConn, err := kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := controllerConn.DeleteTopics(testTopic); err != nil && !errors.Is(err, kafka.UnknownTopicOrPartition) {
+ log.Fatalf("failed to delete topic: %v", err)
+ }
+ topicConfigs := []kafka.TopicConfig{
+ {
+ Topic: testTopic,
+ NumPartitions: 1,
+ ReplicationFactor: 1,
+ },
+ }
+ if err := controllerConn.CreateTopics(topicConfigs...); err != nil {
+ log.Fatal(err)
+ }
+ if err := ensureTopicReady(); err != nil {
+ log.Fatal(err)
+ }
+ return func() {
+ if err := controllerConn.DeleteTopics(testTopic); err != nil {
+ log.Printf("failed to delete topic: %v", err)
+ }
+ if err := controllerConn.Close(); err != nil {
+ log.Printf("failed to close controller connection: %v", err)
+ }
+ }
+}
+
+func ensureTopicReady() error {
+ const (
+ maxRetries = 10
+ retryDelay = 100 * time.Millisecond
+ )
+ writer := testWriter()
+ defer writer.Close()
+ reader := testReader()
+ defer reader.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ var (
+ retryCount int
+ err error
+ )
+ for retryCount < maxRetries {
+ err = writer.WriteMessages(ctx, kafka.Message{Key: []byte("some-key"), Value: []byte("some-value")})
+ if err == nil {
+ break
+ }
+ // This error happens sometimes with brand-new topics, as there is a delay between when the topic is created
+ // on the broker, and when the topic can actually be written to.
+ if errors.Is(err, kafka.UnknownTopicOrPartition) {
+ retryCount++
+ log.Printf("topic not ready yet, retrying produce in %s (retryCount: %d)\n", retryDelay, retryCount)
+ time.Sleep(retryDelay)
+ }
+ }
+ if err != nil {
+ return fmt.Errorf("timeout waiting for topic to be ready: %w", err)
+ }
+ // read the message to ensure we don't pollute tests
+ _, err = reader.ReadMessage(ctx)
+ if err != nil {
+ return err
+ }
+ return nil
+}
-func genIntegrationTestSpans(t *testing.T, mt mocktracer.Tracer, writerOp func(t *testing.T, w *Writer), readerOp readerOpFn, writerOpts []Option, readerOpts []Option) ([]*mocktracer.Span, []kafka.Message) {
- skipIntegrationTest(t)
+type readerOpFn func(t *testing.T, r *Reader)
+func genIntegrationTestSpans(t *testing.T, mt mocktracer.Tracer, writerOp func(t *testing.T, w *KafkaWriter), readerOp readerOpFn, writerOpts []Option, readerOpts []Option) ([]*mocktracer.Span, []kafka.Message) {
writtenMessages := []kafka.Message{}
- // add some dummy values to broker/addr to test bootstrap servers.
- kw := &kafka.Writer{
- Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
- Topic: testTopic,
- RequiredAcks: kafka.RequireOne,
- Completion: func(messages []kafka.Message, err error) {
- writtenMessages = append(writtenMessages, messages...)
- },
+ kw := testWriter()
+ kw.Completion = func(messages []kafka.Message, err error) {
+ writtenMessages = append(writtenMessages, messages...)
}
w := WrapWriter(kw, writerOpts...)
writerOp(t, w)
err := w.Close()
require.NoError(t, err)
- r := NewReader(kafka.ReaderConfig{
- Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
- GroupID: testGroupID,
- Topic: testTopic,
- MaxWait: testReaderMaxWait,
- }, readerOpts...)
+ r := WrapReader(testReader(), readerOpts...)
readerOp(t, r)
err = r.Close()
require.NoError(t, err)
@@ -93,7 +193,7 @@ func TestReadMessageFunctional(t *testing.T) {
spans, writtenMessages := genIntegrationTestSpans(
t,
mt,
- func(t *testing.T, w *Writer) {
+ func(t *testing.T, w *KafkaWriter) {
err := w.WriteMessages(context.Background(), messagesToWrite...)
require.NoError(t, err, "Expected to write message to topic")
},
@@ -112,8 +212,8 @@ func TestReadMessageFunctional(t *testing.T) {
[]Option{WithDataStreams()},
)
- assert.Len(t, writtenMessages, len(messagesToWrite))
- assert.Len(t, readMessages, len(messagesToWrite))
+ require.Len(t, writtenMessages, len(messagesToWrite))
+ require.Len(t, readMessages, len(messagesToWrite))
// producer span
s0 := spans[0]
@@ -123,12 +223,12 @@ func TestReadMessageFunctional(t *testing.T) {
assert.Equal(t, 0.1, s0.Tag(ext.EventSampleRate))
assert.Equal(t, "queue", s0.Tag(ext.SpanType))
assert.Equal(t, float64(0), s0.Tag(ext.MessagingKafkaPartition))
- assert.Equal(t, "segmentio/kafka.go.v0", s0.Tag(ext.Component))
+ assert.Equal(t, "segmentio/kafka-go", s0.Tag(ext.Component))
assert.Equal(t, ext.SpanKindProducer, s0.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s0.Tag(ext.MessagingSystem))
assert.Equal(t, "localhost:9092,localhost:9093,localhost:9094", s0.Tag(ext.KafkaBootstrapServers))
- p, ok := datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&writtenMessages[0]}))
+ p, ok := datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), NewMessageCarrier(wrapMessage(&writtenMessages[0]))))
assert.True(t, ok)
expectedCtx, _ := tracer.SetDataStreamsCheckpoint(context.Background(), "direction:out", "topic:"+testTopic, "type:kafka")
expected, _ := datastreams.PathwayFromContext(expectedCtx)
@@ -143,15 +243,19 @@ func TestReadMessageFunctional(t *testing.T) {
assert.Equal(t, nil, s1.Tag(ext.EventSampleRate))
assert.Equal(t, "queue", s1.Tag(ext.SpanType))
assert.Equal(t, float64(0), s1.Tag(ext.MessagingKafkaPartition))
- assert.Equal(t, "segmentio/kafka.go.v0", s1.Tag(ext.Component))
+ assert.Equal(t, "segmentio/kafka-go", s1.Tag(ext.Component))
assert.Equal(t, ext.SpanKindConsumer, s1.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s1.Tag(ext.MessagingSystem))
assert.Equal(t, "localhost:9092,localhost:9093,localhost:9094", s1.Tag(ext.KafkaBootstrapServers))
- p, ok = datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&readMessages[0]}))
+ // context propagation
+ assert.Equal(t, s0.SpanID(), s1.ParentID(), "consume span should be child of the produce span")
+ assert.Equal(t, s0.TraceID(), s1.TraceID(), "spans should have the same trace id")
+
+ p, ok = datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), NewMessageCarrier(wrapMessage(&readMessages[0]))))
assert.True(t, ok)
expectedCtx, _ = tracer.SetDataStreamsCheckpoint(
- datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&writtenMessages[0]}),
+ datastreams.ExtractFromBase64Carrier(context.Background(), NewMessageCarrier(wrapMessage(&writtenMessages[0]))),
"direction:in", "topic:"+testTopic, "type:kafka", "group:"+testGroupID,
)
expected, _ = datastreams.PathwayFromContext(expectedCtx)
@@ -176,7 +280,7 @@ func TestFetchMessageFunctional(t *testing.T) {
spans, writtenMessages := genIntegrationTestSpans(
t,
mt,
- func(t *testing.T, w *Writer) {
+ func(t *testing.T, w *KafkaWriter) {
err := w.WriteMessages(context.Background(), messagesToWrite...)
require.NoError(t, err, "Expected to write message to topic")
},
@@ -203,12 +307,12 @@ func TestFetchMessageFunctional(t *testing.T) {
assert.Equal(t, 0.1, s0.Tag(ext.EventSampleRate))
assert.Equal(t, "queue", s0.Tag(ext.SpanType))
assert.Equal(t, float64(0), s0.Tag(ext.MessagingKafkaPartition))
- assert.Equal(t, "segmentio/kafka.go.v0", s0.Tag(ext.Component))
+ assert.Equal(t, "segmentio/kafka-go", s0.Tag(ext.Component))
assert.Equal(t, ext.SpanKindProducer, s0.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s0.Tag(ext.MessagingSystem))
assert.Equal(t, "localhost:9092,localhost:9093,localhost:9094", s0.Tag(ext.KafkaBootstrapServers))
- p, ok := datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&writtenMessages[0]}))
+ p, ok := datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), NewMessageCarrier(wrapMessage(&writtenMessages[0]))))
assert.True(t, ok)
expectedCtx, _ := tracer.SetDataStreamsCheckpoint(context.Background(), "direction:out", "topic:"+testTopic, "type:kafka")
expected, _ := datastreams.PathwayFromContext(expectedCtx)
@@ -223,15 +327,18 @@ func TestFetchMessageFunctional(t *testing.T) {
assert.Equal(t, nil, s1.Tag(ext.EventSampleRate))
assert.Equal(t, "queue", s1.Tag(ext.SpanType))
assert.Equal(t, float64(0), s1.Tag(ext.MessagingKafkaPartition))
- assert.Equal(t, "segmentio/kafka.go.v0", s1.Tag(ext.Component))
+ assert.Equal(t, "segmentio/kafka-go", s1.Tag(ext.Component))
assert.Equal(t, ext.SpanKindConsumer, s1.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s1.Tag(ext.MessagingSystem))
assert.Equal(t, "localhost:9092,localhost:9093,localhost:9094", s1.Tag(ext.KafkaBootstrapServers))
- p, ok = datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&readMessages[0]}))
+ // context propagation
+ assert.Equal(t, s0.SpanID(), s1.ParentID(), "consume span should be child of the produce span")
+
+ p, ok = datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), NewMessageCarrier(wrapMessage(&readMessages[0]))))
assert.True(t, ok)
expectedCtx, _ = tracer.SetDataStreamsCheckpoint(
- datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&writtenMessages[0]}),
+ datastreams.ExtractFromBase64Carrier(context.Background(), NewMessageCarrier(wrapMessage(&writtenMessages[0]))),
"direction:in", "topic:"+testTopic, "type:kafka", "group:"+testGroupID,
)
expected, _ = datastreams.PathwayFromContext(expectedCtx)
@@ -239,40 +346,106 @@ func TestFetchMessageFunctional(t *testing.T) {
assert.Equal(t, expected.GetHash(), p.GetHash())
}
-func BenchmarkReaderStartSpan(b *testing.B) {
- r := NewReader(kafka.ReaderConfig{
- Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
- GroupID: testGroupID,
- Topic: testTopic,
- MaxWait: testReaderMaxWait,
- })
+func TestProduceMultipleMessages(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ messages := []kafka.Message{
+ {
+ Key: []byte("key1"),
+ Value: []byte("value1"),
+ },
+ {
+ Key: []byte("key2"),
+ Value: []byte("value2"),
+ },
+ {
+ Key: []byte("key3"),
+ Value: []byte("value3"),
+ },
+ }
+
+ writer := WrapWriter(testWriter())
+ reader := WrapReader(testReader())
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err := writer.WriteMessages(ctx, messages...)
+ require.NoError(t, err)
+ require.NoError(t, writer.Close())
+
+ curMsg := 0
+ for curMsg < len(messages) {
+ readMsg, err := reader.ReadMessage(ctx)
+ require.NoError(t, err)
+ require.Equal(t, string(messages[curMsg].Key), string(readMsg.Key))
+ require.Equal(t, string(messages[curMsg].Value), string(readMsg.Value))
+ curMsg++
+ }
+ require.NoError(t, reader.Close())
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 6)
+
+ produceSpans := spans[0:3]
+ consumeSpans := spans[3:6]
+ for i := 0; i < 3; i++ {
+ ps := produceSpans[i]
+ cs := consumeSpans[i]
+
+ assert.Equal(t, "kafka.produce", ps.OperationName(), "wrong produce span name")
+ assert.Equal(t, "kafka.consume", cs.OperationName(), "wrong consume span name")
+ assert.Equal(t, cs.ParentID(), ps.SpanID(), "consume span should be child of a produce span")
+ assert.Equal(t, uint64(0), ps.ParentID(), "produce span should not be child of any span")
+ assert.Equal(t, cs.TraceID(), ps.TraceID(), "spans should be part of the same trace")
+ }
+}
+
+// benchSpan is a package-level variable used to prevent compiler optimisations in the benchmarks below.
+var benchSpan *tracer.Span
+
+func BenchmarkReaderStartSpan(b *testing.B) {
+ ctx := context.Background()
+ kafkaCfg := KafkaConfig{
+ BootstrapServers: "localhost:9092,localhost:9093,localhost:9094",
+ ConsumerGroupID: testGroupID,
+ }
+ tr := NewTracer(kafkaCfg)
msg := kafka.Message{
Key: []byte("key1"),
Value: []byte("value1"),
}
+ var result *tracer.Span
b.ResetTimer()
for n := 0; n < b.N; n++ {
- r.startSpan(nil, &msg)
+ result = tr.StartConsumeSpan(ctx, wrapMessage(&msg))
}
+ benchSpan = result
}
func BenchmarkWriterStartSpan(b *testing.B) {
+ ctx := context.Background()
+ kafkaCfg := KafkaConfig{
+ BootstrapServers: "localhost:9092,localhost:9093,localhost:9094",
+ ConsumerGroupID: testGroupID,
+ }
+ tr := NewTracer(kafkaCfg)
kw := &kafka.Writer{
Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
Topic: testTopic,
RequiredAcks: kafka.RequireOne,
}
- w := WrapWriter(kw)
-
msg := kafka.Message{
Key: []byte("key1"),
Value: []byte("value1"),
}
+ var result *tracer.Span
b.ResetTimer()
for n := 0; n < b.N; n++ {
- w.startSpan(nil, &msg)
+ result = tr.StartProduceSpan(ctx, wrapTracingWriter(kw), wrapMessage(&msg))
}
+ benchSpan = result
}
diff --git a/contrib/segmentio/kafka-go/message_carrier.go b/contrib/segmentio/kafka-go/message_carrier.go
new file mode 100644
index 0000000000..45cdaeeeed
--- /dev/null
+++ b/contrib/segmentio/kafka-go/message_carrier.go
@@ -0,0 +1,52 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package kafka
+
+import (
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+)
+
+// A MessageCarrier implements TextMapReader/TextMapWriter for extracting/injecting traces on a kafka.Message
+type MessageCarrier struct {
+ msg Message
+}
+
+var _ interface {
+ tracer.TextMapReader
+ tracer.TextMapWriter
+} = (*MessageCarrier)(nil)
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c MessageCarrier) ForeachKey(handler func(key, val string) error) error {
+ for _, h := range c.msg.GetHeaders() {
+ err := handler(h.GetKey(), string(h.GetValue()))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set implements TextMapWriter.
+func (c MessageCarrier) Set(key, val string) {
+ headers := c.msg.GetHeaders()
+ // ensure uniqueness of keys
+ for i := 0; i < len(headers); i++ {
+ if headers[i].GetKey() == key {
+ headers = append(headers[:i], headers[i+1:]...)
+ i--
+ }
+ }
+ headers = append(headers, KafkaHeader{
+ Key: key,
+ Value: []byte(val),
+ })
+ c.msg.SetHeaders(headers)
+}
+
+func NewMessageCarrier(msg Message) MessageCarrier {
+ return MessageCarrier{msg: msg}
+}
diff --git a/contrib/segmentio/kafka-go/option_test.go b/contrib/segmentio/kafka-go/option_test.go
index 43147f6ee1..effbd7f0b4 100644
--- a/contrib/segmentio/kafka-go/option_test.go
+++ b/contrib/segmentio/kafka-go/option_test.go
@@ -49,7 +49,7 @@ func TestAnalyticsSettings(t *testing.T) {
t.Run("optionOverridesEnv", func(t *testing.T) {
t.Setenv("DD_DATA_STREAMS_ENABLED", "false")
cfg := newConfig()
- WithDataStreams()(cfg)
+ WithDataStreams().apply(cfg)
assert.True(t, cfg.dataStreamsEnabled)
})
}
diff --git a/contrib/segmentio/kafka-go/tracer.go b/contrib/segmentio/kafka-go/tracer.go
new file mode 100644
index 0000000000..6510d7963f
--- /dev/null
+++ b/contrib/segmentio/kafka-go/tracer.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package kafka
+
+type Tracer struct {
+ kafkaCfg KafkaConfig
+ cfg *config
+}
+
+func NewTracer(kafkaCfg KafkaConfig, opts ...Option) *Tracer {
+ tr := &Tracer{
+ kafkaCfg: kafkaCfg,
+ }
+ tr.cfg = newConfig(opts...)
+ return tr
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/option_test.go b/contrib/segmentio/kafka-go/tracer_test.go
similarity index 53%
rename from contrib/confluentinc/confluent-kafka-go/kafka/option_test.go
rename to contrib/segmentio/kafka-go/tracer_test.go
index 33d38b52b9..06d64c9333 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka/option_test.go
+++ b/contrib/segmentio/kafka-go/tracer_test.go
@@ -9,53 +9,47 @@ import (
"math"
"testing"
- "github.com/stretchr/testify/assert"
-
"github.com/DataDog/dd-trace-go/v2/instrumentation/testutils"
-)
-func TestDataStreamsActivation(t *testing.T) {
- t.Run("default", func(t *testing.T) {
- cfg := newConfig()
- assert.False(t, cfg.dataStreamsEnabled)
- })
- t.Run("withOption", func(t *testing.T) {
- cfg := newConfig(WithDataStreams())
- assert.True(t, cfg.dataStreamsEnabled)
- })
- t.Run("withEnv", func(t *testing.T) {
- t.Setenv("DD_DATA_STREAMS_ENABLED", "true")
- cfg := newConfig()
- assert.True(t, cfg.dataStreamsEnabled)
- })
- t.Run("optionOverridesEnv", func(t *testing.T) {
- t.Setenv("DD_DATA_STREAMS_ENABLED", "false")
- cfg := newConfig(WithDataStreams())
- assert.True(t, cfg.dataStreamsEnabled)
- })
-}
+ "github.com/stretchr/testify/assert"
+)
-func TestAnalyticsSettings(t *testing.T) {
+func TestTracerAnalyticsSettings(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
- cfg := newConfig()
- assert.True(t, math.IsNaN(cfg.analyticsRate))
+ tr := NewTracer(KafkaConfig{})
+ assert.True(t, math.IsNaN(tr.cfg.analyticsRate))
})
t.Run("global", func(t *testing.T) {
t.Skip("global flag disabled")
testutils.SetGlobalAnalyticsRate(t, 0.4)
- cfg := newConfig()
- assert.Equal(t, 0.4, cfg.analyticsRate)
+
+ tr := NewTracer(KafkaConfig{})
+ assert.Equal(t, 0.4, tr.cfg.analyticsRate)
})
t.Run("enabled", func(t *testing.T) {
- cfg := newConfig(WithAnalytics(true))
- assert.Equal(t, 1.0, cfg.analyticsRate)
+ tr := NewTracer(KafkaConfig{}, WithAnalytics(true))
+ assert.Equal(t, 1.0, tr.cfg.analyticsRate)
})
t.Run("override", func(t *testing.T) {
testutils.SetGlobalAnalyticsRate(t, 0.4)
- cfg := newConfig(WithAnalyticsRate(0.2))
- assert.Equal(t, 0.2, cfg.analyticsRate)
+
+ tr := NewTracer(KafkaConfig{}, WithAnalyticsRate(0.2))
+ assert.Equal(t, 0.2, tr.cfg.analyticsRate)
+ })
+
+ t.Run("withEnv", func(t *testing.T) {
+ t.Setenv("DD_DATA_STREAMS_ENABLED", "true")
+ tr := NewTracer(KafkaConfig{})
+ assert.True(t, tr.cfg.dataStreamsEnabled)
+ })
+
+ t.Run("optionOverridesEnv", func(t *testing.T) {
+ t.Setenv("DD_DATA_STREAMS_ENABLED", "false")
+ tr := NewTracer(KafkaConfig{})
+ WithDataStreams().apply(tr.cfg)
+ assert.True(t, tr.cfg.dataStreamsEnabled)
})
}
diff --git a/contrib/segmentio/kafka-go/tracing.go b/contrib/segmentio/kafka-go/tracing.go
new file mode 100644
index 0000000000..5cb014b0d4
--- /dev/null
+++ b/contrib/segmentio/kafka-go/tracing.go
@@ -0,0 +1,161 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package kafka
+
+import (
+ "context"
+ "math"
+
+ "github.com/segmentio/kafka-go"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+)
+
+const componentName = "segmentio/kafka-go"
+
+func (tr *Tracer) StartConsumeSpan(ctx context.Context, msg Message) *tracer.Span {
+ opts := []tracer.StartSpanOption{
+ tracer.ServiceName(tr.cfg.consumerServiceName),
+ tracer.ResourceName("Consume Topic " + msg.GetTopic()),
+ tracer.SpanType(ext.SpanTypeMessageConsumer),
+ tracer.Tag(ext.MessagingKafkaPartition, msg.GetPartition()),
+ tracer.Tag("offset", msg.GetOffset()),
+ tracer.Tag(ext.Component, componentName),
+ tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
+ tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
+ tracer.Measured(),
+ }
+ if tr.kafkaCfg.BootstrapServers != "" {
+ opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, tr.kafkaCfg.BootstrapServers))
+ }
+ if !math.IsNaN(tr.cfg.analyticsRate) {
+ opts = append(opts, tracer.Tag(ext.EventSampleRate, tr.cfg.analyticsRate))
+ }
+ // kafka supports headers, so try to extract a span context
+ carrier := NewMessageCarrier(msg)
+ if spanctx, err := tracer.Extract(carrier); err == nil {
+ opts = append(opts, tracer.ChildOf(spanctx))
+ }
+ span, _ := tracer.StartSpanFromContext(ctx, tr.cfg.consumerSpanName, opts...)
+ // reinject the span context so consumers can pick it up
+ if err := tracer.Inject(span.Context(), carrier); err != nil {
+ instr.Logger().Debug("contrib/segmentio/kafka-go: Failed to inject span context into carrier in reader, %v", err)
+ }
+ return span
+}
+
+func (tr *Tracer) StartProduceSpan(ctx context.Context, writer Writer, msg Message, spanOpts ...tracer.StartSpanOption) *tracer.Span {
+ opts := []tracer.StartSpanOption{
+ tracer.ServiceName(tr.cfg.producerServiceName),
+ tracer.SpanType(ext.SpanTypeMessageProducer),
+ tracer.Tag(ext.Component, componentName),
+ tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
+ tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
+ }
+ if tr.kafkaCfg.BootstrapServers != "" {
+ opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, tr.kafkaCfg.BootstrapServers))
+ }
+ if writer.GetTopic() != "" {
+ opts = append(opts, tracer.ResourceName("Produce Topic "+writer.GetTopic()))
+ } else {
+ opts = append(opts, tracer.ResourceName("Produce Topic "+msg.GetTopic()))
+ }
+ if !math.IsNaN(tr.cfg.analyticsRate) {
+ opts = append(opts, tracer.Tag(ext.EventSampleRate, tr.cfg.analyticsRate))
+ }
+ opts = append(opts, spanOpts...)
+ carrier := NewMessageCarrier(msg)
+ span, _ := tracer.StartSpanFromContext(ctx, tr.cfg.producerSpanName, opts...)
+ if err := tracer.Inject(span.Context(), carrier); err != nil {
+ instr.Logger().Debug("contrib/segmentio/kafka-go: Failed to inject span context into carrier in writer, %v", err)
+ }
+ return span
+}
+
+func (*Tracer) FinishProduceSpan(span *tracer.Span, partition int, offset int64, err error) {
+ span.SetTag(ext.MessagingKafkaPartition, partition)
+ span.SetTag("offset", offset)
+ span.Finish(tracer.WithError(err))
+}
+
+type wMessage struct {
+ *kafka.Message
+}
+
+func wrapMessage(msg *kafka.Message) Message {
+ if msg == nil {
+ return nil
+ }
+ return &wMessage{msg}
+}
+
+func (w *wMessage) GetValue() []byte {
+ return w.Value
+}
+
+func (w *wMessage) GetKey() []byte {
+ return w.Key
+}
+
+func (w *wMessage) GetHeaders() []Header {
+ hs := make([]Header, 0, len(w.Headers))
+ for _, h := range w.Headers {
+ hs = append(hs, wrapHeader(h))
+ }
+ return hs
+}
+
+func (w *wMessage) SetHeaders(headers []Header) {
+ hs := make([]kafka.Header, 0, len(headers))
+ for _, h := range headers {
+ hs = append(hs, kafka.Header{
+ Key: h.GetKey(),
+ Value: h.GetValue(),
+ })
+ }
+ w.Message.Headers = hs
+}
+
+func (w *wMessage) GetTopic() string {
+ return w.Topic
+}
+
+func (w *wMessage) GetPartition() int {
+ return w.Partition
+}
+
+func (w *wMessage) GetOffset() int64 {
+ return w.Offset
+}
+
+type wHeader struct {
+ kafka.Header
+}
+
+func wrapHeader(h kafka.Header) Header {
+ return &wHeader{h}
+}
+
+func (w wHeader) GetKey() string {
+ return w.Key
+}
+
+func (w wHeader) GetValue() []byte {
+ return w.Value
+}
+
+type wWriter struct {
+ *kafka.Writer
+}
+
+func (w *wWriter) GetTopic() string {
+ return w.Topic
+}
+
+func wrapTracingWriter(w *kafka.Writer) Writer {
+ return &wWriter{w}
+}
diff --git a/contrib/segmentio/kafka-go/types.go b/contrib/segmentio/kafka-go/types.go
new file mode 100644
index 0000000000..ef4a83e64a
--- /dev/null
+++ b/contrib/segmentio/kafka-go/types.go
@@ -0,0 +1,44 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package kafka
+
+type Header interface {
+ GetKey() string
+ GetValue() []byte
+}
+
+type KafkaHeader struct {
+ Key string
+ Value []byte
+}
+
+func (h KafkaHeader) GetKey() string {
+ return h.Key
+}
+
+func (h KafkaHeader) GetValue() []byte {
+ return h.Value
+}
+
+type Writer interface {
+ GetTopic() string
+}
+
+type Message interface {
+ GetValue() []byte
+ GetKey() []byte
+ GetHeaders() []Header
+ SetHeaders([]Header)
+ GetTopic() string
+ GetPartition() int
+ GetOffset() int64
+}
+
+// KafkaConfig holds information from the kafka config for span tags.
+type KafkaConfig struct {
+ BootstrapServers string
+ ConsumerGroupID string
+}
diff --git a/contrib/sirupsen/logrus/go.mod b/contrib/sirupsen/logrus/go.mod
index 3ffc4212b4..e5fa8a50a9 100644
--- a/contrib/sirupsen/logrus/go.mod
+++ b/contrib/sirupsen/logrus/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/sirupsen/logrus/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.9.0
)
diff --git a/contrib/syndtr/goleveldb/go.mod b/contrib/syndtr/goleveldb/go.mod
index 5fd1aebd6d..beb48fd520 100644
--- a/contrib/syndtr/goleveldb/go.mod
+++ b/contrib/syndtr/goleveldb/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/syndtr/goleveldb/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
)
diff --git a/contrib/tidwall/buntdb/go.mod b/contrib/tidwall/buntdb/go.mod
index 143b37dea5..296d8d8b3c 100644
--- a/contrib/tidwall/buntdb/go.mod
+++ b/contrib/tidwall/buntdb/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/tidwall/buntdb/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
github.com/tidwall/buntdb v1.3.0
)
diff --git a/contrib/twitchtv/twirp/go.mod b/contrib/twitchtv/twirp/go.mod
index 7b4a5aba36..18e7bd281e 100644
--- a/contrib/twitchtv/twirp/go.mod
+++ b/contrib/twitchtv/twirp/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/twitchtv/twirp/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
github.com/twitchtv/twirp v8.1.3+incompatible
)
diff --git a/contrib/uptrace/bun/go.mod b/contrib/uptrace/bun/go.mod
index 0fc03849ca..d466e48a2d 100644
--- a/contrib/uptrace/bun/go.mod
+++ b/contrib/uptrace/bun/go.mod
@@ -5,7 +5,7 @@ go 1.22.0
toolchain go1.23.1
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/go-sql-driver/mysql v1.8.1
github.com/lib/pq v1.10.9
github.com/microsoft/go-mssqldb v1.7.2
diff --git a/contrib/urfave/negroni/go.mod b/contrib/urfave/negroni/go.mod
index 8053196350..e90363931b 100644
--- a/contrib/urfave/negroni/go.mod
+++ b/contrib/urfave/negroni/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/contrib/urfave/negroni/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
github.com/urfave/negroni v1.0.0
)
diff --git a/contrib/valyala/fasthttp/go.mod b/contrib/valyala/fasthttp/go.mod
index a4aba2394d..eb0cf31ded 100644
--- a/contrib/valyala/fasthttp/go.mod
+++ b/contrib/valyala/fasthttp/go.mod
@@ -2,11 +2,9 @@ module github.com/DataDog/dd-trace-go/contrib/valyala/fasthttp/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/stretchr/testify v1.9.0
github.com/valyala/fasthttp v1.51.0
)
diff --git a/ddtrace/mocktracer/mockspan.go b/ddtrace/mocktracer/mockspan.go
index 2a1d99fcbd..5a6854ce77 100644
--- a/ddtrace/mocktracer/mockspan.go
+++ b/ddtrace/mocktracer/mockspan.go
@@ -61,13 +61,10 @@ func (s *Span) Tag(k string) interface{} {
return v
}
v, ok := s.m[k]
- if !ok {
- return nil
- }
- if v != nil {
+ if ok {
return v
}
- return v
+ return nil
}
func (s *Span) Tags() map[string]interface{} {
diff --git a/ddtrace/mocktracer/mocktracer.go b/ddtrace/mocktracer/mocktracer.go
index 07f0a37b49..295d386048 100644
--- a/ddtrace/mocktracer/mocktracer.go
+++ b/ddtrace/mocktracer/mocktracer.go
@@ -97,14 +97,6 @@ func (t *mocktracer) FinishSpan(s *tracer.Span) {
t.addFinishedSpan(s)
}
-// Stop deactivates the mock tracer and sets the active tracer to a no-op.
-// TODO(hannahkm): investigate this
-// func (t *mocktracer) Stop() {
-// internal.SetGlobalTracer(&internal.NoopTracer{})
-// internal.Testing = false
-// t.dsmProcessor.Stop()
-// }
-
// Stop deactivates the mock tracer and sets the active tracer to a no-op.
func (t *mocktracer) Stop() {
tracer.StopTestTracer()
diff --git a/ddtrace/mocktracer/mocktracer_test.go b/ddtrace/mocktracer/mocktracer_test.go
index 23a16d3311..38f98b10f8 100644
--- a/ddtrace/mocktracer/mocktracer_test.go
+++ b/ddtrace/mocktracer/mocktracer_test.go
@@ -195,6 +195,7 @@ func TestTracerInject(t *testing.T) {
t.Run("ok", func(t *testing.T) {
mt := newMockTracer()
+ defer mt.Stop()
assert := assert.New(t)
sp := mt.StartSpan("op", tracer.WithSpanID(2))
@@ -300,6 +301,7 @@ func TestTracerExtract(t *testing.T) {
assert := assert.New(t)
mt := newMockTracer()
+ defer mt.Stop()
sp := mt.StartSpan("op", tracer.WithSpanID(2))
sp.SetTag(ext.ManualDrop, true)
sp.SetBaggageItem("a", "B")
diff --git a/ddtrace/tracer/log.go b/ddtrace/tracer/log.go
index c82bfcb3e0..b43b8e055b 100644
--- a/ddtrace/tracer/log.go
+++ b/ddtrace/tracer/log.go
@@ -92,7 +92,18 @@ func logStartup(t *tracer) {
featureFlags = append(featureFlags, f)
}
- cp, _ := t.config.propagator.(*chainedPropagator)
+ var injectorNames, extractorNames string
+ switch v := t.config.propagator.(type) {
+ case *chainedPropagator:
+ injectorNames = v.injectorNames
+ extractorNames = v.extractorsNames
+ case nil:
+ injectorNames = ""
+ extractorNames = ""
+ default:
+ injectorNames = "custom"
+ extractorNames = "custom"
+ }
info := startupInfo{
Date: time.Now().Format(time.RFC3339),
@@ -128,12 +139,10 @@ func logStartup(t *tracer) {
Orchestrion: t.config.orchestrionCfg,
FeatureFlags: featureFlags,
}
- // v1 shim sets a wrapped propagator, thus yielding a nil value here when
- // is casted to a chainedPropagator value.
- if cp != nil {
- info.PropagationStyleInject = cp.injectorNames
- info.PropagationStyleExtract = cp.extractorsNames
- }
+
+ info.PropagationStyleInject = injectorNames
+ info.PropagationStyleExtract = extractorNames
+
if _, _, err := samplingRulesFromEnv(); err != nil {
info.SamplingRulesError = fmt.Sprintf("%s", err)
}
diff --git a/ddtrace/tracer/log_test.go b/ddtrace/tracer/log_test.go
index be5f63b992..44e04a2c88 100644
--- a/ddtrace/tracer/log_test.go
+++ b/ddtrace/tracer/log_test.go
@@ -188,3 +188,77 @@ func TestLogFormat(t *testing.T) {
assert.Len(tp.Logs(), 1)
assert.Regexp(logPrefixRegexp+` DEBUG: Started Span: dd.trace_id="12345" dd.span_id="12345" dd.parent_id="0", Operation: test, Resource: /, Tags: map.*, map.*`, tp.Logs()[0])
}
+
+func TestLogPropagators(t *testing.T) {
+ t.Run("default", func(t *testing.T) {
+ assert := assert.New(t)
+ substring := `"propagation_style_inject":"datadog,tracecontext","propagation_style_extract":"datadog,tracecontext"`
+ log := setup(t, nil)
+ assert.Regexp(substring, log)
+ })
+ t.Run("datadog,tracecontext", func(t *testing.T) {
+ assert := assert.New(t)
+ t.Setenv("DD_TRACE_PROPAGATION_STYLE", "datadog,tracecontext")
+ substring := `"propagation_style_inject":"datadog,tracecontext","propagation_style_extract":"datadog,tracecontext"`
+ log := setup(t, nil)
+ assert.Regexp(substring, log)
+ })
+ t.Run("b3multi", func(t *testing.T) {
+ assert := assert.New(t)
+ t.Setenv("DD_TRACE_PROPAGATION_STYLE", "b3multi")
+ substring := `"propagation_style_inject":"b3multi","propagation_style_extract":"b3multi"`
+ log := setup(t, nil)
+ assert.Regexp(substring, log)
+ })
+ t.Run("none", func(t *testing.T) {
+ assert := assert.New(t)
+ t.Setenv("DD_TRACE_PROPAGATION_STYLE", "none")
+ substring := `"propagation_style_inject":"","propagation_style_extract":""`
+ log := setup(t, nil)
+ assert.Regexp(substring, log)
+ })
+ t.Run("different-injector-extractor", func(t *testing.T) {
+ assert := assert.New(t)
+ t.Setenv("DD_TRACE_PROPAGATION_STYLE_INJECT", "b3multi")
+ t.Setenv("DD_TRACE_PROPAGATION_STYLE_EXTRACT", "tracecontext")
+ substring := `"propagation_style_inject":"b3multi","propagation_style_extract":"tracecontext"`
+ log := setup(t, nil)
+ assert.Regexp(substring, log)
+ })
+ t.Run("custom-propagator", func(t *testing.T) {
+ assert := assert.New(t)
+ substring := `"propagation_style_inject":"custom","propagation_style_extract":"custom"`
+ p := &prop{}
+ log := setup(t, p)
+ assert.Regexp(substring, log)
+ })
+}
+
+type prop struct{}
+
+func (p *prop) Inject(context *SpanContext, carrier interface{}) (e error) {
+ return
+}
+func (p *prop) Extract(carrier interface{}) (sctx *SpanContext, e error) {
+ return
+}
+
+func setup(t *testing.T, customProp Propagator) string {
+ tp := new(log.RecordLogger)
+ var tracer *tracer
+ var stop func()
+ var err error
+ if customProp != nil {
+ tracer, _, _, stop, err = startTestTracer(t, WithLogger(tp), WithPropagator(customProp))
+ assert.NoError(t, err)
+ } else {
+ tracer, _, _, stop, err = startTestTracer(t, WithLogger(tp))
+ assert.NoError(t, err)
+ }
+ defer stop()
+ tp.Reset()
+ tp.Ignore("appsec: ", telemetry.LogPrefix)
+ logStartup(tracer)
+ require.Len(t, tp.Logs(), 2)
+ return tp.Logs()[1]
+}
diff --git a/ddtrace/tracer/option.go b/ddtrace/tracer/option.go
index f7a6059ec3..814a089c86 100644
--- a/ddtrace/tracer/option.go
+++ b/ddtrace/tracer/option.go
@@ -1365,6 +1365,10 @@ func setHeaderTags(headerAsTags []string) bool {
globalconfig.ClearHeaderTags()
for _, h := range headerAsTags {
header, tag := normalizer.HeaderTag(h)
+ if len(header) == 0 || len(tag) == 0 {
+			log.Debug("Header-tag input is in an unsupported format; dropping input value %v", h)
+ continue
+ }
globalconfig.SetHeaderTag(header, tag)
}
return true
diff --git a/ddtrace/tracer/option_test.go b/ddtrace/tracer/option_test.go
index 86860c6457..1f3689f4e0 100644
--- a/ddtrace/tracer/option_test.go
+++ b/ddtrace/tracer/option_test.go
@@ -350,7 +350,7 @@ func TestIntegrationEnabled(t *testing.T) {
t.Fatal(err)
}
err = filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
- if filepath.Base(path) != "go.mod" {
+ if filepath.Base(path) != "go.mod" || strings.Contains(path, "/internal") {
return nil
}
rErr := testIntegrationEnabled(t, filepath.Dir(path))
@@ -392,14 +392,14 @@ func testIntegrationEnabled(t *testing.T, contribPath string) error {
packages = append(packages, out)
}
for _, pkg := range packages {
- if strings.Contains(pkg.ImportPath, "/test") || strings.Contains(pkg.ImportPath, "/internal") {
+ if strings.Contains(pkg.ImportPath, "/test") {
continue
}
- if !hasInstrumentationImport(pkg) {
- return fmt.Errorf(`package %q is expected use instrumentation telemetry. For more info see https://github.com/DataDog/dd-trace-go/blob/main/contrib/README.md#instrumentation-telemetry`, pkg.ImportPath)
+ if hasInstrumentationImport(pkg) {
+ return nil
}
}
- return nil
+	return fmt.Errorf(`package %q is expected to use instrumentation telemetry. For more info see https://github.com/DataDog/dd-trace-go/blob/main/contrib/README.md#instrumentation-telemetry`, contribPath)
}
func hasInstrumentationImport(p contribPkg) bool {
@@ -1566,6 +1566,28 @@ func TestWithHeaderTags(t *testing.T) {
assert.Equal(ext.HTTPRequestHeaders+".2_h_e_a_d_e_r", globalconfig.HeaderTag("2.h.e.a.d.e.r"))
})
+ t.Run("envvar-invalid", func(t *testing.T) {
+ defer globalconfig.ClearHeaderTags()
+ t.Setenv("DD_TRACE_HEADER_TAGS", "header1:")
+
+ assert := assert.New(t)
+ newConfig()
+
+ assert.Equal(0, globalconfig.HeaderTagsLen())
+ })
+
+ t.Run("envvar-partially-invalid", func(t *testing.T) {
+ defer globalconfig.ClearHeaderTags()
+ t.Setenv("DD_TRACE_HEADER_TAGS", "header1,header2:")
+
+ assert := assert.New(t)
+ newConfig()
+
+ assert.Equal(1, globalconfig.HeaderTagsLen())
+ fmt.Println(globalconfig.HeaderTagMap())
+ assert.Equal(ext.HTTPRequestHeaders+".header1", globalconfig.HeaderTag("Header1"))
+ })
+
t.Run("env-override", func(t *testing.T) {
defer globalconfig.ClearHeaderTags()
assert := assert.New(t)
diff --git a/ddtrace/tracer/span.go b/ddtrace/tracer/span.go
index d5ad54ce97..52cc830009 100644
--- a/ddtrace/tracer/span.go
+++ b/ddtrace/tracer/span.go
@@ -212,6 +212,10 @@ func (s *Span) SetTag(key string, value interface{}) {
return
}
+ if v, ok := value.([]byte); ok {
+ s.setMeta(key, string(v))
+ }
+
if value != nil {
// Arrays will be translated to dot notation. e.g.
// {"myarr.0": "foo", "myarr.1": "bar"}
@@ -552,6 +556,9 @@ func (s *Span) Finish(opts ...FinishOption) {
if !cfg.FinishTime.IsZero() {
t = cfg.FinishTime.UnixNano()
}
+ if cfg.NoDebugStack {
+ delete(s.meta, ext.ErrorStack)
+ }
if cfg.Error != nil {
s.Lock()
s.setTagError(cfg.Error, errorConfig{
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 03242034e5..a246ee8423 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -125,6 +125,7 @@ services:
image: memcached:1.5.9
ports:
- "11211:11211"
+<<<<<<< HEAD
zookeeper:
image: confluentinc/cp-zookeeper:7.7.0
environment:
@@ -145,6 +146,26 @@ services:
KAFKA_GROUP_ID: "gotest"
depends_on:
- zookeeper
+=======
+ kafka:
+ image: confluentinc/confluent-local:7.5.0
+ environment:
+ KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9093,BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9094"
+ KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://localhost:9093,BROKER://localhost:9092"
+ KAFKA_REST_BOOTSTRAP_SERVERS: "PLAINTEXT://0.0.0.0:9093,BROKER://0.0.0.0:9092"
+ KAFKA_CONTROLLER_QUORUM_VOTERS: "1@localhost:9094"
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "BROKER:PLAINTEXT,PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT"
+ KAFKA_INTER_BROKER_LISTENER_NAME: "BROKER"
+ KAFKA_BROKER_ID: "1"
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
+ KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1"
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "1"
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: "1"
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: "0"
+ KAFKA_NODE_ID: "1"
+ KAFKA_PROCESS_ROLES: "broker,controller"
+ KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+>>>>>>> v1.69.1-rc.4
ports:
- "9092:9092"
localstack:
diff --git a/go.mod b/go.mod
index 666bfdcca0..f3d80fb90a 100644
--- a/go.mod
+++ b/go.mod
@@ -10,6 +10,11 @@ require (
github.com/DataDog/go-libddwaf/v3 v3.4.0
github.com/DataDog/gostackparse v0.7.0
github.com/DataDog/sketches-go v1.4.5
+ github.com/aws/aws-sdk-go-v2 v1.32.2
+ github.com/aws/aws-sdk-go-v2/service/eventbridge v1.35.2
+ github.com/aws/aws-sdk-go-v2/service/sns v1.33.2
+ github.com/aws/aws-sdk-go-v2/service/sqs v1.36.2
+ github.com/aws/smithy-go v1.22.0
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b
github.com/google/uuid v1.5.0
@@ -37,6 +42,9 @@ require (
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/andybalholm/brotli v1.0.6 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
@@ -45,7 +53,6 @@ require (
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
- github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/klauspost/compress v1.17.1 // indirect
@@ -67,3 +74,5 @@ require (
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
+
+replace github.com/DataDog/dd-trace-go/v2/internal/setup-smoke-test => ./internal/setup-smoke-test
diff --git a/go.sum b/go.sum
index 7e827e58a2..dfdfc1feae 100644
--- a/go.sum
+++ b/go.sum
@@ -20,6 +20,22 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI=
+github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 h1:7edmS3VOBDhK00b/MwGtGglCm7hhwNYnjJs/PgFdMQE=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21/go.mod h1:Q9o5h4HoIWG8XfzxqiuK/CGUbepCJ8uTlaE3bAbxytQ=
+github.com/aws/aws-sdk-go-v2/service/eventbridge v1.35.2 h1:FGrUiKglp0u7Zs19serLM/i22+IiwGxLCOJm4OtOMBI=
+github.com/aws/aws-sdk-go-v2/service/eventbridge v1.35.2/go.mod h1:OtWNmq2QGr/BUeJfs7ASAlzg0qjt96Su401dCdOks14=
+github.com/aws/aws-sdk-go-v2/service/sns v1.33.2 h1:GeVRrB1aJsGdXxdPY6VOv0SWs+pfdeDlKgiBxi0+V6I=
+github.com/aws/aws-sdk-go-v2/service/sns v1.33.2/go.mod h1:c6Sj8zleZXYs4nyU3gpDKTzPWu7+t30YUXoLYRpbUvU=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.36.2 h1:kmbcoWgbzfh5a6rvfjOnfHSGEqD13qu1GfTPRZqg0FI=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.36.2/go.mod h1:/UPx74a3M0WYeT2yLQYG/qHhkPlPXd6TsppfGgy2COk=
+github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
+github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
@@ -60,8 +76,6 @@ github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBB
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
@@ -71,8 +85,6 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g=
github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -84,15 +96,11 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
-github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0=
github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 h1:jYi87L8j62qkXzaYHAQAhEapgukhenIMZRBKTNRLHJ4=
@@ -103,8 +111,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
-github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY=
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
@@ -215,23 +221,3 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
-lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
-modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
-modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
-modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
-modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI=
-modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw=
-modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
-modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
-modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
-modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
-modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
-modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
-modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
-modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
-modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
-modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
-modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
-modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
-modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
diff --git a/go.work b/go.work
index 64bf501111..90543e099f 100644
--- a/go.work
+++ b/go.work
@@ -1,6 +1,6 @@
-go 1.23.0 // Go version must match the highest supported version, not the minimum supported version, with format X.Y.
+go 1.22.0 // Go version must match the lowest supported version, not the highest, with format X.Y.
-toolchain go1.23.1
+toolchain go1.22
use (
.
@@ -61,6 +61,7 @@ use (
./internal/apps
./internal/contrib/validationtest
./internal/exectracetest
+ ./internal/setup-smoke-test
./internal/traceprof/traceproftest
./tools/fixmodules
./tools/v2check
diff --git a/go.work.sum b/go.work.sum
index 44af9a81be..c8d87fc2b4 100644
--- a/go.work.sum
+++ b/go.work.sum
@@ -939,6 +939,7 @@ github.com/aws/aws-sdk-go-v2 v1.17.8/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3eP
github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2 v1.20.3/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M=
github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
+github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM=
github.com/aws/aws-sdk-go-v2/config v1.18.21/go.mod h1:+jPQiVPz1diRnjj6VGqWcLK6EzNmQ42l7J3OqGTLsSY=
@@ -950,9 +951,11 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24L
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.32/go.mod h1:RudqOgadTWdcS3t/erPQo24pcVEoYyqj/kKW5Vya21I=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.40/go.mod h1:5kKmFhLeOVy6pwPDpDNA6/hK/d6URC98pqDDqHgdBx4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.26/go.mod h1:vq86l7956VgFr0/FWQ2BWnK07QC3WYsepKzy33qqY5U=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.34/go.mod h1:RZP0scceAyhMIQ9JvFp7HvkpcgqjL4l/4C+7RAeGbuM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.33/go.mod h1:zG2FcwjQarWaqXSCGpgcr3RSjZ6dHGguZSppUL0XR7Q=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.24/go.mod h1:+fFaIjycTmpV6hjmPTbyU9Kp5MI/lA+bbibcAtmlhYA=
@@ -986,6 +989,7 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2M
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
diff --git a/instrumentation/appsec/emitter/httpsec/http.go b/instrumentation/appsec/emitter/httpsec/http.go
index 956fdca815..f342391f82 100644
--- a/instrumentation/appsec/emitter/httpsec/http.go
+++ b/instrumentation/appsec/emitter/httpsec/http.go
@@ -108,61 +108,83 @@ func makeCookies(parsed []*http.Cookie) map[string][]string {
return cookies
}
-// WrapHandler wraps the given HTTP handler with the abstract HTTP operation defined by HandlerOperationArgs and
-// HandlerOperationRes.
-// The onBlock params are used to cleanup the context when needed.
-// It is a specific patch meant for Gin, for which we must abort the
-// context since it uses a queue of handlers and it's the only way to make
-// sure other queued handlers don't get executed.
-// TODO: this patch must be removed/improved when we rework our actions/operations system
-func WrapHandler(handler http.Handler, span trace.TagSetter, pathParams map[string]string, opts *Config) http.Handler {
+// BeforeHandle contains the appsec functionality that should be executed before a http.Handler runs.
+// It returns the modified http.ResponseWriter and http.Request, an additional afterHandle function
+// that should be executed after the Handler runs, and a handled bool that instructs if the request has been handled
+// or not - in case it was handled, the original handler should not run.
+func BeforeHandle(
+ w http.ResponseWriter,
+ r *http.Request,
+ span trace.TagSetter,
+ pathParams map[string]string,
+ opts *Config,
+) (http.ResponseWriter, *http.Request, func(), bool) {
if opts == nil {
opts = defaultWrapHandlerConfig
} else if opts.ResponseHeaderCopier == nil {
opts.ResponseHeaderCopier = defaultWrapHandlerConfig.ResponseHeaderCopier
}
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- op, blockAtomic, ctx := StartOperation(r.Context(), HandlerOperationArgs{
- Method: r.Method,
- RequestURI: r.RequestURI,
- Host: r.Host,
- RemoteAddr: r.RemoteAddr,
- Headers: r.Header,
- Cookies: makeCookies(r.Cookies()),
- QueryParams: r.URL.Query(),
- PathParams: pathParams,
- })
- r = r.WithContext(ctx)
-
- defer func() {
- var statusCode int
- if res, ok := w.(interface{ Status() int }); ok {
- statusCode = res.Status()
+ op, blockAtomic, ctx := StartOperation(r.Context(), HandlerOperationArgs{
+ Method: r.Method,
+ RequestURI: r.RequestURI,
+ Host: r.Host,
+ RemoteAddr: r.RemoteAddr,
+ Headers: r.Header,
+ Cookies: makeCookies(r.Cookies()),
+ QueryParams: r.URL.Query(),
+ PathParams: pathParams,
+ })
+ tr := r.WithContext(ctx)
+
+ afterHandle := func() {
+ var statusCode int
+ if res, ok := w.(interface{ Status() int }); ok {
+ statusCode = res.Status()
+ }
+ op.Finish(HandlerOperationRes{
+ Headers: opts.ResponseHeaderCopier(w),
+ StatusCode: statusCode,
+ }, &span)
+
+ // Execute the onBlock functions to make sure blocking works properly
+ // in case we are instrumenting the Gin framework
+ if blockPtr := blockAtomic.Load(); blockPtr != nil {
+ for _, f := range opts.OnBlock {
+ f()
}
- op.Finish(HandlerOperationRes{
- Headers: opts.ResponseHeaderCopier(w),
- StatusCode: statusCode,
- }, &span)
-
- // Execute the onBlock functions to make sure blocking works properly
- // in case we are instrumenting the Gin framework
- if blockPtr := blockAtomic.Load(); blockPtr != nil {
- for _, f := range opts.OnBlock {
- f()
- }
-
- if blockPtr.Handler != nil {
- blockPtr.Handler.ServeHTTP(w, r)
- }
+
+ if blockPtr.Handler != nil {
+ blockPtr.Handler.ServeHTTP(w, tr)
}
- }()
+ }
+ }
+
+ handled := false
+ if blockPtr := blockAtomic.Load(); blockPtr != nil && blockPtr.Handler != nil {
+ // handler is replaced
+ blockPtr.Handler.ServeHTTP(w, tr)
+ blockPtr.Handler = nil
+ handled = true
+ }
+ return w, tr, afterHandle, handled
+}
- if blockPtr := blockAtomic.Load(); blockPtr != nil && blockPtr.Handler != nil {
- handler = blockPtr.Handler
- blockPtr.Handler = nil
+// WrapHandler wraps the given HTTP handler with the abstract HTTP operation defined by HandlerOperationArgs and
+// HandlerOperationRes.
+// The onBlock params are used to cleanup the context when needed.
+// It is a specific patch meant for Gin, for which we must abort the
+// context since it uses a queue of handlers and it's the only way to make
+// sure other queued handlers don't get executed.
+// TODO: this patch must be removed/improved when we rework our actions/operations system
+func WrapHandler(handler http.Handler, span trace.TagSetter, pathParams map[string]string, opts *Config) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ tw, tr, afterHandle, handled := BeforeHandle(w, r, span, pathParams, opts)
+ defer afterHandle()
+ if handled {
+ return
}
- handler.ServeHTTP(w, r)
+ handler.ServeHTTP(tw, tr)
})
}
diff --git a/instrumentation/httptrace/before_handle.go b/instrumentation/httptrace/before_handle.go
new file mode 100644
index 0000000000..c4b91e3e7e
--- /dev/null
+++ b/instrumentation/httptrace/before_handle.go
@@ -0,0 +1,79 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package httptrace
+
+import (
+ "net/http"
+
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec"
+ "github.com/DataDog/dd-trace-go/v2/internal/appsec"
+)
+
+// ServeConfig specifies the tracing configuration when using TraceAndServe.
+type ServeConfig struct {
+ // Service specifies the service name to use. If left blank, the global service name
+ // will be inherited.
+ Service string
+ // Resource optionally specifies the resource name for this request.
+ Resource string
+ // QueryParams should be true in order to append the URL query values to the "http.url" tag.
+ QueryParams bool
+ // Route is the request matched route if any, or is empty otherwise
+ Route string
+ // RouteParams specifies framework-specific route parameters (e.g. for route /user/:id coming
+ // in as /user/123 we'll have {"id": "123"}). This field is optional and is used for monitoring
+ // by AppSec. It is only taken into account when AppSec is enabled.
+ RouteParams map[string]string
+ // FinishOpts specifies any options to be used when finishing the request span.
+ FinishOpts []tracer.FinishOption
+ // SpanOpts specifies any options to be applied to the request starting span.
+ SpanOpts []tracer.StartSpanOption
+}
+
+// BeforeHandle contains functionality that should be executed before a http.Handler runs.
+// It returns the "traced" http.ResponseWriter and http.Request, an additional afterHandle function
+// that should be executed after the Handler runs, and a handled bool that instructs if the request has been handled
+// or not - in case it was handled, the original handler should not run.
+func BeforeHandle(cfg *ServeConfig, w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request, func(), bool) {
+ if cfg == nil {
+ cfg = new(ServeConfig)
+ }
+ opts := make([]tracer.StartSpanOption, len(cfg.SpanOpts))
+ copy(opts, cfg.SpanOpts)
+ if cfg.Service != "" {
+ opts = append(opts, tracer.ServiceName(cfg.Service))
+ }
+ if cfg.Resource != "" {
+ opts = append(opts, tracer.ResourceName(cfg.Resource))
+ }
+ if cfg.Route != "" {
+ opts = append(opts, tracer.Tag(ext.HTTPRoute, cfg.Route))
+ }
+ // Pre-append span.kind and component tags to the options so that they can be overridden.
+ opts = append([]tracer.StartSpanOption{tracer.Tag(ext.SpanKind, ext.SpanKindServer), tracer.Tag(ext.Component, "net/http")}, opts...)
+ span, ctx := StartRequestSpan(r, opts...)
+ rw, ddrw := wrapResponseWriter(w)
+ rt := r.WithContext(ctx)
+
+ closeSpan := func() {
+ FinishRequestSpan(span, ddrw.status, cfg.FinishOpts...)
+ }
+ afterHandle := closeSpan
+ handled := false
+ if appsec.Enabled() {
+ secW, secReq, secAfterHandle, secHandled := httpsec.BeforeHandle(rw, rt, span, cfg.RouteParams, nil)
+ afterHandle = func() {
+ secAfterHandle()
+ closeSpan()
+ }
+ rw = secW
+ rt = secReq
+ handled = secHandled
+ }
+ return rw, rt, afterHandle, handled
+}
diff --git a/instrumentation/httptrace/httptrace.go b/instrumentation/httptrace/httptrace.go
index 5770e64671..c12d18d841 100644
--- a/instrumentation/httptrace/httptrace.go
+++ b/instrumentation/httptrace/httptrace.go
@@ -17,7 +17,6 @@ import (
"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/instrumentation"
- "github.com/DataDog/dd-trace-go/v2/internal"
"github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec"
)
@@ -144,10 +143,3 @@ func HeaderTagsFromRequest(req *http.Request, headerTags instrumentation.HeaderT
}
}
}
-
-// This is a workaround needed because of v2 changes that prevents contribs from accessing
-// the internal directory. This function should not be used if the internal directory
-// can be accessed.
-func GetBoolEnv(key string, def bool) bool {
- return internal.BoolEnv(key, def)
-}
diff --git a/instrumentation/httptrace/httptrace_test.go b/instrumentation/httptrace/httptrace_test.go
index 15c01a1e16..c144391eff 100644
--- a/instrumentation/httptrace/httptrace_test.go
+++ b/instrumentation/httptrace/httptrace_test.go
@@ -106,13 +106,13 @@ func TestConfiguredErrorStatuses(t *testing.T) {
switch status {
case 0:
assert.Equal(t, "200", spans[i].Tag(ext.HTTPCode))
- assert.Nil(t, spans[i].Tag(ext.Error))
+ assert.Nil(t, spans[i].Tag(ext.ErrorMsg))
case 200, 400:
assert.Equal(t, strconv.Itoa(status), spans[i].Tag(ext.HTTPCode))
- assert.Equal(t, fmt.Errorf("%s: %s", strconv.Itoa(status), http.StatusText(status)), spans[i].Tag(ext.Error).(error))
+ assert.Equal(t, fmt.Sprintf("%s: %s", strconv.Itoa(status), http.StatusText(status)), spans[i].Tag(ext.ErrorMsg))
case 500:
assert.Equal(t, strconv.Itoa(status), spans[i].Tag(ext.HTTPCode))
- assert.Nil(t, spans[i].Tag(ext.Error))
+ assert.Nil(t, spans[i].Tag(ext.ErrorMsg))
}
}
})
@@ -133,7 +133,7 @@ func TestConfiguredErrorStatuses(t *testing.T) {
spans := mt.FinishedSpans()
require.Len(t, spans, 1)
assert.Equal(t, "0", spans[0].Tag(ext.HTTPCode))
- assert.Equal(t, fmt.Errorf("0: %s", http.StatusText(0)), spans[0].Tag(ext.Error).(error))
+ assert.Equal(t, fmt.Sprintf("0: %s", http.StatusText(0)), spans[0].Tag(ext.ErrorMsg))
})
}
diff --git a/instrumentation/httptrace/make_responsewriter.go b/instrumentation/httptrace/make_responsewriter.go
new file mode 100644
index 0000000000..cb4e2c345e
--- /dev/null
+++ b/instrumentation/httptrace/make_responsewriter.go
@@ -0,0 +1,88 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:build ignore
+// +build ignore
+
+// This program generates wrapper implementations of http.ResponseWriter that
+// also satisfy http.Flusher, http.Pusher, http.CloseNotifier and http.Hijacker,
+// based on whether or not the passed in http.ResponseWriter also satisfies
+// them.
+
+package main
+
+import (
+ "os"
+ "text/template"
+
+ "github.com/DataDog/dd-trace-go/v2/contrib/internal/lists"
+)
+
+func main() {
+ interfaces := []string{"Flusher", "Pusher", "CloseNotifier", "Hijacker"}
+ var combos [][][]string
+ for pick := len(interfaces); pick > 0; pick-- {
+ combos = append(combos, lists.Combinations(interfaces, pick))
+ }
+ template.Must(template.New("").Parse(tpl)).Execute(os.Stdout, map[string]interface{}{
+ "Interfaces": interfaces,
+ "Combinations": combos,
+ })
+}
+
+var tpl = `// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Code generated by make_responsewriter.go DO NOT EDIT
+
+package httptrace
+
+import "net/http"
+
+
+// wrapResponseWriter wraps an underlying http.ResponseWriter so that it can
+// trace the http response codes. It also checks for various http interfaces
+// (Flusher, Pusher, CloseNotifier, Hijacker) and if the underlying
+// http.ResponseWriter implements them it generates an unnamed struct with the
+// appropriate fields.
+//
+// This code is generated because we have to account for all the permutations
+// of the interfaces.
+//
+// In case of any new interfaces or methods we didn't consider here, we also
+// implement the rwUnwrapper interface, which is used internally by
+// the standard library: https://github.com/golang/go/blob/6d89b38ed86e0bfa0ddaba08dc4071e6bb300eea/src/net/http/responsecontroller.go#L42-L44
+func wrapResponseWriter(w http.ResponseWriter) (http.ResponseWriter, *responseWriter) {
+{{- range .Interfaces }}
+ h{{.}}, ok{{.}} := w.(http.{{.}})
+{{- end }}
+
+ mw := newResponseWriter(w)
+ type monitoredResponseWriter interface {
+ http.ResponseWriter
+ Status() int
+ Unwrap() http.ResponseWriter
+ }
+ switch {
+{{- range .Combinations }}
+ {{- range . }}
+ case {{ range $i, $v := . }}{{ if gt $i 0 }} && {{ end }}ok{{ $v }}{{ end }}:
+ w = struct {
+ monitoredResponseWriter
+ {{- range . }}
+ http.{{.}}
+ {{- end }}
+ }{mw{{ range . }}, h{{.}}{{ end }}}
+ {{- end }}
+{{- end }}
+ default:
+ w = mw
+ }
+
+ return w, mw
+}
+`
diff --git a/instrumentation/httptrace/response_writer.go b/instrumentation/httptrace/response_writer.go
new file mode 100644
index 0000000000..2bbc31bad7
--- /dev/null
+++ b/instrumentation/httptrace/response_writer.go
@@ -0,0 +1,51 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package httptrace
+
+//go:generate sh -c "go run make_responsewriter.go | gofmt > trace_gen.go"
+
+import "net/http"
+
+// responseWriter is a small wrapper around an http response writer that will
+// intercept and store the status of a request.
+type responseWriter struct {
+ http.ResponseWriter
+ status int
+}
+
+func newResponseWriter(w http.ResponseWriter) *responseWriter {
+ return &responseWriter{w, 0}
+}
+
+// Status returns the status code that was monitored.
+func (w *responseWriter) Status() int {
+ return w.status
+}
+
+// Write writes the data to the connection as part of an HTTP reply.
+// We explicitly call WriteHeader with the 200 status code
+// in order to get it reported into the span.
+func (w *responseWriter) Write(b []byte) (int, error) {
+ if w.status == 0 {
+ w.WriteHeader(http.StatusOK)
+ }
+ return w.ResponseWriter.Write(b)
+}
+
+// WriteHeader sends an HTTP response header with status code.
+// It also sets the status code to the span.
+func (w *responseWriter) WriteHeader(status int) {
+ if w.status != 0 {
+ return
+ }
+ w.ResponseWriter.WriteHeader(status)
+ w.status = status
+}
+
+// Unwrap returns the underlying wrapped http.ResponseWriter.
+func (w *responseWriter) Unwrap() http.ResponseWriter {
+ return w.ResponseWriter
+}
diff --git a/instrumentation/httptrace/response_writer_test.go b/instrumentation/httptrace/response_writer_test.go
new file mode 100644
index 0000000000..78d5ffc6e2
--- /dev/null
+++ b/instrumentation/httptrace/response_writer_test.go
@@ -0,0 +1,36 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package httptrace
+
+import (
+ "net/http"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_wrapResponseWriter(t *testing.T) {
+ // there doesn't appear to be an easy way to test http.Pusher support via an http request
+ // so we'll just confirm wrapResponseWriter preserves it
+ t.Run("Pusher", func(t *testing.T) {
+ var i struct {
+ http.ResponseWriter
+ http.Pusher
+ }
+ var w http.ResponseWriter = i
+ _, ok := w.(http.ResponseWriter)
+ assert.True(t, ok)
+ _, ok = w.(http.Pusher)
+ assert.True(t, ok)
+
+ w, _ = wrapResponseWriter(w)
+ _, ok = w.(http.ResponseWriter)
+ assert.True(t, ok)
+ _, ok = w.(http.Pusher)
+ assert.True(t, ok)
+ })
+
+}
diff --git a/contrib/net/http/trace_gen.go b/instrumentation/httptrace/trace_gen.go
similarity index 99%
rename from contrib/net/http/trace_gen.go
rename to instrumentation/httptrace/trace_gen.go
index db04144454..24e261838e 100644
--- a/contrib/net/http/trace_gen.go
+++ b/instrumentation/httptrace/trace_gen.go
@@ -5,7 +5,7 @@
// Code generated by make_responsewriter.go DO NOT EDIT
-package http
+package httptrace
import "net/http"
diff --git a/instrumentation/internal/namingschematest/go.mod b/instrumentation/internal/namingschematest/go.mod
index 8af6faa401..4b759f7b78 100644
--- a/instrumentation/internal/namingschematest/go.mod
+++ b/instrumentation/internal/namingschematest/go.mod
@@ -2,60 +2,58 @@ module github.com/DataDog/dd-trace-go/instrumentation/internal/namingschematest/
go 1.22.0
-toolchain go1.23.1
-
require (
cloud.google.com/go/pubsub v1.41.0
github.com/99designs/gqlgen v0.17.49
- github.com/DataDog/dd-trace-go/contrib/99designs/gqlgen/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/aws/aws-sdk-go-v2/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/aws/aws-sdk-go/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/bradfitz/gomemcache/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/cloud.google.com/go/pubsub.v1/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/kafka.v2/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/kafka/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/database/sql/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/dimfeld/httptreemux.v5/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/elastic/go-elasticsearch.v6/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/emicklei/go-restful.v3/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/gin-gonic/gin/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/globalsign/mgo/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/go-chi/chi.v5/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/go-pg/pg.v10/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/go-redis/redis.v7/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/go-redis/redis.v8/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/go-redis/redis/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/go.mongodb.org/mongo-driver/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/gocql/gocql/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/gofiber/fiber.v2/v2 v2.0.0-20241021170900-f2acfa7aff40
+ github.com/DataDog/dd-trace-go/contrib/99designs/gqlgen/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/aws/aws-sdk-go-v2/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/aws/aws-sdk-go/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/bradfitz/gomemcache/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/cloud.google.com/go/pubsub.v1/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/kafka.v2/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/confluentinc/confluent-kafka-go/kafka/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/database/sql/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/dimfeld/httptreemux.v5/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/elastic/go-elasticsearch.v6/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/emicklei/go-restful.v3/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/gin-gonic/gin/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/globalsign/mgo/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/go-chi/chi.v5/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/go-chi/chi/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/go-pg/pg.v10/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/go-redis/redis.v7/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/go-redis/redis.v8/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/go-redis/redis/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/go.mongodb.org/mongo-driver/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/gocql/gocql/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/gofiber/fiber.v2/v2 v2.0.0-20241024132757-00c300faaace
github.com/DataDog/dd-trace-go/contrib/gomodule/redigo/v2 v2.0.0-20240827110213-c6fc4fe2047a
- github.com/DataDog/dd-trace-go/contrib/google.golang.org/grpc/v2 v2.0.0-beta.2.0.20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/graph-gophers/graphql-go/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/hashicorp/consul/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/hashicorp/vault/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/julienschmidt/httprouter/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/labstack/echo.v4/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/olivere/elastic.v5/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/redis/go-redis.v9/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/segmentio/kafka-go/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/syndtr/goleveldb/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/tidwall/buntdb/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/twitchtv/twirp/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/urfave/negroni/v2 v2.0.0-20241021170900-f2acfa7aff40
+ github.com/DataDog/dd-trace-go/contrib/google.golang.org/grpc/v2 v2.0.0-beta.2.0.20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/graph-gophers/graphql-go/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/hashicorp/consul/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/hashicorp/vault/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/julienschmidt/httprouter/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/labstack/echo.v4/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/olivere/elastic.v5/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/redis/go-redis.v9/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/segmentio/kafka-go/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/syndtr/goleveldb/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/tidwall/buntdb/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/twitchtv/twirp/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/urfave/negroni/v2 v2.0.0-20241024132757-00c300faaace
github.com/DataDog/dd-trace-go/instrumentation/testutils/grpc/v2 v2.0.0-20240827110213-c6fc4fe2047a
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/IBM/sarama v1.40.0
github.com/Shopify/sarama v1.38.1
github.com/aws/aws-sdk-go v1.54.20
- github.com/aws/aws-sdk-go-v2 v1.30.3
+ github.com/aws/aws-sdk-go-v2 v1.32.2
github.com/aws/aws-sdk-go-v2/config v1.27.27
github.com/aws/aws-sdk-go-v2/service/ec2 v1.171.0
github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2
- github.com/aws/aws-sdk-go-v2/service/sns v1.31.3
- github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3
+ github.com/aws/aws-sdk-go-v2/service/sns v1.33.2
+ github.com/aws/aws-sdk-go-v2/service/sqs v1.36.2
github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874
github.com/confluentinc/confluent-kafka-go v1.9.2
github.com/confluentinc/confluent-kafka-go/v2 v2.5.0
@@ -151,8 +149,8 @@ require (
github.com/DataDog/datadog-agent/pkg/obfuscate v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.1 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
- github.com/DataDog/dd-trace-go/contrib/IBM/sarama/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/Shopify/sarama/v2 v2.0.0-20241021170900-f2acfa7aff40
+ github.com/DataDog/dd-trace-go/contrib/IBM/sarama/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/Shopify/sarama/v2 v2.0.0-20241024132757-00c300faaace
github.com/DataDog/go-sqllexer v0.0.16 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.6 // indirect
@@ -162,12 +160,12 @@ require (
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.27 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
- github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 // indirect
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/eventbridge v1.35.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16 // indirect
@@ -178,7 +176,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect
- github.com/aws/smithy-go v1.20.3 // indirect
+ github.com/aws/smithy-go v1.22.0 // indirect
github.com/bytedance/sonic v1.11.6 // indirect
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
diff --git a/instrumentation/internal/namingschematest/go.sum b/instrumentation/internal/namingschematest/go.sum
index 672cda1768..924f5dfffe 100644
--- a/instrumentation/internal/namingschematest/go.sum
+++ b/instrumentation/internal/namingschematest/go.sum
@@ -91,8 +91,8 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI
github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/aws/aws-sdk-go v1.54.20 h1:FZ2UcXya7bUkvkpf7TaPmiL7EubK0go1nlXGLRwEsoo=
github.com/aws/aws-sdk-go v1.54.20/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
-github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY=
-github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
+github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI=
+github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM=
github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90=
@@ -103,20 +103,20 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8 h1:u1KOU1S15ufyZqmH/rA3POkiRH6EcDANHj2xHRzq+zc=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8/go.mod h1:WPv2FRnkIOoDv/8j2gSUsI4qDc7392w5anFB/I89GZ8=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 h1:7edmS3VOBDhK00b/MwGtGglCm7hhwNYnjJs/PgFdMQE=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21/go.mod h1:Q9o5h4HoIWG8XfzxqiuK/CGUbepCJ8uTlaE3bAbxytQ=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 h1:nEhZKd1JQ4EB1tekcqW1oIVpDC1ZFrjrp/cLC5MXjFQ=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3/go.mod h1:q9vzW3Xr1KEXa8n4waHiFt1PrppNDlMymlYP+xpsFbY=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.171.0 h1:r398oizT1O8AdQGpnxOMOIstEAAb3PPW5QZsL8w4Ujc=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.171.0/go.mod h1:9KdiRVKTZyPRTlbX3i41FxTV+5OatZ7xOJCN4lleX7g=
-github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 h1:pjZzcXU25gsD2WmlmlayEsyXIWMVOK3//x4BXvK9c0U=
-github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3/go.mod h1:4ew4HelByABYyBE+8iU8Rzrp5PdBic5yd9nFMhbnwE8=
+github.com/aws/aws-sdk-go-v2/service/eventbridge v1.35.2 h1:FGrUiKglp0u7Zs19serLM/i22+IiwGxLCOJm4OtOMBI=
+github.com/aws/aws-sdk-go-v2/service/eventbridge v1.35.2/go.mod h1:OtWNmq2QGr/BUeJfs7ASAlzg0qjt96Su401dCdOks14=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE=
@@ -133,18 +133,18 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALw
github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE=
github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3 h1:7BK+k08c5r1oqqHeb6ye0affEQQJ/fimBTGZSjmpjwk=
github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3/go.mod h1:+mtHHxsylrf+kjxcbvfnu6jtyTT8Fa9BlqjQk5XJZ80=
-github.com/aws/aws-sdk-go-v2/service/sns v1.31.3 h1:eSTEdxkfle2G98FE+Xl3db/XAXXVTJPNQo9K/Ar8oAI=
-github.com/aws/aws-sdk-go-v2/service/sns v1.31.3/go.mod h1:1dn0delSO3J69THuty5iwP0US2Glt0mx2qBBlI13pvw=
-github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 h1:Vjqy5BZCOIsn4Pj8xzyqgGmsSqzz7y/WXbN3RgOoVrc=
-github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3/go.mod h1:L0enV3GCRd5iG9B64W35C4/hwsCB00Ib+DKVGTadKHI=
+github.com/aws/aws-sdk-go-v2/service/sns v1.33.2 h1:GeVRrB1aJsGdXxdPY6VOv0SWs+pfdeDlKgiBxi0+V6I=
+github.com/aws/aws-sdk-go-v2/service/sns v1.33.2/go.mod h1:c6Sj8zleZXYs4nyU3gpDKTzPWu7+t30YUXoLYRpbUvU=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.36.2 h1:kmbcoWgbzfh5a6rvfjOnfHSGEqD13qu1GfTPRZqg0FI=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.36.2/go.mod h1:/UPx74a3M0WYeT2yLQYG/qHhkPlPXd6TsppfGgy2COk=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ=
-github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
-github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
+github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
diff --git a/instrumentation/internal/namingschematest/segmentio_kafkago.go b/instrumentation/internal/namingschematest/segmentio_kafkago.go
index 60fb01f0a5..61df865d12 100644
--- a/instrumentation/internal/namingschematest/segmentio_kafkago.go
+++ b/instrumentation/internal/namingschematest/segmentio_kafkago.go
@@ -27,7 +27,7 @@ const (
type readerOpFn func(t *testing.T, r *segmentiotracer.Reader)
-func genIntegrationTestSpans(t *testing.T, mt mocktracer.Tracer, writerOp func(t *testing.T, w *segmentiotracer.Writer), readerOp readerOpFn, writerOpts []segmentiotracer.Option, readerOpts []segmentiotracer.Option) ([]*mocktracer.Span, []kafka.Message) {
+func genIntegrationTestSpans(t *testing.T, mt mocktracer.Tracer, writerOp func(t *testing.T, w *segmentiotracer.KafkaWriter), readerOp readerOpFn, writerOpts []segmentiotracer.Option, readerOpts []segmentiotracer.Option) ([]*mocktracer.Span, []kafka.Message) {
writtenMessages := []kafka.Message{}
// add some dummy values to broker/addr to test bootstrap servers.
@@ -81,7 +81,7 @@ func segmentioKafkaGoGenSpans() harness.GenSpansFn {
spans, _ := genIntegrationTestSpans(
t,
mt,
- func(t *testing.T, w *segmentiotracer.Writer) {
+ func(t *testing.T, w *segmentiotracer.KafkaWriter) {
err := w.WriteMessages(context.Background(), messagesToWrite...)
require.NoError(t, err, "Expected to write message to topic")
},
diff --git a/instrumentation/options/options.go b/instrumentation/options/options.go
index 4be38a67df..ab2cb8c296 100644
--- a/instrumentation/options/options.go
+++ b/instrumentation/options/options.go
@@ -5,6 +5,8 @@
package options
+import "github.com/DataDog/dd-trace-go/v2/internal"
+
// Copy should be used any time existing options are copied into
// a new locally scoped set of options. This is to avoid data races and
// accidental side effects.
@@ -29,3 +31,10 @@ func Expand[T any](opts []T, initialPosition, trailCapacity int) []T {
copy(dup[initialPosition:], opts)
return dup
}
+
+// GetBoolEnv is a workaround needed because of v2 changes that prevent contribs from
+// accessing the internal directory. This function should not be used if the internal
+// directory can be accessed directly.
+func GetBoolEnv(key string, def bool) bool {
+ return internal.BoolEnv(key, def)
+}
diff --git a/instrumentation/packages.go b/instrumentation/packages.go
index e42270b723..a1702ff31e 100644
--- a/instrumentation/packages.go
+++ b/instrumentation/packages.go
@@ -52,7 +52,7 @@ const (
PackageSyndtrGoLevelDB Package = "syndtr/goleveldb/leveldb"
PackageSirupsenLogrus Package = "sirupsen/logrus"
PackageShopifySarama Package = "Shopify/sarama"
- PackageSegmentioKafkaGo Package = "segmentio/kafka.go.v0"
+ PackageSegmentioKafkaGo Package = "segmentio/kafka-go"
PackageRedisGoRedisV9 Package = "redis/go-redis.v9"
PackageOlivereElasticV5 Package = "olivere/elastic"
PackageMiekgDNS Package = "miekg/dns"
diff --git a/instrumentation/testutils/grpc/go.mod b/instrumentation/testutils/grpc/go.mod
index 0b085a9ce0..b8c0285270 100644
--- a/instrumentation/testutils/grpc/go.mod
+++ b/instrumentation/testutils/grpc/go.mod
@@ -2,10 +2,8 @@ module github.com/DataDog/dd-trace-go/instrumentation/testutils/grpc/v2
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.2
)
diff --git a/internal/apps/gc-overhead/main.go b/internal/apps/gc-overhead/main.go
index ec50264773..23eab1a1c4 100644
--- a/internal/apps/gc-overhead/main.go
+++ b/internal/apps/gc-overhead/main.go
@@ -25,7 +25,7 @@ import (
"time"
httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2"
- "github.com/DataDog/dd-trace-go/internal/apps"
+ "github.com/DataDog/dd-trace-go/internal/apps/v2"
)
func main() {
diff --git a/internal/apps/go.mod b/internal/apps/go.mod
index dd7fa25ae7..e05e79c5d6 100644
--- a/internal/apps/go.mod
+++ b/internal/apps/go.mod
@@ -1,10 +1,10 @@
-module github.com/DataDog/dd-trace-go/internal/apps
+module github.com/DataDog/dd-trace-go/internal/apps/v2
-go 1.23.0
+go 1.22.0
require (
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
golang.org/x/sync v0.7.0
)
diff --git a/internal/apps/memory-leak/main.go b/internal/apps/memory-leak/main.go
index 703d8d5e81..4b8b997b1d 100644
--- a/internal/apps/memory-leak/main.go
+++ b/internal/apps/memory-leak/main.go
@@ -13,7 +13,7 @@ import (
"net/http"
"sync"
- "github.com/DataDog/dd-trace-go/internal/apps"
+ "github.com/DataDog/dd-trace-go/internal/apps/v2"
httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2"
)
diff --git a/internal/apps/unit-of-work/main.go b/internal/apps/unit-of-work/main.go
index 9318af235b..e9b474f46e 100644
--- a/internal/apps/unit-of-work/main.go
+++ b/internal/apps/unit-of-work/main.go
@@ -11,7 +11,7 @@ import (
"net/http"
"os"
- "github.com/DataDog/dd-trace-go/internal/apps"
+ "github.com/DataDog/dd-trace-go/internal/apps/v2"
httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2"
)
diff --git a/internal/apps/worker-pool-bottleneck/main.go b/internal/apps/worker-pool-bottleneck/main.go
index c802af9e51..93e9c074cd 100644
--- a/internal/apps/worker-pool-bottleneck/main.go
+++ b/internal/apps/worker-pool-bottleneck/main.go
@@ -29,7 +29,7 @@ import (
"time"
httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2"
- "github.com/DataDog/dd-trace-go/internal/apps"
+ "github.com/DataDog/dd-trace-go/internal/apps/v2"
)
func main() {
diff --git a/internal/appsec/README.md b/internal/appsec/README.md
index dc65d03551..cc60323ea6 100644
--- a/internal/appsec/README.md
+++ b/internal/appsec/README.md
@@ -19,7 +19,7 @@ API entrypoint present in `dd-trace-go/contrib` that support appsec is a call to
```mermaid
flowchart LR
-UserCode[User Code] --> Instrumentation --> IG{Instrumentation Gateway} -----> Listener
+UserCode[User Code] --> Instrumentation --> IG{Instrumentation
Gateway} --> Listener
```
Dyngo is a context-scoped event listener system that provide a way to listen dynamically to events that are happening in
@@ -28,7 +28,9 @@ the customer code and to react to configuration changes and hot-swap event liste
```mermaid
flowchart LR
-UserCode[User Code] --> appsec/emitter --> IG{dyngo} -----> appsec/listener
+UserCode[contrib] --> appsec/emitter --> IG{dyngo} --> appsec/listener --> WAF
+appsec/remoteconfig -->|config change| IG
+appsec/config -->|config change| IG
```
### Operation definition requirements
@@ -73,12 +75,12 @@ func StartExampleOperation(ctx context.Context, args ExampleOperationArgs) *Exam
}
op := &ExampleOperation{
Operation: dyngo.NewOperation(parent),
- }
+ }
return dyngo.StartOperation(op, args)
}
func (op *ExampleOperation) Finish(result ExampleOperationResult) {
- dyngo.FinishOperation(op, result)
+ dyngo.FinishOperation(op, result)
}
```
@@ -143,5 +145,68 @@ flowchart TD
> [!IMPORTANT]
> Please note that this is how the operation SHOULD be stacked. If the user code does not have a Top Level Operation
-> then nothing will be monitored. In this case an error log should be produced to explain thouroughly the issue to
+> then nothing will be monitored. In this case an error log should be produced to explain thoroughly the issue to
> the user.
+
+### Features
+
+Features represent abstract capabilities added to the tracer by AppSec. They are the bridge between the configuration and
+its sources
+and the actual code that needs to be run when a feature is enabled or disabled. Features are divided into two
+parts:
+
+- The builder that should be a pure function that takes the configuration and returns a feature object.
+- The listeners that are methods of the feature object that are called when an event from the Instrumentation Gateway is
+ triggered.
+
+From there, at each configuration change from any config source, the AppSec module will rebuild the feature objects,
+register the listeners to the Instrumentation Gateway, and hot-swap the root level operation with the new one,
+consequently making the whole AppSec code atomic.
+
+Here is an example of what a system with only two features, GRPC and HTTP WAF Protection, would look like:
+
+```mermaid
+flowchart TD
+
+ subgraph HTTP Feature
+ HTTPListener
+ HTTPBuilder
+ end
+
+ subgraph GRPC Feature
+ GRPCBuilder
+ GRPCListener
+ end
+
+ subgraph Configuration
+ RemoteConfig
+ EnvConfig
+ ...
+ end
+
+ Configuration -->|config change| AppSec
+
+ AppSec -->|rebuild| HTTPBuilder
+ AppSec -->|rebuild| GRPCBuilder
+ HTTPBuilder -->|register HTTP Listener| IG
+ GRPCBuilder -->|register GRPC Listener| IG
+
+
+
+ IG{Instrumentation
Gateway} -->|Start httpsec.HandlerOperation| HTTPListener
+ IG{Instrumentation
Gateway} -->|Start grpcsec.HandlerOperation| GRPCListener
+```
+
+All currently available features are the following ones:
+
+| Feature Name | Description |
+|------------------------|--------------------------------------------------------|
+| HTTP WAF Protection | Protects HTTP requests from attacks |
+| GRPC WAF Protection | Protects GRPC requests from attacks |
+| GraphQL WAF Protection | Protects GraphQL requests from attacks |
+| SQL RASP | Runtime Application Self-Protection for SQL injections |
+| OS RASP | Runtime Application Self-Protection for LFI attacks |
+| HTTP RASP | Runtime Application Self-Protection for SSRF attacks |
+| User Security | User blocking and login failures/success events |
+| WAF Context | Setup of the request scoped context system of the WAF |
+| Tracing | Bridge between the tracer and AppSec features |
diff --git a/internal/civisibility/integrations/gotesting/instrumentation_orchestrion.go b/internal/civisibility/integrations/gotesting/instrumentation_orchestrion.go
index 2b3345549b..cd715f812f 100644
--- a/internal/civisibility/integrations/gotesting/instrumentation_orchestrion.go
+++ b/internal/civisibility/integrations/gotesting/instrumentation_orchestrion.go
@@ -14,6 +14,7 @@ import (
"sync/atomic"
"testing"
"time"
+ _ "unsafe"
"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
"github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants"
@@ -29,6 +30,8 @@ import (
// ******************************************************************************************************************
// instrumentTestingM helper function to instrument internalTests and internalBenchmarks in a `*testing.M` instance.
+//
+//go:linkname instrumentTestingM
func instrumentTestingM(m *testing.M) func(exitCode int) {
// Check if CI Visibility was disabled using the kill switch before trying to initialize it
atomic.StoreInt32(&ciVisibilityEnabledValue, -1)
@@ -72,6 +75,8 @@ func instrumentTestingM(m *testing.M) func(exitCode int) {
}
// instrumentTestingTFunc helper function to instrument a testing function func(*testing.T)
+//
+//go:linkname instrumentTestingTFunc
func instrumentTestingTFunc(f func(*testing.T)) func(*testing.T) {
// Check if CI Visibility was disabled using the kill switch before instrumenting
if !isCiVisibilityEnabled() {
@@ -188,6 +193,8 @@ func instrumentTestingTFunc(f func(*testing.T)) func(*testing.T) {
}
// instrumentSetErrorInfo helper function to set an error in the `*testing.T, *testing.B, *testing.common` CI Visibility span
+//
+//go:linkname instrumentSetErrorInfo
func instrumentSetErrorInfo(tb testing.TB, errType string, errMessage string, skip int) {
// Check if CI Visibility was disabled using the kill switch before
if !isCiVisibilityEnabled() {
@@ -202,6 +209,8 @@ func instrumentSetErrorInfo(tb testing.TB, errType string, errMessage string, sk
}
// instrumentCloseAndSkip helper function to close and skip with a reason a `*testing.T, *testing.B, *testing.common` CI Visibility span
+//
+//go:linkname instrumentCloseAndSkip
func instrumentCloseAndSkip(tb testing.TB, skipReason string) {
// Check if CI Visibility was disabled using the kill switch before
if !isCiVisibilityEnabled() {
@@ -216,6 +225,8 @@ func instrumentCloseAndSkip(tb testing.TB, skipReason string) {
}
// instrumentSkipNow helper function to close and skip a `*testing.T, *testing.B, *testing.common` CI Visibility span
+//
+//go:linkname instrumentSkipNow
func instrumentSkipNow(tb testing.TB) {
// Check if CI Visibility was disabled using the kill switch before
if !isCiVisibilityEnabled() {
@@ -230,6 +241,8 @@ func instrumentSkipNow(tb testing.TB) {
}
// instrumentTestingBFunc helper function to instrument a benchmark function func(*testing.B)
+//
+//go:linkname instrumentTestingBFunc
func instrumentTestingBFunc(pb *testing.B, name string, f func(*testing.B)) (string, func(*testing.B)) {
// Check if CI Visibility was disabled using the kill switch before instrumenting
if !isCiVisibilityEnabled() {
diff --git a/internal/civisibility/integrations/gotesting/testcontroller_test.go b/internal/civisibility/integrations/gotesting/testcontroller_test.go
index 15222d47b1..8cb9d04fb9 100644
--- a/internal/civisibility/integrations/gotesting/testcontroller_test.go
+++ b/internal/civisibility/integrations/gotesting/testcontroller_test.go
@@ -103,7 +103,6 @@ func runFlakyTestRetriesTests(m *testing.M) {
// 1 TestRetryAlwaysFail + 10 retry tests from testing_test.go
// 1 TestNormalPassingAfterRetryAlwaysFail
// 1 TestEarlyFlakeDetection
- // 2 normal spans from testing_test.go
// check spans by resource name
checkSpansByResourceName(finishedSpans, "github.com/DataDog/dd-trace-go/v2/internal/civisibility/integrations/gotesting", 1)
@@ -438,11 +437,6 @@ func setUpHttpServer(flakyRetriesEnabled bool, earlyFlakyDetectionEnabled bool,
fmt.Printf("MockApi sending response: %v\n", response)
json.NewEncoder(w).Encode(&response)
- } else if r.URL.Path == "/api/v2/git/repository/search_commits" {
- w.Header().Set("Content-Type", "application/json")
- w.Write([]byte("{}"))
- } else if r.URL.Path == "/api/v2/git/repository/packfile" {
- w.WriteHeader(http.StatusAccepted)
} else {
http.NotFound(w, r)
}
diff --git a/internal/civisibility/integrations/gotesting/testing_test.go b/internal/civisibility/integrations/gotesting/testing_test.go
index 97d5dfb31c..8d0481fc16 100644
--- a/internal/civisibility/integrations/gotesting/testing_test.go
+++ b/internal/civisibility/integrations/gotesting/testing_test.go
@@ -11,12 +11,12 @@ import (
"net/http/httptest"
"runtime"
"slices"
+ "strconv"
"testing"
- ddhttp "github.com/DataDog/dd-trace-go/contrib/net/http/v2"
"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
"github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer"
- ddtracer "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
"github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants"
"github.com/stretchr/testify/assert"
@@ -76,6 +76,60 @@ func Test_Foo(gt *testing.T) {
}
}
+// Code inspired by contrib/net/http/roundtripper.go
+// It's not possible to import `contrib/net/http` package because it causes a circular dependency.
+// This is a simplified version of the code.
+type roundTripper struct {
+ base http.RoundTripper
+ namer func(*http.Request) string
+}
+
+func (rt *roundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ resourceName := rt.namer(req)
+ // Make a copy of the URL so we don't modify the outgoing request
+ url := *req.URL
+ url.User = nil // Do not include userinfo in the HTTPURL tag.
+ opts := []tracer.StartSpanOption{
+ tracer.SpanType(ext.SpanTypeHTTP),
+ tracer.ResourceName(resourceName),
+ tracer.Tag(ext.HTTPMethod, req.Method),
+ tracer.Tag(ext.HTTPURL, url.String()),
+ tracer.Tag(ext.SpanKind, ext.SpanKindClient),
+ tracer.Tag(ext.NetworkDestinationName, url.Hostname()),
+ }
+ span, ctx := tracer.StartSpanFromContext(req.Context(), "", opts...)
+ defer func() {
+ span.Finish()
+ }()
+ r2 := req.Clone(ctx)
+ res, err = rt.base.RoundTrip(r2)
+ if err != nil {
+ span.SetTag("http.errors", err.Error())
+ } else {
+ span.SetTag(ext.HTTPCode, strconv.Itoa(res.StatusCode))
+ // treat 5XX as errors
+ if res.StatusCode/100 == 5 {
+ span.SetTag("http.errors", res.Status)
+ span.SetTag(ext.Error, fmt.Errorf("%d: %s", res.StatusCode, http.StatusText(res.StatusCode)))
+ }
+ }
+ return res, err
+}
+
+// Code from contrib/net/http/roundtripper.go
+// It's not possible to import `contrib/net/http` package because it causes a circular dependency.
+func wrapRoundTripper(rt http.RoundTripper, namer func(*http.Request) string) http.RoundTripper {
+ if namer == nil {
+ namer = func(req *http.Request) string {
+ return ""
+ }
+ }
+ return &roundTripper{
+ base: rt,
+ namer: namer,
+ }
+}
+
// TestWithExternalCalls demonstrates testing with external HTTP calls.
func TestWithExternalCalls(gt *testing.T) {
assertTest(gt)
@@ -94,7 +148,7 @@ func TestWithExternalCalls(gt *testing.T) {
ctx := (*T)(t).Context()
// Wrap the default HTTP transport for tracing
- rt := ddhttp.WrapRoundTripper(http.DefaultTransport)
+ rt := wrapRoundTripper(http.DefaultTransport, nil)
client := &http.Client{
Transport: rt,
}
@@ -120,7 +174,7 @@ func TestWithExternalCalls(gt *testing.T) {
// we can also add custom tags to the test span by retrieving the
// context and call the `ddtracer.SpanFromContext` api
ctx := (*T)(t).Context()
- span, _ := ddtracer.SpanFromContext(ctx)
+ span, _ := tracer.SpanFromContext(ctx)
// Custom namer function for the HTTP request
customNamer := func(req *http.Request) string {
@@ -131,7 +185,7 @@ func TestWithExternalCalls(gt *testing.T) {
return value
}
- rt := ddhttp.WrapRoundTripper(http.DefaultTransport, ddhttp.RTWithResourceNamer(customNamer))
+ rt := wrapRoundTripper(http.DefaultTransport, customNamer)
client := &http.Client{
Transport: rt,
}
diff --git a/internal/civisibility/integrations/manual_api_mocktracer_test.go b/internal/civisibility/integrations/manual_api_mocktracer_test.go
index c2381c2394..1c497cbeae 100644
--- a/internal/civisibility/integrations/manual_api_mocktracer_test.go
+++ b/internal/civisibility/integrations/manual_api_mocktracer_test.go
@@ -279,7 +279,7 @@ func TestWithInnerFunc(t *testing.T) {
}
func testAssertions(assert *assert.Assertions, now time.Time, testSpan *mocktracer.Span) {
- assert.Equal(now, testSpan.StartTime())
+ assert.Equal(now.Unix(), testSpan.StartTime().Unix())
assert.Equal("my-module-framework.test", testSpan.OperationName())
tags := map[string]interface{}{
@@ -306,7 +306,7 @@ func testAssertions(assert *assert.Assertions, now time.Time, testSpan *mocktrac
assert.Contains(spanTags, constants.TestSourceStartLine)
assert.Contains(spanTags, constants.TestSourceEndLine)
// make sure the startLine < endLine
- assert.Less(spanTags[constants.TestSourceStartLine].(int), spanTags[constants.TestSourceEndLine].(int))
+ assert.Less(spanTags[constants.TestSourceStartLine].(float64), spanTags[constants.TestSourceEndLine].(float64))
commonAssertions(assert, testSpan)
}
diff --git a/internal/contrib/validationtest/go.mod b/internal/contrib/validationtest/go.mod
index 6399d67f8b..be10d8f6bc 100644
--- a/internal/contrib/validationtest/go.mod
+++ b/internal/contrib/validationtest/go.mod
@@ -2,12 +2,10 @@ module github.com/DataDog/dd-trace-go/v2/internal/contrib/validationtest
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/bradfitz/gomemcache/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/miekg/dns/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/bradfitz/gomemcache/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/miekg/dns/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/bradfitz/gomemcache v0.0.0-20230611145640-acc696258285
github.com/miekg/dns v1.1.59
github.com/stretchr/testify v1.9.0
diff --git a/internal/datastreams/processor.go b/internal/datastreams/processor.go
index 0f0bc38faa..c2f8c811a8 100644
--- a/internal/datastreams/processor.go
+++ b/internal/datastreams/processor.go
@@ -311,16 +311,16 @@ func (p *Processor) flushInput() {
func (p *Processor) run(tick <-chan time.Time) {
for {
select {
+ case <-p.stop:
+ // drop in flight payloads on the input channel
+ p.sendToAgent(p.flush(time.Now().Add(bucketDuration * 10)))
+ return
case now := <-tick:
p.sendToAgent(p.flush(now))
case done := <-p.flushRequest:
p.flushInput()
p.sendToAgent(p.flush(time.Now().Add(bucketDuration * 10)))
close(done)
- case <-p.stop:
- // drop in flight payloads on the input channel
- p.sendToAgent(p.flush(time.Now().Add(bucketDuration * 10)))
- return
default:
s := p.in.pop()
if s == nil {
diff --git a/internal/exectracetest/go.mod b/internal/exectracetest/go.mod
index e7b798b1af..2151b7a009 100644
--- a/internal/exectracetest/go.mod
+++ b/internal/exectracetest/go.mod
@@ -3,8 +3,8 @@ module github.com/DataDog/dd-trace-go/internal/exectracetest/v2
go 1.22.0
require (
- github.com/DataDog/dd-trace-go/contrib/database/sql/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/database/sql/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b
github.com/mattn/go-sqlite3 v1.14.18
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
diff --git a/internal/setup-smoke-test/go.mod b/internal/setup-smoke-test/go.mod
index baf3d9dfda..a5d2a1e5d7 100644
--- a/internal/setup-smoke-test/go.mod
+++ b/internal/setup-smoke-test/go.mod
@@ -1,17 +1,15 @@
-module github.com/DataDog/dd-trace-go/internal/setup-smoke-test
+module github.com/DataDog/dd-trace-go/v2/internal/setup-smoke-test
go 1.22.0
-require (
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
-)
+require github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
require (
github.com/DataDog/appsec-internal-go v1.8.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 // indirect
github.com/DataDog/datadog-go/v5 v5.3.0 // indirect
+ github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace
github.com/DataDog/go-libddwaf/v3 v3.4.0 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/gostackparse v0.7.0 // indirect
diff --git a/internal/setup-smoke-test/go.sum b/internal/setup-smoke-test/go.sum
index 5577bb9879..c7f8ec3479 100644
--- a/internal/setup-smoke-test/go.sum
+++ b/internal/setup-smoke-test/go.sum
@@ -6,10 +6,10 @@ github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 h1:LplNAmMgZvGU7
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0/go.mod h1:4Vo3SJ24uzfKHUHLoFa8t8o+LH+7TCQ7sPcZDtOpSP4=
github.com/DataDog/datadog-go/v5 v5.3.0 h1:2q2qjFOb3RwAZNU+ez27ZVDwErJv5/VpbBPprz7Z+s8=
github.com/DataDog/datadog-go/v5 v5.3.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
-github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40 h1:aMuIYy4vul5XEyd0u7D7xF1b810bRqAz8FyQaQ90HO0=
-github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40/go.mod h1:yQN7MWAbZf7NqIZqclpffIAZ7fSRgLuI2py6oR/Kzpw=
-github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3 h1:xRpFR9l4VaIhI+mRMQ8LgmgF9BdA7DyxqxG8/FG0NUo=
-github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3/go.mod h1:mVKhZvjuOBNZG7ozKrqt5v2A08CS1QfyoVZ2q6wsMAQ=
+github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace h1:hTM19Vui2eFTI8gcbmsIFARlYsCydh7/fb/XsduNDHw=
+github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241024132757-00c300faaace/go.mod h1:1dkf610m7AvqbZXkJozDuhmDg894T/l693ivQ2B1QwQ=
+github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace h1:2Ie6mY061GP/pjpxN8o+TxZJQtfVefzjkj8zsIOyI+Y=
+github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace/go.mod h1:mVKhZvjuOBNZG7ozKrqt5v2A08CS1QfyoVZ2q6wsMAQ=
github.com/DataDog/go-libddwaf/v3 v3.4.0 h1:NJ2W2vhYaOm1OWr1LJCbdgp7ezG/XLJcQKBmjFwhSuM=
github.com/DataDog/go-libddwaf/v3 v3.4.0/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4=
github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
diff --git a/internal/traceprof/traceproftest/go.mod b/internal/traceprof/traceproftest/go.mod
index 99f98b1aa8..1c0957d394 100644
--- a/internal/traceprof/traceproftest/go.mod
+++ b/internal/traceprof/traceproftest/go.mod
@@ -2,12 +2,10 @@ module github.com/DataDog/dd-trace-go/v2/internal/traceprof/traceproftest
go 1.22.0
-toolchain go1.23.1
-
require (
- github.com/DataDog/dd-trace-go/contrib/google.golang.org/grpc/v2 v2.0.0-beta.2.0.20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/contrib/julienschmidt/httprouter/v2 v2.0.0-20241021170900-f2acfa7aff40
- github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
+ github.com/DataDog/dd-trace-go/contrib/google.golang.org/grpc/v2 v2.0.0-beta.2.0.20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/contrib/julienschmidt/httprouter/v2 v2.0.0-20241024132757-00c300faaace
+ github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3.0.20241024132757-00c300faaace
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b
github.com/julienschmidt/httprouter v1.3.0
github.com/stretchr/testify v1.9.0
@@ -19,7 +17,6 @@ require (
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
- github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.0.0-20241021170900-f2acfa7aff40 // indirect
github.com/DataDog/dd-trace-go/instrumentation/testutils/grpc/v2 v2.0.0-20240909090048-d1f31b7b7397 // indirect
github.com/DataDog/go-libddwaf/v3 v3.4.0 // indirect
github.com/DataDog/go-sqllexer v0.0.11 // indirect
diff --git a/tools/fixmodules/go.mod b/tools/fixmodules/go.mod
index 8ea7b9f069..ed0d1842e4 100644
--- a/tools/fixmodules/go.mod
+++ b/tools/fixmodules/go.mod
@@ -1,5 +1,3 @@
module github.com/DataDog/dd-trace-go/v2/scripts/fixmodules
go 1.22.0
-
-toolchain go1.23.1
diff --git a/tools/v2check/_stage/ddtracetypes/ddtracetypes.go b/tools/v2check/_stage/ddtracetypes/ddtracetypes.go
index 950531cb2f..2c461af271 100644
--- a/tools/v2check/_stage/ddtracetypes/ddtracetypes.go
+++ b/tools/v2check/_stage/ddtracetypes/ddtracetypes.go
@@ -8,7 +8,7 @@ package main
import (
"time"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "github.com/DataDog/dd-trace-go/v2/ddtrace"
)
func main() {
diff --git a/tools/v2check/_stage/go.mod b/tools/v2check/_stage/go.mod
index 03a7081184..21f013e42d 100644
--- a/tools/v2check/_stage/go.mod
+++ b/tools/v2check/_stage/go.mod
@@ -2,8 +2,6 @@ module github.com/DataDog/dd-trace-go/v2/tools/v2check/_stage
go 1.22.0
-toolchain go1.23.1
-
require (
github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
gopkg.in/DataDog/dd-trace-go.v1 v1.67.0
diff --git a/tools/v2check/go.mod b/tools/v2check/go.mod
index 9eeff70bd8..a4cbc7e6d4 100644
--- a/tools/v2check/go.mod
+++ b/tools/v2check/go.mod
@@ -2,8 +2,6 @@ module github.com/DataDog/dd-trace-go/v2/tools/v2check
go 1.22.0
-toolchain go1.23.1
-
require (
github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.3
golang.org/x/tools v0.22.0