From 2420f21f9b46ff9d705a0f45985f2317d01ee9d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 04:05:39 +0000 Subject: [PATCH] Bump github.com/getsentry/sentry-go from 0.23.0 to 0.25.0 Bumps [github.com/getsentry/sentry-go](https://github.com/getsentry/sentry-go) from 0.23.0 to 0.25.0. - [Release notes](https://github.com/getsentry/sentry-go/releases) - [Changelog](https://github.com/getsentry/sentry-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-go/compare/v0.23.0...v0.25.0) --- updated-dependencies: - dependency-name: github.com/getsentry/sentry-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +- .../getsentry/sentry-go/CHANGELOG.md | 62 +++ .../github.com/getsentry/sentry-go/Makefile | 2 +- .../github.com/getsentry/sentry-go/client.go | 41 +- vendor/github.com/getsentry/sentry-go/dsn.go | 9 +- .../getsentry/sentry-go/integrations.go | 98 ++++ .../sentry-go/internal/traceparser/parser.go | 9 +- .../getsentry/sentry-go/profile_sample.go | 9 +- .../getsentry/sentry-go/profiler.go | 424 ++++++++++++------ .../getsentry/sentry-go/profiler_windows.go | 4 +- .../github.com/getsentry/sentry-go/sentry.go | 13 +- .../getsentry/sentry-go/traces_profiler.go | 32 +- .../github.com/getsentry/sentry-go/tracing.go | 22 +- .../getsentry/sentry-go/transport.go | 23 +- vendor/github.com/getsentry/sentry-go/util.go | 19 + vendor/modules.txt | 2 +- 17 files changed, 585 insertions(+), 190 deletions(-) diff --git a/go.mod b/go.mod index 45ef75e1..07a3c917 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/alphagov/router go 1.20 require ( - github.com/getsentry/sentry-go v0.23.0 + github.com/getsentry/sentry-go v0.25.0 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 github.com/onsi/ginkgo/v2 v2.12.0 github.com/onsi/gomega v1.27.10 diff --git a/go.sum b/go.sum index 1a760bf7..83d724eb 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654 h1:XOPLOMn/zT4jIgxfxSsoXPxkrzz0FaCHwp33x5POJ+Q= -github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= -github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/getsentry/sentry-go v0.25.0 h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI= +github.com/getsentry/sentry-go v0.25.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= diff --git a/vendor/github.com/getsentry/sentry-go/CHANGELOG.md b/vendor/github.com/getsentry/sentry-go/CHANGELOG.md index 662d5871..ca8f199d 100644 --- a/vendor/github.com/getsentry/sentry-go/CHANGELOG.md +++ b/vendor/github.com/getsentry/sentry-go/CHANGELOG.md @@ -1,10 +1,71 @@ # Changelog +## 0.25.0 + +The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.25.0. 
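The deprecations below drop two global constants in favour of `sentry.SDKVersion` and a per-client accessor. A minimal migration sketch, assuming the SDK has already been initialised (an empty DSN is used here so the example runs without a real project):

```go
package main

import (
	"fmt"
	"log"

	"github.com/getsentry/sentry-go"
)

func main() {
	// An empty DSN makes the SDK a no-op, which is enough for this sketch.
	if err := sentry.Init(sentry.ClientOptions{}); err != nil {
		log.Fatalf("sentry.Init: %s", err)
	}

	// Previously: sentry.Version (removed). The SDK version constant is now sentry.SDKVersion.
	fmt.Println("SDK version:", sentry.SDKVersion)

	// Previously: sentry.SDKIdentifier (removed). The identifier is now read from the
	// client, because framework integrations may override it via Client.SetSDKIdentifier.
	if client := sentry.CurrentHub().Client(); client != nil {
		fmt.Println("SDK identifier:", client.GetSDKIdentifier())
	}
}
```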
+ +### Deprecations + +As previously announced, this release removes two global constants from the SDK. + +- `sentry.Version` was removed. Use `sentry.SDKVersion` instead ([#727](https://github.com/getsentry/sentry-go/pull/727)) +- `sentry.SDKIdentifier` was removed. Use `Client.GetSDKIdentifier()` instead ([#727](https://github.com/getsentry/sentry-go/pull/727)) + +### Features + +- Add `ClientOptions.IgnoreTransactions`, which allows you to ignore specific transactions based on their name ([#717](https://github.com/getsentry/sentry-go/pull/717)) +- Add `ClientOptions.Tags`, which allows you to set global tags that are applied to all events. You can also define tags by setting `SENTRY_TAGS_` environment variables ([#718](https://github.com/getsentry/sentry-go/pull/718)) + +### Bug fixes + +- Fix an issue in the profiler that would cause an infinite loop if the duration of a transaction is longer than 30 seconds ([#724](https://github.com/getsentry/sentry-go/issues/724)) + +### Misc + +- `dsn.RequestHeaders()` is not to be removed, though it is still considered deprecated and should only be used when using a custom transport that sends events to the `/store` endpoint ([#720](https://github.com/getsentry/sentry-go/pull/720)) + +## 0.24.1 + +The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.24.1. + +### Bug fixes + +- Prevent a panic in `sentryotel.flushSpanProcessor()` ([(#711)](https://github.com/getsentry/sentry-go/pull/711)) +- Prevent a panic when setting the SDK identifier ([#715](https://github.com/getsentry/sentry-go/pull/715)) + +## 0.24.0 + +The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.24.0. + +### Deprecations + +- `sentry.Version` to be removed in 0.25.0. Use `sentry.SDKVersion` instead. +- `sentry.SDKIdentifier` to be removed in 0.25.0. Use `Client.GetSDKIdentifier()` instead. +- `dsn.RequestHeaders()` to be removed after 0.25.0, but no earlier than December 1, 2023. Requests to the `/envelope` endpoint are authenticated using the DSN in the envelope header. + +### Features + +- Run a single instance of the profiler instead of multiple ones for each Go routine ([#655](https://github.com/getsentry/sentry-go/pull/655)) +- Use the route path as the transaction names when using the Gin integration ([#675](https://github.com/getsentry/sentry-go/pull/675)) +- Set the SDK name accordingly when a framework integration is used ([#694](https://github.com/getsentry/sentry-go/pull/694)) +- Read release information (VCS revision) from `debug.ReadBuildInfo` ([#704](https://github.com/getsentry/sentry-go/pull/704)) + +### Bug fixes + +- [otel] Fix incorrect usage of `attributes.Value.AsString` ([#684](https://github.com/getsentry/sentry-go/pull/684)) +- Fix trace function name parsing in profiler on go1.21+ ([#695](https://github.com/getsentry/sentry-go/pull/695)) + +### Misc + +- Test against Go 1.21 ([#695](https://github.com/getsentry/sentry-go/pull/695)) +- Make tests more robust ([#698](https://github.com/getsentry/sentry-go/pull/698), [#699](https://github.com/getsentry/sentry-go/pull/699), [#700](https://github.com/getsentry/sentry-go/pull/700), [#702](https://github.com/getsentry/sentry-go/pull/702)) + ## 0.23.0 The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.23.0. 
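A minimal configuration sketch combining the 0.25.0 additions above (`IgnoreTransactions`, `Tags`, and the `SENTRY_TAGS_` environment prefix); the DSN, tag names, and tag values are placeholders:

```go
package main

import (
	"log"
	"os"

	"github.com/getsentry/sentry-go"
)

func main() {
	// Tags can also come from the environment: SENTRY_TAGS_region=eu-west-1
	// becomes the tag "region" on every event. Tags already set on a scope win.
	os.Setenv("SENTRY_TAGS_region", "eu-west-1")

	err := sentry.Init(sentry.ClientOptions{
		Dsn: "", // placeholder: an empty DSN discards events; set your project DSN instead
		// Transactions whose names match any of these regexps are dropped entirely.
		IgnoreTransactions: []string{`^GET /healthcheck$`},
		// Default tags applied to every event unless the event already has them.
		Tags:             map[string]string{"service": "router"},
		EnableTracing:    true,
		TracesSampleRate: 1.0,
	})
	if err != nil {
		log.Fatalf("sentry.Init: %s", err)
	}
}
```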
### Features + - Initial support for [Cron Monitoring](https://docs.sentry.io/product/crons/) ([#661](https://github.com/getsentry/sentry-go/pull/661)) This is how the basic usage of the feature looks like: @@ -56,6 +117,7 @@ The Sentry SDK team is happy to announce the immediate availability of Sentry Go - Expose SpanFromContext function ([#672](https://github.com/getsentry/sentry-go/pull/672)) ### Bug fixes + - Make `Span.Finish` a no-op when the span is already finished ([#660](https://github.com/getsentry/sentry-go/pull/660)) ## 0.22.0 diff --git a/vendor/github.com/getsentry/sentry-go/Makefile b/vendor/github.com/getsentry/sentry-go/Makefile index c1e27ebe..89523ac0 100644 --- a/vendor/github.com/getsentry/sentry-go/Makefile +++ b/vendor/github.com/getsentry/sentry-go/Makefile @@ -49,7 +49,7 @@ test-coverage: $(COVERAGE_REPORT_DIR) clean-report-dir ## Test with coverage en DIR_ABS=$$(python -c 'import os, sys; print(os.path.realpath(sys.argv[1]))' $${dir}) ; \ REPORT_NAME=$$(basename $${DIR_ABS}); \ (cd "$${dir}" && \ - $(GO) test -count=1 -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" ./... && \ + $(GO) test -count=1 -timeout $(TIMEOUT)s -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" ./... && \ cp $(COVERAGE_PROFILE) "$(COVERAGE_REPORT_DIR_ABS)/$${REPORT_NAME}_$(COVERAGE_PROFILE)" && \ $(GO) tool cover -html=$(COVERAGE_PROFILE) -o coverage.html); \ done; diff --git a/vendor/github.com/getsentry/sentry-go/client.go b/vendor/github.com/getsentry/sentry-go/client.go index 6a93fc14..0e14658a 100644 --- a/vendor/github.com/getsentry/sentry-go/client.go +++ b/vendor/github.com/getsentry/sentry-go/client.go @@ -17,6 +17,9 @@ import ( "github.com/getsentry/sentry-go/internal/debug" ) +// The identifier of the SDK. +const sdkIdentifier = "sentry.go" + // maxErrorDepth is the maximum number of errors reported in a chain of errors. // This protects the SDK from an arbitrarily long chain of wrapped errors. // @@ -137,6 +140,9 @@ type ClientOptions struct { // and if applicable, caught errors type and value. // If the match is found, then a whole event will be dropped. IgnoreErrors []string + // List of regexp strings that will be used to match against a transaction's + // name. If a match is found, then the transaction will be dropped. + IgnoreTransactions []string // If this flag is enabled, certain personally identifiable information (PII) is added by active integrations. // By default, no such data is sent. SendDefaultPII bool @@ -218,15 +224,20 @@ type ClientOptions struct { // is not optimized for long chains either. The top-level error together with a // stack trace is often the most useful information. MaxErrorDepth int + // Default event tags. These are overridden by tags set on a scope. + Tags map[string]string } // Client is the underlying processor that is used by the main API and Hub // instances. It must be created with NewClient. type Client struct { + mu sync.RWMutex options ClientOptions dsn *Dsn eventProcessors []EventProcessor integrations []Integration + sdkIdentifier string + sdkVersion string // Transport is read-only. Replacing the transport of an existing client is // not supported, create a new client instead. 
Transport Transport @@ -323,8 +334,10 @@ func NewClient(options ClientOptions) (*Client, error) { } client := Client{ - options: options, - dsn: dsn, + options: options, + dsn: dsn, + sdkIdentifier: sdkIdentifier, + sdkVersion: SDKVersion, } client.setupTransport() @@ -363,6 +376,8 @@ func (client *Client) setupIntegrations() { new(environmentIntegration), new(modulesIntegration), new(ignoreErrorsIntegration), + new(ignoreTransactionsIntegration), + new(globalTagsIntegration), } if client.options.Integrations != nil { @@ -396,7 +411,7 @@ func (client *Client) AddEventProcessor(processor EventProcessor) { } // Options return ClientOptions for the current Client. -func (client Client) Options() ClientOptions { +func (client *Client) Options() ClientOptions { // Note: internally, consider using `client.options` instead of `client.Options()` to avoid copying the object each time. return client.options } @@ -561,6 +576,20 @@ func (client *Client) EventFromCheckIn(checkIn *CheckIn, monitorConfig *MonitorC return event } +func (client *Client) SetSDKIdentifier(identifier string) { + client.mu.Lock() + defer client.mu.Unlock() + + client.sdkIdentifier = identifier +} + +func (client *Client) GetSDKIdentifier() string { + client.mu.RLock() + defer client.mu.RUnlock() + + return client.sdkIdentifier +} + // reverse reverses the slice a in place. func reverse(a []Exception) { for i := len(a)/2 - 1; i >= 0; i-- { @@ -646,7 +675,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod event.Platform = "go" event.Sdk = SdkInfo{ - Name: SDKIdentifier, + Name: client.GetSDKIdentifier(), Version: SDKVersion, Integrations: client.listIntegrations(), Packages: []SdkPackage{{ @@ -687,7 +716,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod return event } -func (client Client) listIntegrations() []string { +func (client *Client) listIntegrations() []string { integrations := make([]string, len(client.integrations)) for i, integration := range client.integrations { integrations[i] = integration.Name() @@ -695,7 +724,7 @@ func (client Client) listIntegrations() []string { return integrations } -func (client Client) integrationAlreadyInstalled(name string) bool { +func (client *Client) integrationAlreadyInstalled(name string) bool { for _, integration := range client.integrations { if integration.Name() == name { return true diff --git a/vendor/github.com/getsentry/sentry-go/dsn.go b/vendor/github.com/getsentry/sentry-go/dsn.go index e56f5401..52d9b9f7 100644 --- a/vendor/github.com/getsentry/sentry-go/dsn.go +++ b/vendor/github.com/getsentry/sentry-go/dsn.go @@ -196,10 +196,15 @@ func (dsn Dsn) GetAPIURL() *url.URL { return parsedURL } -// RequestHeaders returns all the necessary headers that have to be used in the transport. +// RequestHeaders returns all the necessary headers that have to be used in the transport when seinding events +// to the /store endpoint. +// +// Deprecated: This method shall only be used if you want to implement your own transport that sends events to +// the /store endpoint. If you're using the transport provided by the SDK, all necessary headers to authenticate +// against the /envelope endpoint are added automatically. 
func (dsn Dsn) RequestHeaders() map[string]string { auth := fmt.Sprintf("Sentry sentry_version=%s, sentry_timestamp=%d, "+ - "sentry_client=sentry.go/%s, sentry_key=%s", apiVersion, time.Now().Unix(), Version, dsn.publicKey) + "sentry_client=sentry.go/%s, sentry_key=%s", apiVersion, time.Now().Unix(), SDKVersion, dsn.publicKey) if dsn.secretKey != "" { auth = fmt.Sprintf("%s, sentry_secret=%s", auth, dsn.secretKey) diff --git a/vendor/github.com/getsentry/sentry-go/integrations.go b/vendor/github.com/getsentry/sentry-go/integrations.go index 5d87f6b2..046ef0a0 100644 --- a/vendor/github.com/getsentry/sentry-go/integrations.go +++ b/vendor/github.com/getsentry/sentry-go/integrations.go @@ -2,6 +2,7 @@ package sentry import ( "fmt" + "os" "regexp" "runtime" "runtime/debug" @@ -177,6 +178,40 @@ func getIgnoreErrorsSuspects(event *Event) []string { return suspects } +// ================================ +// Ignore Transactions Integration +// ================================ + +type ignoreTransactionsIntegration struct { + ignoreTransactions []*regexp.Regexp +} + +func (iei *ignoreTransactionsIntegration) Name() string { + return "IgnoreTransactions" +} + +func (iei *ignoreTransactionsIntegration) SetupOnce(client *Client) { + iei.ignoreTransactions = transformStringsIntoRegexps(client.options.IgnoreTransactions) + client.AddEventProcessor(iei.processor) +} + +func (iei *ignoreTransactionsIntegration) processor(event *Event, hint *EventHint) *Event { + suspect := event.Transaction + if suspect == "" { + return event + } + + for _, pattern := range iei.ignoreTransactions { + if pattern.Match([]byte(suspect)) { + Logger.Printf("Transaction dropped due to being matched by `IgnoreTransactions` option."+ + "| Value matched: %s | Filter used: %s", suspect, pattern) + return nil + } + } + + return event +} + // ================================ // Contextify Frames Integration // ================================ @@ -291,3 +326,66 @@ func (cfi *contextifyFramesIntegration) addContextLinesToFrame(frame Frame, line } return frame } + +// ================================ +// Global Tags Integration +// ================================ + +const envTagsPrefix = "SENTRY_TAGS_" + +type globalTagsIntegration struct { + tags map[string]string + envTags map[string]string +} + +func (ti *globalTagsIntegration) Name() string { + return "GlobalTags" +} + +func (ti *globalTagsIntegration) SetupOnce(client *Client) { + ti.tags = make(map[string]string, len(client.options.Tags)) + for k, v := range client.options.Tags { + ti.tags[k] = v + } + + ti.envTags = loadEnvTags() + + client.AddEventProcessor(ti.processor) +} + +func (ti *globalTagsIntegration) processor(event *Event, hint *EventHint) *Event { + if len(ti.tags) == 0 && len(ti.envTags) == 0 { + return event + } + + if event.Tags == nil { + event.Tags = make(map[string]string, len(ti.tags)+len(ti.envTags)) + } + + for k, v := range ti.tags { + if _, ok := event.Tags[k]; !ok { + event.Tags[k] = v + } + } + + for k, v := range ti.envTags { + if _, ok := event.Tags[k]; !ok { + event.Tags[k] = v + } + } + + return event +} + +func loadEnvTags() map[string]string { + tags := map[string]string{} + for _, pair := range os.Environ() { + parts := strings.Split(pair, "=") + if !strings.HasPrefix(parts[0], envTagsPrefix) { + continue + } + tag := strings.TrimPrefix(parts[0], envTagsPrefix) + tags[tag] = parts[1] + } + return tags +} diff --git a/vendor/github.com/getsentry/sentry-go/internal/traceparser/parser.go 
b/vendor/github.com/getsentry/sentry-go/internal/traceparser/parser.go index f42f28cc..8a7aab32 100644 --- a/vendor/github.com/getsentry/sentry-go/internal/traceparser/parser.go +++ b/vendor/github.com/getsentry/sentry-go/internal/traceparser/parser.go @@ -178,7 +178,14 @@ var createdByPrefix = []byte("created by ") func (f *Frame) Func() []byte { if bytes.HasPrefix(f.line1, createdByPrefix) { - return f.line1[len(createdByPrefix):] + // Since go1.21, the line ends with " in goroutine X", saying which goroutine created this one. + // We currently don't have use for that so just remove it. + var line = f.line1[len(createdByPrefix):] + var spaceAt = bytes.IndexByte(line, ' ') + if spaceAt < 0 { + return line + } + return line[:spaceAt] } var end = bytes.LastIndexByte(f.line1, '(') diff --git a/vendor/github.com/getsentry/sentry-go/profile_sample.go b/vendor/github.com/getsentry/sentry-go/profile_sample.go index a820fddb..65059872 100644 --- a/vendor/github.com/getsentry/sentry-go/profile_sample.go +++ b/vendor/github.com/getsentry/sentry-go/profile_sample.go @@ -28,7 +28,6 @@ type ( profileSample struct { ElapsedSinceStartNS uint64 `json:"elapsed_since_start_ns"` - QueueAddress string `json:"queue_address,omitempty"` StackID int `json:"stack_id"` ThreadID uint64 `json:"thread_id"` } @@ -41,10 +40,10 @@ type ( profileStack []int profileTrace struct { - Frames []*Frame `json:"frames"` - Samples []*profileSample `json:"samples"` - Stacks []profileStack `json:"stacks"` - ThreadMetadata map[string]profileThreadMetadata `json:"thread_metadata"` + Frames []*Frame `json:"frames"` + Samples []profileSample `json:"samples"` + Stacks []profileStack `json:"stacks"` + ThreadMetadata map[uint64]*profileThreadMetadata `json:"thread_metadata"` } profileInfo struct { diff --git a/vendor/github.com/getsentry/sentry-go/profiler.go b/vendor/github.com/getsentry/sentry-go/profiler.go index 6c77b4df..c0b858cc 100644 --- a/vendor/github.com/getsentry/sentry-go/profiler.go +++ b/vendor/github.com/getsentry/sentry-go/profiler.go @@ -1,156 +1,291 @@ package sentry import ( - "runtime" + "container/ring" "strconv" + + "runtime" + "sync" "sync/atomic" "time" "github.com/getsentry/sentry-go/internal/traceparser" ) -// Start collecting profile data and returns a function that stops profiling, producing a Trace. -// The returned stop function May return nil or an incomplete trace in case of a panic. -func startProfiling(startTime time.Time) (stopFunc func() *profilerResult) { +// Start a profiler that collects samples continuously, with a buffer of up to 30 seconds. +// Later, you can collect a slice from this buffer, producing a Trace. +func startProfiling(startTime time.Time) profiler { onProfilerStart() - // buffered channels to handle the recover() case without blocking - resultChannel := make(chan *profilerResult, 2) - stopSignal := make(chan struct{}, 2) + p := newProfiler(startTime) - go profilerGoroutine(startTime, resultChannel, stopSignal) + // Wait for the profiler to finish setting up before returning to the caller. + started := make(chan struct{}) + go p.run(started) - var goID = getCurrentGoID() + if _, ok := <-started; ok { + return p + } + return nil +} + +type profiler interface { + // GetSlice returns a slice of the profiled data between the given times. + GetSlice(startTime, endTime time.Time) *profilerResult + Stop(wait bool) +} + +type profilerResult struct { + callerGoID uint64 + trace *profileTrace +} + +func getCurrentGoID() uint64 { + // We shouldn't panic but let's be super safe. 
+ defer func() { + if err := recover(); err != nil { + Logger.Printf("Profiler panic in getCurrentGoID(): %v\n", err) + } + }() - return func() *profilerResult { - stopSignal <- struct{}{} - var result = <-resultChannel - if result != nil { - result.callerGoID = goID + // Buffer to read the stack trace into. We should be good with a small buffer because we only need the first line. + var stacksBuffer = make([]byte, 100) + var n = runtime.Stack(stacksBuffer, false) + if n > 0 { + var traces = traceparser.Parse(stacksBuffer[0:n]) + if traces.Length() > 0 { + var trace = traces.Item(0) + return trace.GoID() } - return result + } + return 0 +} + +const profilerSamplingRateHz = 101 // 101 Hz; not 100 Hz because of the lockstep sampling (https://stackoverflow.com/a/45471031/1181370) +const profilerSamplingRate = time.Second / profilerSamplingRateHz +const stackBufferMaxGrowth = 512 * 1024 +const stackBufferLimit = 10 * 1024 * 1024 +const profilerRuntimeLimit = 30 // seconds + +type profileRecorder struct { + startTime time.Time + stopSignal chan struct{} + stopped int64 + mutex sync.RWMutex + testProfilerPanic int64 + + // Map from runtime.StackRecord.Stack0 to an index in stacks. + stackIndexes map[string]int + stacks []profileStack + newStacks []profileStack // New stacks created in the current interation. + stackKeyBuffer []byte + + // Map from runtime.Frame.PC to an index in frames. + frameIndexes map[string]int + frames []*Frame + newFrames []*Frame // New frames created in the current interation. + + // We keep a ring buffer of 30 seconds worth of samples, so that we can later slice it. + // Each bucket is a slice of samples all taken at the same time. + samplesBucketsHead *ring.Ring + + // Buffer to read current stacks - will grow automatically up to stackBufferLimit. + stacksBuffer []byte +} + +func newProfiler(startTime time.Time) *profileRecorder { + // Pre-allocate the profile trace for the currently active number of routines & 100 ms worth of samples. + // Other coefficients are just guesses of what might be a good starting point to avoid allocs on short runs. + return &profileRecorder{ + startTime: startTime, + stopSignal: make(chan struct{}, 1), + + stackIndexes: make(map[string]int, 32), + stacks: make([]profileStack, 0, 32), + newStacks: make([]profileStack, 0, 32), + + frameIndexes: make(map[string]int, 128), + frames: make([]*Frame, 0, 128), + newFrames: make([]*Frame, 0, 128), + + samplesBucketsHead: ring.New(profilerRuntimeLimit * profilerSamplingRateHz), + + // A buffer of 2 KiB per goroutine stack looks like a good starting point (empirically determined). + stacksBuffer: make([]byte, runtime.NumGoroutine()*2048), } } // This allows us to test whether panic during profiling are handled correctly and don't block execution. // If the number is lower than 0, profilerGoroutine() will panic immedately. -// If the number is higher than 0, profiler.onTick() will panic after the given number of samples collected. +// If the number is higher than 0, profiler.onTick() will panic when the given samples-set index is being collected. var testProfilerPanic int64 +var profilerRunning int64 + +func (p *profileRecorder) run(started chan struct{}) { + // Code backup for manual test debugging: + // if !atomic.CompareAndSwapInt64(&profilerRunning, 0, 1) { + // panic("Only one profiler can be running at a time") + // } -func profilerGoroutine(startTime time.Time, result chan<- *profilerResult, stopSignal chan struct{}) { // We shouldn't panic but let's be super safe. 
defer func() { - _ = recover() - - // Make sure we don't block the caller of stopFn() even if we panic. - result <- nil - + if err := recover(); err != nil { + Logger.Printf("Profiler panic in run(): %v\n", err) + } atomic.StoreInt64(&testProfilerPanic, 0) + close(started) + p.stopSignal <- struct{}{} + atomic.StoreInt64(&p.stopped, 1) + atomic.StoreInt64(&profilerRunning, 0) }() - // Stop after 30 seconds unless stopped manually. - timeout := time.AfterFunc(30*time.Second, func() { stopSignal <- struct{}{} }) - defer timeout.Stop() - - var localTestProfilerPanic = atomic.LoadInt64(&testProfilerPanic) - if localTestProfilerPanic < 0 { + p.testProfilerPanic = atomic.LoadInt64(&testProfilerPanic) + if p.testProfilerPanic < 0 { + Logger.Printf("Profiler panicking during startup because testProfilerPanic == %v\n", p.testProfilerPanic) panic("This is an expected panic in profilerGoroutine() during tests") } - profiler := newProfiler(startTime) - profiler.testProfilerPanic = localTestProfilerPanic - // Collect the first sample immediately. - profiler.onTick() + p.onTick() // Periodically collect stacks, starting after profilerSamplingRate has passed. collectTicker := profilerTickerFactory(profilerSamplingRate) defer collectTicker.Stop() - var tickerChannel = collectTicker.Channel() + var tickerChannel = collectTicker.TickSource() - defer func() { - result <- &profilerResult{0, profiler.trace} - }() + started <- struct{}{} for { select { case <-tickerChannel: - profiler.onTick() - case <-stopSignal: + p.onTick() + collectTicker.Ticked() + case <-p.stopSignal: return } } } -type profilerResult struct { - callerGoID uint64 - trace *profileTrace +func (p *profileRecorder) Stop(wait bool) { + if atomic.LoadInt64(&p.stopped) == 1 { + return + } + p.stopSignal <- struct{}{} + if wait { + <-p.stopSignal + } } -func getCurrentGoID() uint64 { - // We shouldn't panic but let's be super safe. - defer func() { - _ = recover() - }() +func (p *profileRecorder) GetSlice(startTime, endTime time.Time) *profilerResult { + // Unlikely edge cases - profiler wasn't running at all or the given times are invalid in relation to each other. + if p.startTime.After(endTime) || startTime.After(endTime) { + return nil + } - // Buffer to read the stack trace into. We should be good with a small buffer because we only need the first line. 
- var stacksBuffer = make([]byte, 100) - var n = runtime.Stack(stacksBuffer, false) - if n > 0 { - var traces = traceparser.Parse(stacksBuffer[0:n]) - if traces.Length() > 0 { - var trace = traces.Item(0) - return trace.GoID() + var relativeStartNS = uint64(0) + if p.startTime.Before(startTime) { + relativeStartNS = uint64(startTime.Sub(p.startTime).Nanoseconds()) + } + var relativeEndNS = uint64(endTime.Sub(p.startTime).Nanoseconds()) + + samplesCount, bucketsReversed, trace := p.getBuckets(relativeStartNS, relativeEndNS) + if samplesCount == 0 { + return nil + } + + var result = &profilerResult{ + callerGoID: getCurrentGoID(), + trace: trace, + } + + trace.Samples = make([]profileSample, samplesCount) + trace.ThreadMetadata = make(map[uint64]*profileThreadMetadata, len(bucketsReversed[0].goIDs)) + var s = samplesCount - 1 + for _, bucket := range bucketsReversed { + var elapsedSinceStartNS = bucket.relativeTimeNS - relativeStartNS + for i, goID := range bucket.goIDs { + trace.Samples[s].ElapsedSinceStartNS = elapsedSinceStartNS + trace.Samples[s].ThreadID = goID + trace.Samples[s].StackID = bucket.stackIDs[i] + s-- + + if _, goroutineExists := trace.ThreadMetadata[goID]; !goroutineExists { + trace.ThreadMetadata[goID] = &profileThreadMetadata{ + Name: "Goroutine " + strconv.FormatUint(goID, 10), + } + } } } - return 0 + + return result } -func newProfiler(startTime time.Time) *profileRecorder { - // Pre-allocate the profile trace for the currently active number of routines & 100 ms worth of samples. - // Other coefficients are just guesses of what might be a good starting point to avoid allocs on short runs. - numRoutines := runtime.NumGoroutine() - trace := &profileTrace{ - Frames: make([]*Frame, 0, 32), - Samples: make([]*profileSample, 0, numRoutines*10), // 100 ms @ 101 Hz - Stacks: make([]profileStack, 0, 8), - ThreadMetadata: make(map[string]profileThreadMetadata, numRoutines), +// Collect all buckets of samples in the given time range while holding a read lock. +func (p *profileRecorder) getBuckets(relativeStartNS, relativeEndNS uint64) (samplesCount int, buckets []*profileSamplesBucket, trace *profileTrace) { + p.mutex.RLock() + defer p.mutex.RUnlock() + + // sampleBucketsHead points at the last stored bucket so it's a good starting point to search backwards for the end. + var end = p.samplesBucketsHead + for end.Value != nil && end.Value.(*profileSamplesBucket).relativeTimeNS > relativeEndNS { + end = end.Prev() } - return &profileRecorder{ - startTime: startTime, - trace: trace, - stackIndexes: make(map[string]int, cap(trace.Stacks)), - frameIndexes: make(map[string]int, cap(trace.Frames)), - // A buffer of 2 KiB per stack looks like a good starting point (empirically determined). - stacksBuffer: make([]byte, numRoutines*2048), + // Edge case - no items stored before the given endTime. + if end.Value == nil { + return 0, nil, nil } -} -const profilerSamplingRate = time.Second / 101 // 101 Hz; not 100 Hz because of the lockstep sampling (https://stackoverflow.com/a/45471031/1181370) -const stackBufferMaxGrowth = 512 * 1024 -const stackBufferLimit = 10 * 1024 * 1024 + { // Find the first item after the given startTime. 
+ var start = end + var prevBucket *profileSamplesBucket + samplesCount = 0 + buckets = make([]*profileSamplesBucket, 0, int64((relativeEndNS-relativeStartNS)/uint64(profilerSamplingRate.Nanoseconds()))+1) + for start.Value != nil { + var bucket = start.Value.(*profileSamplesBucket) + + // If this bucket's time is before the requests start time, don't collect it (and stop iterating further). + if bucket.relativeTimeNS < relativeStartNS { + break + } -type profileRecorder struct { - startTime time.Time - trace *profileTrace - testProfilerPanic int64 + // If this bucket time is greater than previous the bucket's time, we have exhausted the whole ring buffer + // before we were able to find the start time. That means the start time is not present and we must break. + // This happens if the slice duration exceeds the ring buffer capacity. + if prevBucket != nil && bucket.relativeTimeNS > prevBucket.relativeTimeNS { + break + } - // Buffer to read current stacks - will grow automatically up to stackBufferLimit. - stacksBuffer []byte + samplesCount += len(bucket.goIDs) + buckets = append(buckets, bucket) - // Map from runtime.StackRecord.Stack0 to an index trace.Stacks. - stackIndexes map[string]int + start = start.Prev() + prevBucket = bucket + } + } - // Map from runtime.Frame.PC to an index trace.Frames. - frameIndexes map[string]int + // Edge case - if the period requested was too short and we haven't collected enough samples. + if len(buckets) < 2 { + return 0, nil, nil + } + + trace = &profileTrace{ + Frames: p.frames, + Stacks: p.stacks, + } + return samplesCount, buckets, trace } func (p *profileRecorder) onTick() { elapsedNs := time.Since(p.startTime).Nanoseconds() - if p.testProfilerPanic > 0 && int64(len(p.trace.Samples)) > p.testProfilerPanic { - panic("This is an expected panic in Profiler.OnTick() during tests") + if p.testProfilerPanic > 0 { + Logger.Printf("Profiler testProfilerPanic == %v\n", p.testProfilerPanic) + if p.testProfilerPanic == 1 { + Logger.Println("Profiler panicking onTick()") + panic("This is an expected panic in Profiler.OnTick() during tests") + } + p.testProfilerPanic-- } records := p.collectRecords() @@ -187,51 +322,76 @@ func (p *profileRecorder) collectRecords() []byte { func (p *profileRecorder) processRecords(elapsedNs uint64, stacksBuffer []byte) { var traces = traceparser.Parse(stacksBuffer) - for i := traces.Length() - 1; i >= 0; i-- { - var stack = traces.Item(i) - threadIndex := p.addThread(stack.GoID()) - stackIndex := p.addStackTrace(stack) - if stackIndex < 0 { - return - } + var length = traces.Length() - p.trace.Samples = append(p.trace.Samples, &profileSample{ - ElapsedSinceStartNS: elapsedNs, - StackID: stackIndex, - ThreadID: threadIndex, - }) + // Shouldn't happen but let's be safe and don't store empty buckets. 
+ if length == 0 { + return } -} -func (p *profileRecorder) addThread(id uint64) uint64 { - index := strconv.FormatUint(id, 10) - if _, exists := p.trace.ThreadMetadata[index]; !exists { - p.trace.ThreadMetadata[index] = profileThreadMetadata{ - Name: "Goroutine " + index, - } + var bucket = &profileSamplesBucket{ + relativeTimeNS: elapsedNs, + stackIDs: make([]int, length), + goIDs: make([]uint64, length), + } + + // reset buffers + p.newFrames = p.newFrames[:0] + p.newStacks = p.newStacks[:0] + + for i := 0; i < length; i++ { + var stack = traces.Item(i) + bucket.stackIDs[i] = p.addStackTrace(stack) + bucket.goIDs[i] = stack.GoID() } - return id + + p.mutex.Lock() + defer p.mutex.Unlock() + + p.stacks = append(p.stacks, p.newStacks...) + p.frames = append(p.frames, p.newFrames...) + + p.samplesBucketsHead = p.samplesBucketsHead.Next() + p.samplesBucketsHead.Value = bucket } func (p *profileRecorder) addStackTrace(capturedStack traceparser.Trace) int { - // NOTE: Don't convert to string yet, it's expensive and compiler can avoid it when - // indexing into a map (only needs a copy when adding a new key to the map). - var key = capturedStack.UniqueIdentifier() + iter := capturedStack.Frames() + stack := make(profileStack, 0, iter.LengthUpperBound()) + + // Originally, we've used `capturedStack.UniqueIdentifier()` as a key but that was incorrect because it also + // contains function arguments and we want to group stacks by function name and file/line only. + // Instead, we need to parse frames and we use a list of their indexes as a key. + // We reuse the same buffer for each stack to avoid allocations; this is a hot spot. + var expectedBufferLen = cap(stack) * 5 // 4 bytes per frame + 1 byte for space + if cap(p.stackKeyBuffer) < expectedBufferLen { + p.stackKeyBuffer = make([]byte, 0, expectedBufferLen) + } else { + p.stackKeyBuffer = p.stackKeyBuffer[:0] + } - stackIndex, exists := p.stackIndexes[string(key)] - if !exists { - iter := capturedStack.Frames() - stack := make(profileStack, 0, iter.LengthUpperBound()) - for iter.HasNext() { - var frame = iter.Next() + for iter.HasNext() { + var frame = iter.Next() + if frameIndex := p.addFrame(frame); frameIndex >= 0 { + stack = append(stack, frameIndex) + + p.stackKeyBuffer = append(p.stackKeyBuffer, 0) // space - if frameIndex := p.addFrame(frame); frameIndex >= 0 { - stack = append(stack, frameIndex) + // The following code is just like binary.AppendUvarint() which isn't yet available in Go 1.18. 
+ x := uint64(frameIndex) + 1 + for x >= 0x80 { + p.stackKeyBuffer = append(p.stackKeyBuffer, byte(x)|0x80) + x >>= 7 } + p.stackKeyBuffer = append(p.stackKeyBuffer, byte(x)) } - stackIndex = len(p.trace.Stacks) - p.trace.Stacks = append(p.trace.Stacks, stack) - p.stackIndexes[string(key)] = stackIndex + } + + stackIndex, exists := p.stackIndexes[string(p.stackKeyBuffer)] + if !exists { + stackIndex = len(p.stacks) + len(p.newStacks) + p.newStacks = append(p.newStacks, stack) + p.stackIndexes[string(p.stackKeyBuffer)] = stackIndex } return stackIndex @@ -247,27 +407,41 @@ func (p *profileRecorder) addFrame(capturedFrame traceparser.Frame) int { module, function := splitQualifiedFunctionName(string(capturedFrame.Func())) file, line := capturedFrame.File() frame := newFrame(module, function, string(file), line) - frameIndex = len(p.trace.Frames) - p.trace.Frames = append(p.trace.Frames, &frame) + frameIndex = len(p.frames) + len(p.newFrames) + p.newFrames = append(p.newFrames, &frame) p.frameIndexes[string(key)] = frameIndex } return frameIndex } +type profileSamplesBucket struct { + relativeTimeNS uint64 + stackIDs []int + goIDs []uint64 +} + // A Ticker holds a channel that delivers “ticks” of a clock at intervals. type profilerTicker interface { + // Stop turns off a ticker. After Stop, no more ticks will be sent. Stop() - Channel() <-chan time.Time + + // TickSource returns a read-only channel of ticks. + TickSource() <-chan time.Time + + // Ticked is called by the Profiler after a tick is processed to notify the ticker. Used for testing. + Ticked() } type timeTicker struct { *time.Ticker } -func (t *timeTicker) Channel() <-chan time.Time { +func (t *timeTicker) TickSource() <-chan time.Time { return t.C } +func (t *timeTicker) Ticked() {} + func profilerTickerFactoryDefault(d time.Duration) profilerTicker { return &timeTicker{time.NewTicker(d)} } diff --git a/vendor/github.com/getsentry/sentry-go/profiler_windows.go b/vendor/github.com/getsentry/sentry-go/profiler_windows.go index fccb425d..33279824 100644 --- a/vendor/github.com/getsentry/sentry-go/profiler_windows.go +++ b/vendor/github.com/getsentry/sentry-go/profiler_windows.go @@ -17,8 +17,8 @@ func setTimeTickerResolution() { } } -var runOnce sync.Once +var setupTickerResolutionOnce sync.Once func onProfilerStart() { - runOnce.Do(setTimeTickerResolution) + setupTickerResolutionOnce.Do(setTimeTickerResolution) } diff --git a/vendor/github.com/getsentry/sentry-go/sentry.go b/vendor/github.com/getsentry/sentry-go/sentry.go index f5b3a6d1..3f8fef15 100644 --- a/vendor/github.com/getsentry/sentry-go/sentry.go +++ b/vendor/github.com/getsentry/sentry-go/sentry.go @@ -5,22 +5,13 @@ import ( "time" ) -// Deprecated: Use SDKVersion instead. -const Version = SDKVersion - -// Version is the version of the SDK. -const SDKVersion = "0.23.0" - -// The identifier of the SDK. -const SDKIdentifier = "sentry.go" +// The version of the SDK. +const SDKVersion = "0.25.0" // apiVersion is the minimum version of the Sentry API compatible with the // sentry-go SDK. const apiVersion = "7" -// userAgent is the User-Agent of outgoing HTTP requests. -const userAgent = "sentry-go/" + SDKVersion - // Init initializes the SDK with options. The returned error is non-nil if // options is invalid, for instance if a malformed DSN is provided. 
func Init(options ClientOptions) error { diff --git a/vendor/github.com/getsentry/sentry-go/traces_profiler.go b/vendor/github.com/getsentry/sentry-go/traces_profiler.go index a983262c..2655fe84 100644 --- a/vendor/github.com/getsentry/sentry-go/traces_profiler.go +++ b/vendor/github.com/getsentry/sentry-go/traces_profiler.go @@ -1,31 +1,41 @@ package sentry +import ( + "sync" + "time" +) + // Checks whether the transaction should be profiled (according to ProfilesSampleRate) // and starts a profiler if so. func (span *Span) sampleTransactionProfile() { var sampleRate = span.clientOptions().ProfilesSampleRate switch { case sampleRate < 0.0 || sampleRate > 1.0: - Logger.Printf("Skipping transaction profiling: ProfilesSampleRate out of range [0.0, 1.0]: %f", sampleRate) + Logger.Printf("Skipping transaction profiling: ProfilesSampleRate out of range [0.0, 1.0]: %f\n", sampleRate) case sampleRate == 0.0 || rng.Float64() >= sampleRate: - Logger.Printf("Skipping transaction profiling: ProfilesSampleRate is: %f", sampleRate) + Logger.Printf("Skipping transaction profiling: ProfilesSampleRate is: %f\n", sampleRate) default: - span.profiler = &_transactionProfiler{ - stopFunc: startProfiling(span.StartTime), + startProfilerOnce.Do(startGlobalProfiler) + if globalProfiler == nil { + Logger.Println("Skipping transaction profiling: the profiler couldn't be started") + } else { + span.collectProfile = collectTransactionProfile } } } -type transactionProfiler interface { - Finish(span *Span) *profileInfo -} +// transactionProfiler collects a profile for a given span. +type transactionProfiler func(span *Span) *profileInfo + +var startProfilerOnce sync.Once +var globalProfiler profiler -type _transactionProfiler struct { - stopFunc func() *profilerResult +func startGlobalProfiler() { + globalProfiler = startProfiling(time.Now()) } -func (tp *_transactionProfiler) Finish(span *Span) *profileInfo { - result := tp.stopFunc() +func collectTransactionProfile(span *Span) *profileInfo { + result := globalProfiler.GetSlice(span.StartTime, span.EndTime) if result == nil || result.trace == nil { return nil } diff --git a/vendor/github.com/getsentry/sentry-go/tracing.go b/vendor/github.com/getsentry/sentry-go/tracing.go index 38b810c0..dc968d67 100644 --- a/vendor/github.com/getsentry/sentry-go/tracing.go +++ b/vendor/github.com/getsentry/sentry-go/tracing.go @@ -58,8 +58,8 @@ type Span struct { //nolint: maligned // prefer readability over optimal memory recorder *spanRecorder // span context, can only be set on transactions contexts map[string]Context - // profiler instance if attached, nil otherwise. - profiler transactionProfiler + // collectProfile is a function that collects a profile of the current transaction. May be nil. + collectProfile transactionProfiler // a Once instance to make sure that Finish() is only called once. finishOnce sync.Once } @@ -333,12 +333,6 @@ func (s *Span) SetDynamicSamplingContext(dsc DynamicSamplingContext) { // doFinish runs the actual Span.Finish() logic. func (s *Span) doFinish() { - // For the timing to be correct, the profiler must be stopped before s.EndTime. 
- var profile *profileInfo - if s.profiler != nil { - profile = s.profiler.Finish(s) - } - if s.EndTime.IsZero() { s.EndTime = monotonicTimeSince(s.StartTime) } @@ -351,7 +345,9 @@ func (s *Span) doFinish() { return } - event.sdkMetaData.transactionProfile = profile + if s.collectProfile != nil { + event.sdkMetaData.transactionProfile = s.collectProfile(s) + } // TODO(tracing): add breadcrumbs // (see https://github.com/getsentry/sentry-python/blob/f6f3525f8812f609/sentry_sdk/tracing.py#L372) @@ -839,7 +835,7 @@ type SpanOption func(s *Span) // starting a span affects the span tree as a whole, potentially overwriting a // name set previously. // -// Deprecated: Use WithTransactionName() instead. +// Deprecated: To be removed in 0.26.0. Use WithTransactionName() instead. func TransactionName(name string) SpanOption { return WithTransactionName(name) } @@ -857,7 +853,7 @@ func WithTransactionName(name string) SpanOption { // OpName sets the operation name for a given span. // -// Deprecated: Use WithOpName() instead. +// Deprecated: To be removed in 0.26.0. Use WithOpName() instead. func OpName(name string) SpanOption { return WithOpName(name) } @@ -871,7 +867,7 @@ func WithOpName(name string) SpanOption { // TransctionSource sets the source of the transaction name. // -// Deprecated: Use WithTransactionSource() instead. +// Deprecated: To be removed in 0.26.0. Use WithTransactionSource() instead. func TransctionSource(source TransactionSource) SpanOption { return WithTransactionSource(source) } @@ -889,7 +885,7 @@ func WithTransactionSource(source TransactionSource) SpanOption { // SpanSampled updates the sampling flag for a given span. // -// Deprecated: Use WithSpanSampled() instead. +// Deprecated: To be removed in 0.26.0. Use WithSpanSampled() instead. func SpanSampled(sampled Sampled) SpanOption { return WithSpanSampled(sampled) } diff --git a/vendor/github.com/getsentry/sentry-go/transport.go b/vendor/github.com/getsentry/sentry-go/transport.go index b1be4421..3eee4584 100644 --- a/vendor/github.com/getsentry/sentry-go/transport.go +++ b/vendor/github.com/getsentry/sentry-go/transport.go @@ -209,7 +209,20 @@ func envelopeFromBody(event *Event, dsn *Dsn, sentAt time.Time, body json.RawMes func getRequestFromEvent(event *Event, dsn *Dsn) (r *http.Request, err error) { defer func() { if r != nil { - r.Header.Set("User-Agent", userAgent) + r.Header.Set("User-Agent", fmt.Sprintf("%s/%s", event.Sdk.Name, event.Sdk.Version)) + r.Header.Set("Content-Type", "application/x-sentry-envelope") + + auth := fmt.Sprintf("Sentry sentry_version=%s, "+ + "sentry_client=%s/%s, sentry_key=%s", apiVersion, event.Sdk.Name, event.Sdk.Version, dsn.publicKey) + + // The key sentry_secret is effectively deprecated and no longer needs to be set. + // However, since it was required in older self-hosted versions, + // it should still passed through to Sentry if set. + if dsn.secretKey != "" { + auth = fmt.Sprintf("%s, sentry_secret=%s", auth, dsn.secretKey) + } + + r.Header.Set("X-Sentry-Auth", auth) } }() body := getRequestBodyFromEvent(event) @@ -348,10 +361,6 @@ func (t *HTTPTransport) SendEvent(event *Event) { return } - for headerKey, headerValue := range t.dsn.RequestHeaders() { - request.Header.Set(headerKey, headerValue) - } - // <-t.buffer is equivalent to acquiring a lock to access the current batch. // A few lines below, t.buffer <- b releases the lock. 
// @@ -573,10 +582,6 @@ func (t *HTTPSyncTransport) SendEvent(event *Event) { return } - for headerKey, headerValue := range t.dsn.RequestHeaders() { - request.Header.Set(headerKey, headerValue) - } - var eventType string if event.Type == transactionType { eventType = "transaction" diff --git a/vendor/github.com/getsentry/sentry-go/util.go b/vendor/github.com/getsentry/sentry-go/util.go index e5717c63..47f81485 100644 --- a/vendor/github.com/getsentry/sentry-go/util.go +++ b/vendor/github.com/getsentry/sentry-go/util.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "os" + "runtime/debug" "strings" "time" @@ -66,6 +67,13 @@ func defaultRelease() (release string) { } } + if info, ok := debug.ReadBuildInfo(); ok { + buildInfoVcsRevision := revisionFromBuildInfo(info) + if len(buildInfoVcsRevision) > 0 { + return buildInfoVcsRevision + } + } + // Derive a version string from Git. Example outputs: // v1.0.1-0-g9de4 // v2.0-8-g77df-dirty @@ -89,3 +97,14 @@ func defaultRelease() (release string) { Logger.Printf("Using release from Git: %s", release) return release } + +func revisionFromBuildInfo(info *debug.BuildInfo) string { + for _, setting := range info.Settings { + if setting.Key == "vcs.revision" && setting.Value != "" { + Logger.Printf("Using release from debug info: %s", setting.Value) + return setting.Value + } + } + + return "" +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a4410afa..086195b2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,7 +7,7 @@ github.com/cespare/xxhash/v2 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/getsentry/sentry-go v0.23.0 +# github.com/getsentry/sentry-go v0.25.0 ## explicit; go 1.18 github.com/getsentry/sentry-go github.com/getsentry/sentry-go/internal/debug
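For context on the profiler rework in this bump: the SDK no longer runs one profiler per transaction. A single recorder now samples all goroutines at 101 Hz into a fixed-size `container/ring` buffer holding roughly 30 seconds of sample buckets, and each finished transaction slices its own time window out of that buffer. A simplified, self-contained sketch of that buffering pattern (the types and helpers here are illustrative, not the SDK's internals):

```go
package main

import (
	"container/ring"
	"fmt"
	"time"
)

// bucket stands in for one sampling tick's worth of data (stack IDs, goroutine IDs, ...).
type bucket struct {
	relativeTimeNS uint64
	payload        string
}

func main() {
	const hz, seconds = 101, 30
	// Fixed-capacity ring: once full, each new bucket overwrites the oldest one,
	// so memory stays bounded no matter how long the process keeps profiling.
	head := ring.New(hz * seconds)
	start := time.Now()

	store := func(p string) {
		head = head.Next()
		head.Value = &bucket{relativeTimeNS: uint64(time.Since(start).Nanoseconds()), payload: p}
	}

	// Pretend three sampling ticks happened.
	store("tick-1")
	store("tick-2")
	store("tick-3")

	// Collect the buckets inside [fromNS, toNS] by walking backwards from the newest
	// bucket; the walk is bounded to one revolution so a full ring cannot loop forever.
	collect := func(fromNS, toNS uint64) []*bucket {
		var out []*bucket
		r, n := head, head.Len()
		for i := 0; i < n; i++ {
			b, ok := r.Value.(*bucket)
			if !ok || b.relativeTimeNS < fromNS {
				break // empty slot or older than the window: nothing further back matters
			}
			if b.relativeTimeNS <= toNS {
				out = append(out, b) // newest first
			}
			r = r.Prev()
		}
		return out
	}

	window := collect(0, uint64(time.Since(start).Nanoseconds()))
	fmt.Printf("%d buckets in window, newest payload %q\n", len(window), window[0].payload)
}
```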