From b35a958776fac34756ec58185b0cc3bbe543e7a0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 6 Sep 2023 05:00:03 +0000
Subject: [PATCH] Bump github.com/getsentry/sentry-go from 0.23.0 to 0.24.0

Bumps [github.com/getsentry/sentry-go](https://github.com/getsentry/sentry-go) from 0.23.0 to 0.24.0.
- [Release notes](https://github.com/getsentry/sentry-go/releases)
- [Changelog](https://github.com/getsentry/sentry-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/sentry-go/compare/v0.23.0...v0.24.0)
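
The headline change in 0.24.0 is the profiler rework: a single shared profiler keeps a ring buffer of samples and each sampled transaction slices its own window out of it, rather than running one profiler per transaction. Below is a minimal sketch of opting in from a consuming app; `ProfilesSampleRate` and `Init` appear in this patch, while `EnableTracing`, `TracesSampleRate` and `StartTransaction` are existing sentry-go API, and the DSN value is a placeholder:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/getsentry/sentry-go"
)

func main() {
	// ProfilesSampleRate is relative to TracesSampleRate: profiles are only
	// collected for sampled transactions, which is what the new shared
	// profiler slices its sample buffer for.
	err := sentry.Init(sentry.ClientOptions{
		Dsn:                "https://examplePublicKey@o0.ingest.sentry.io/0", // placeholder
		EnableTracing:      true,
		TracesSampleRate:   1.0,
		ProfilesSampleRate: 1.0,
	})
	if err != nil {
		log.Fatalf("sentry.Init: %v", err)
	}
	defer sentry.Flush(2 * time.Second)

	tx := sentry.StartTransaction(context.Background(), "example-transaction")
	time.Sleep(50 * time.Millisecond) // simulate work so the profiler can take samples
	tx.Finish()
}
```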

---
updated-dependencies:
- dependency-name: github.com/getsentry/sentry-go
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
---
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 .../getsentry/sentry-go/CHANGELOG.md          |  23 +
 .../github.com/getsentry/sentry-go/Makefile   |   2 +-
 .../github.com/getsentry/sentry-go/client.go  |  34 +-
 vendor/github.com/getsentry/sentry-go/dsn.go  |   1 +
 .../sentry-go/internal/traceparser/parser.go  |   9 +-
 .../getsentry/sentry-go/profile_sample.go     |   9 +-
 .../getsentry/sentry-go/profiler.go           | 412 ++++++++++++------
 .../getsentry/sentry-go/profiler_windows.go   |   4 +-
 .../github.com/getsentry/sentry-go/sentry.go  |  11 +-
 .../getsentry/sentry-go/traces_profiler.go    |  32 +-
 .../github.com/getsentry/sentry-go/tracing.go |  14 +-
 .../getsentry/sentry-go/transport.go          |  23 +-
 vendor/github.com/getsentry/sentry-go/util.go |  19 +
 vendor/modules.txt                            |   2 +-
 16 files changed, 421 insertions(+), 180 deletions(-)

diff --git a/go.mod b/go.mod
index 45ef75e1..bdf97db5 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/alphagov/router
 go 1.20
 
 require (
-	github.com/getsentry/sentry-go v0.23.0
+	github.com/getsentry/sentry-go v0.24.0
 	github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8
 	github.com/onsi/ginkgo/v2 v2.12.0
 	github.com/onsi/gomega v1.27.10
diff --git a/go.sum b/go.sum
index 1a760bf7..3ff6fc08 100644
--- a/go.sum
+++ b/go.sum
@@ -8,8 +8,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654 h1:XOPLOMn/zT4jIgxfxSsoXPxkrzz0FaCHwp33x5POJ+Q=
-github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE=
-github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
+github.com/getsentry/sentry-go v0.24.0 h1:02b7qEmJ56EHGe9KFgjArjU/vG/aywm7Efgu+iPc01Y=
+github.com/getsentry/sentry-go v0.24.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
 github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
diff --git a/vendor/github.com/getsentry/sentry-go/CHANGELOG.md b/vendor/github.com/getsentry/sentry-go/CHANGELOG.md
index 662d5871..3ef66efa 100644
--- a/vendor/github.com/getsentry/sentry-go/CHANGELOG.md
+++ b/vendor/github.com/getsentry/sentry-go/CHANGELOG.md
@@ -1,10 +1,32 @@
 # Changelog
 
+## 0.24.0
+
+The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.24.0.
+
+### Features
+
+- Run a single instance of the profiler instead of multiple ones for each Go routine ([#655](https://github.com/getsentry/sentry-go/pull/655))
+- Use the route path as the transaction names when using the Gin integration ([#675](https://github.com/getsentry/sentry-go/pull/675))
+- Set the SDK name accordingly when a framework integration is used ([#694](https://github.com/getsentry/sentry-go/pull/694))
+- Read release information (VCS revision) from `debug.ReadBuildInfo` ([#704](https://github.com/getsentry/sentry-go/pull/704))
+
+### Bug fixes
+
+- [otel] Fix incorrect usage of `attributes.Value.AsString` ([#684](https://github.com/getsentry/sentry-go/pull/684))
+- Fix trace function name parsing in profiler on go1.21+ ([#695](https://github.com/getsentry/sentry-go/pull/695))
+
+### Misc
+
+- Test against Go 1.21 ([#695](https://github.com/getsentry/sentry-go/pull/695))
+- Make tests more robust ([#698](https://github.com/getsentry/sentry-go/pull/698), [#699](https://github.com/getsentry/sentry-go/pull/699), [#700](https://github.com/getsentry/sentry-go/pull/700), [#702](https://github.com/getsentry/sentry-go/pull/702))
+
 ## 0.23.0
 
 The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.23.0.
 
 ### Features
+
 - Initial support for [Cron Monitoring](https://docs.sentry.io/product/crons/) ([#661](https://github.com/getsentry/sentry-go/pull/661))
 
   This is what basic usage of the feature looks like:
@@ -56,6 +78,7 @@ The Sentry SDK team is happy to announce the immediate availability of Sentry Go
 - Expose SpanFromContext function ([#672](https://github.com/getsentry/sentry-go/pull/672))
 
 ### Bug fixes
+
 - Make `Span.Finish` a no-op when the span is already finished ([#660](https://github.com/getsentry/sentry-go/pull/660))
 
 ## 0.22.0
diff --git a/vendor/github.com/getsentry/sentry-go/Makefile b/vendor/github.com/getsentry/sentry-go/Makefile
index c1e27ebe..89523ac0 100644
--- a/vendor/github.com/getsentry/sentry-go/Makefile
+++ b/vendor/github.com/getsentry/sentry-go/Makefile
@@ -49,7 +49,7 @@ test-coverage: $(COVERAGE_REPORT_DIR) clean-report-dir  ## Test with coverage en
 	  DIR_ABS=$$(python -c 'import os, sys; print(os.path.realpath(sys.argv[1]))' $${dir}) ; \
 	  REPORT_NAME=$$(basename $${DIR_ABS}); \
 	  (cd "$${dir}" && \
-	    $(GO) test -count=1 -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" ./... && \
+	    $(GO) test -count=1 -timeout $(TIMEOUT)s -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" ./... && \
 		cp $(COVERAGE_PROFILE) "$(COVERAGE_REPORT_DIR_ABS)/$${REPORT_NAME}_$(COVERAGE_PROFILE)" && \
 	    $(GO) tool cover -html=$(COVERAGE_PROFILE) -o coverage.html); \
 	done;
diff --git a/vendor/github.com/getsentry/sentry-go/client.go b/vendor/github.com/getsentry/sentry-go/client.go
index 6a93fc14..3804c8a6 100644
--- a/vendor/github.com/getsentry/sentry-go/client.go
+++ b/vendor/github.com/getsentry/sentry-go/client.go
@@ -17,6 +17,9 @@ import (
 	"github.com/getsentry/sentry-go/internal/debug"
 )
 
+// The identifier of the SDK.
+const sdkIdentifier = "sentry.go"
+
 // maxErrorDepth is the maximum number of errors reported in a chain of errors.
 // This protects the SDK from an arbitrarily long chain of wrapped errors.
 //
@@ -223,10 +226,13 @@ type ClientOptions struct {
 // Client is the underlying processor that is used by the main API and Hub
 // instances. It must be created with NewClient.
 type Client struct {
+	mu              sync.RWMutex
 	options         ClientOptions
 	dsn             *Dsn
 	eventProcessors []EventProcessor
 	integrations    []Integration
+	sdkIdentifier   string
+	sdkVersion      string
 	// Transport is read-only. Replacing the transport of an existing client is
 	// not supported, create a new client instead.
 	Transport Transport
@@ -323,8 +329,10 @@ func NewClient(options ClientOptions) (*Client, error) {
 	}
 
 	client := Client{
-		options: options,
-		dsn:     dsn,
+		options:       options,
+		dsn:           dsn,
+		sdkIdentifier: sdkIdentifier,
+		sdkVersion:    SDKVersion,
 	}
 
 	client.setupTransport()
@@ -396,7 +404,7 @@ func (client *Client) AddEventProcessor(processor EventProcessor) {
 }
 
 // Options return ClientOptions for the current Client.
-func (client Client) Options() ClientOptions {
+func (client *Client) Options() ClientOptions {
 	// Note: internally, consider using `client.options` instead of `client.Options()` to avoid copying the object each time.
 	return client.options
 }
@@ -561,6 +569,20 @@ func (client *Client) EventFromCheckIn(checkIn *CheckIn, monitorConfig *MonitorC
 	return event
 }
 
+func (client *Client) SetSDKIdentifier(identifier string) {
+	client.mu.Lock()
+	defer client.mu.Unlock()
+
+	client.sdkIdentifier = identifier
+}
+
+func (client *Client) GetSDKIdentifier() string {
+	client.mu.RLock()
+	defer client.mu.RUnlock()
+
+	return client.sdkIdentifier
+}
+
 // reverse reverses the slice a in place.
 func reverse(a []Exception) {
 	for i := len(a)/2 - 1; i >= 0; i-- {
@@ -646,7 +668,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
 
 	event.Platform = "go"
 	event.Sdk = SdkInfo{
-		Name:         SDKIdentifier,
+		Name:         client.GetSDKIdentifier(),
 		Version:      SDKVersion,
 		Integrations: client.listIntegrations(),
 		Packages: []SdkPackage{{
@@ -687,7 +709,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
 	return event
 }
 
-func (client Client) listIntegrations() []string {
+func (client *Client) listIntegrations() []string {
 	integrations := make([]string, len(client.integrations))
 	for i, integration := range client.integrations {
 		integrations[i] = integration.Name()
@@ -695,7 +717,7 @@ func (client Client) listIntegrations() []string {
 	return integrations
 }
 
-func (client Client) integrationAlreadyInstalled(name string) bool {
+func (client *Client) integrationAlreadyInstalled(name string) bool {
 	for _, integration := range client.integrations {
 		if integration.Name() == name {
 			return true
diff --git a/vendor/github.com/getsentry/sentry-go/dsn.go b/vendor/github.com/getsentry/sentry-go/dsn.go
index e56f5401..02a62098 100644
--- a/vendor/github.com/getsentry/sentry-go/dsn.go
+++ b/vendor/github.com/getsentry/sentry-go/dsn.go
@@ -197,6 +197,7 @@ func (dsn Dsn) GetAPIURL() *url.URL {
 }
 
 // RequestHeaders returns all the necessary headers that have to be used in the transport.
+// Deprecated: To be removed after 0.25.0, but no earlier than December 1, 2023. Requests to /envelope are authenticated using the DSN in the envelope header itself.
 func (dsn Dsn) RequestHeaders() map[string]string {
 	auth := fmt.Sprintf("Sentry sentry_version=%s, sentry_timestamp=%d, "+
 		"sentry_client=sentry.go/%s, sentry_key=%s", apiVersion, time.Now().Unix(), Version, dsn.publicKey)
diff --git a/vendor/github.com/getsentry/sentry-go/internal/traceparser/parser.go b/vendor/github.com/getsentry/sentry-go/internal/traceparser/parser.go
index f42f28cc..8a7aab32 100644
--- a/vendor/github.com/getsentry/sentry-go/internal/traceparser/parser.go
+++ b/vendor/github.com/getsentry/sentry-go/internal/traceparser/parser.go
@@ -178,7 +178,14 @@ var createdByPrefix = []byte("created by ")
 
 func (f *Frame) Func() []byte {
 	if bytes.HasPrefix(f.line1, createdByPrefix) {
-		return f.line1[len(createdByPrefix):]
+		// Since go1.21, the line ends with " in goroutine X", saying which goroutine created this one.
+		// We currently have no use for that, so we just remove it.
+		var line = f.line1[len(createdByPrefix):]
+		var spaceAt = bytes.IndexByte(line, ' ')
+		if spaceAt < 0 {
+			return line
+		}
+		return line[:spaceAt]
 	}
 
 	var end = bytes.LastIndexByte(f.line1, '(')
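
For context, the parser change above handles the go1.21+ traceback format, where the "created by" line gained an " in goroutine N" suffix; only the creator function name before the first space is kept. A standalone sketch of the same trimming, illustrative only and not the SDK's API:

```go
package main

import (
	"bytes"
	"fmt"
)

// trimCreatedBy keeps only the function name from a "created by" traceback
// line, dropping the " in goroutine N" suffix introduced in go1.21.
func trimCreatedBy(line []byte) []byte {
	const prefix = "created by "
	if !bytes.HasPrefix(line, []byte(prefix)) {
		return line
	}
	rest := line[len(prefix):]
	if spaceAt := bytes.IndexByte(rest, ' '); spaceAt >= 0 {
		return rest[:spaceAt]
	}
	return rest
}

func main() {
	fmt.Printf("%s\n", trimCreatedBy([]byte("created by main.spawn in goroutine 1"))) // main.spawn
	fmt.Printf("%s\n", trimCreatedBy([]byte("created by main.spawn")))                // main.spawn (pre-1.21 format)
}
```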
diff --git a/vendor/github.com/getsentry/sentry-go/profile_sample.go b/vendor/github.com/getsentry/sentry-go/profile_sample.go
index a820fddb..65059872 100644
--- a/vendor/github.com/getsentry/sentry-go/profile_sample.go
+++ b/vendor/github.com/getsentry/sentry-go/profile_sample.go
@@ -28,7 +28,6 @@ type (
 
 	profileSample struct {
 		ElapsedSinceStartNS uint64 `json:"elapsed_since_start_ns"`
-		QueueAddress        string `json:"queue_address,omitempty"`
 		StackID             int    `json:"stack_id"`
 		ThreadID            uint64 `json:"thread_id"`
 	}
@@ -41,10 +40,10 @@ type (
 	profileStack []int
 
 	profileTrace struct {
-		Frames         []*Frame                         `json:"frames"`
-		Samples        []*profileSample                 `json:"samples"`
-		Stacks         []profileStack                   `json:"stacks"`
-		ThreadMetadata map[string]profileThreadMetadata `json:"thread_metadata"`
+		Frames         []*Frame                          `json:"frames"`
+		Samples        []profileSample                   `json:"samples"`
+		Stacks         []profileStack                    `json:"stacks"`
+		ThreadMetadata map[uint64]*profileThreadMetadata `json:"thread_metadata"`
 	}
 
 	profileInfo struct {
diff --git a/vendor/github.com/getsentry/sentry-go/profiler.go b/vendor/github.com/getsentry/sentry-go/profiler.go
index 6c77b4df..5517c11d 100644
--- a/vendor/github.com/getsentry/sentry-go/profiler.go
+++ b/vendor/github.com/getsentry/sentry-go/profiler.go
@@ -1,156 +1,277 @@
 package sentry
 
 import (
-	"runtime"
+	"container/ring"
 	"strconv"
+
+	"runtime"
+	"sync"
 	"sync/atomic"
 	"time"
 
 	"github.com/getsentry/sentry-go/internal/traceparser"
 )
 
-// Start collecting profile data and returns a function that stops profiling, producing a Trace.
-// The returned stop function May return nil or an incomplete trace in case of a panic.
-func startProfiling(startTime time.Time) (stopFunc func() *profilerResult) {
+// Start a profiler that collects samples continuously, with a buffer of up to 30 seconds.
+// Later, you can collect a slice from this buffer, producing a Trace.
+func startProfiling(startTime time.Time) profiler {
 	onProfilerStart()
 
-	// buffered channels to handle the recover() case without blocking
-	resultChannel := make(chan *profilerResult, 2)
-	stopSignal := make(chan struct{}, 2)
+	p := newProfiler(startTime)
 
-	go profilerGoroutine(startTime, resultChannel, stopSignal)
+	// Wait for the profiler to finish setting up before returning to the caller.
+	started := make(chan struct{})
+	go p.run(started)
 
-	var goID = getCurrentGoID()
+	if _, ok := <-started; ok {
+		return p
+	}
+	return nil
+}
 
-	return func() *profilerResult {
-		stopSignal <- struct{}{}
-		var result = <-resultChannel
-		if result != nil {
-			result.callerGoID = goID
+type profiler interface {
+	// GetSlice returns a slice of the profiled data between the given times.
+	GetSlice(startTime, endTime time.Time) *profilerResult
+	Stop(wait bool)
+}
+
+type profilerResult struct {
+	callerGoID uint64
+	trace      *profileTrace
+}
+
+func getCurrentGoID() uint64 {
+	// We shouldn't panic but let's be super safe.
+	defer func() {
+		if err := recover(); err != nil {
+			Logger.Printf("Profiler panic in getCurrentGoID(): %v\n", err)
 		}
-		return result
+	}()
+
+	// Buffer to read the stack trace into. We should be good with a small buffer because we only need the first line.
+	var stacksBuffer = make([]byte, 100)
+	var n = runtime.Stack(stacksBuffer, false)
+	if n > 0 {
+		var traces = traceparser.Parse(stacksBuffer[0:n])
+		if traces.Length() > 0 {
+			var trace = traces.Item(0)
+			return trace.GoID()
+		}
+	}
+	return 0
+}
+
+const profilerSamplingRateHz = 101 // 101 Hz; not 100 Hz because of the lockstep sampling (https://stackoverflow.com/a/45471031/1181370)
+const profilerSamplingRate = time.Second / profilerSamplingRateHz
+const stackBufferMaxGrowth = 512 * 1024
+const stackBufferLimit = 10 * 1024 * 1024
+const profilerRuntimeLimit = 30 // seconds
+
+type profileRecorder struct {
+	startTime         time.Time
+	stopSignal        chan struct{}
+	stopped           int64
+	mutex             sync.RWMutex
+	testProfilerPanic int64
+
+	// Map from runtime.StackRecord.Stack0 to an index in stacks.
+	stackIndexes   map[string]int
+	stacks         []profileStack
+	newStacks      []profileStack // New stacks created in the current iteration.
+	stackKeyBuffer []byte
+
+	// Map from runtime.Frame.PC to an index in frames.
+	frameIndexes map[string]int
+	frames       []*Frame
+	newFrames    []*Frame // New frames created in the current iteration.
+
+	// We keep a ring buffer of 30 seconds worth of samples, so that we can later slice it.
+	// Each bucket is a slice of samples all taken at the same time.
+	samplesBucketsHead *ring.Ring
+
+	// Buffer to read current stacks - will grow automatically up to stackBufferLimit.
+	stacksBuffer []byte
+}
+
+func newProfiler(startTime time.Time) *profileRecorder {
+	// Pre-allocate the profile trace for the currently active number of routines & 100 ms worth of samples.
+	// Other coefficients are just guesses of what might be a good starting point to avoid allocs on short runs.
+	return &profileRecorder{
+		startTime:  startTime,
+		stopSignal: make(chan struct{}, 1),
+
+		stackIndexes: make(map[string]int, 32),
+		stacks:       make([]profileStack, 0, 32),
+		newStacks:    make([]profileStack, 0, 32),
+
+		frameIndexes: make(map[string]int, 128),
+		frames:       make([]*Frame, 0, 128),
+		newFrames:    make([]*Frame, 0, 128),
+
+		samplesBucketsHead: ring.New(profilerRuntimeLimit * profilerSamplingRateHz),
+
+		// A buffer of 2 KiB per goroutine stack looks like a good starting point (empirically determined).
+		stacksBuffer: make([]byte, runtime.NumGoroutine()*2048),
 	}
 }
 
 // This allows us to test whether panics during profiling are handled correctly and don't block execution.
 // If the number is lower than 0, profilerGoroutine() will panic immediately.
-// If the number is higher than 0, profiler.onTick() will panic after the given number of samples collected.
+// If the number is higher than 0, profiler.onTick() will panic while collecting the sample set with that index.
 var testProfilerPanic int64
+var profilerRunning int64
+
+func (p *profileRecorder) run(started chan struct{}) {
+	// Code backup for manual test debugging:
+	// if !atomic.CompareAndSwapInt64(&profilerRunning, 0, 1) {
+	// 	panic("Only one profiler can be running at a time")
+	// }
 
-func profilerGoroutine(startTime time.Time, result chan<- *profilerResult, stopSignal chan struct{}) {
 	// We shouldn't panic but let's be super safe.
 	defer func() {
-		_ = recover()
-
-		// Make sure we don't block the caller of stopFn() even if we panic.
-		result <- nil
-
+		if err := recover(); err != nil {
+			Logger.Printf("Profiler panic in run(): %v\n", err)
+		}
 		atomic.StoreInt64(&testProfilerPanic, 0)
+		close(started)
+		p.stopSignal <- struct{}{}
+		atomic.StoreInt64(&p.stopped, 1)
+		atomic.StoreInt64(&profilerRunning, 0)
 	}()
 
-	// Stop after 30 seconds unless stopped manually.
-	timeout := time.AfterFunc(30*time.Second, func() { stopSignal <- struct{}{} })
-	defer timeout.Stop()
-
-	var localTestProfilerPanic = atomic.LoadInt64(&testProfilerPanic)
-	if localTestProfilerPanic < 0 {
+	p.testProfilerPanic = atomic.LoadInt64(&testProfilerPanic)
+	if p.testProfilerPanic < 0 {
+		Logger.Printf("Profiler panicking during startup because testProfilerPanic == %v\n", p.testProfilerPanic)
 		panic("This is an expected panic in profilerGoroutine() during tests")
 	}
 
-	profiler := newProfiler(startTime)
-	profiler.testProfilerPanic = localTestProfilerPanic
-
 	// Collect the first sample immediately.
-	profiler.onTick()
+	p.onTick()
 
 	// Periodically collect stacks, starting after profilerSamplingRate has passed.
 	collectTicker := profilerTickerFactory(profilerSamplingRate)
 	defer collectTicker.Stop()
-	var tickerChannel = collectTicker.Channel()
+	var tickerChannel = collectTicker.TickSource()
 
-	defer func() {
-		result <- &profilerResult{0, profiler.trace}
-	}()
+	started <- struct{}{}
 
 	for {
 		select {
 		case <-tickerChannel:
-			profiler.onTick()
-		case <-stopSignal:
+			p.onTick()
+			collectTicker.Ticked()
+		case <-p.stopSignal:
 			return
 		}
 	}
 }
 
-type profilerResult struct {
-	callerGoID uint64
-	trace      *profileTrace
+func (p *profileRecorder) Stop(wait bool) {
+	if atomic.LoadInt64(&p.stopped) == 1 {
+		return
+	}
+	p.stopSignal <- struct{}{}
+	if wait {
+		<-p.stopSignal
+	}
 }
 
-func getCurrentGoID() uint64 {
-	// We shouldn't panic but let's be super safe.
-	defer func() {
-		_ = recover()
-	}()
+func (p *profileRecorder) GetSlice(startTime, endTime time.Time) *profilerResult {
+	// Unlikely edge cases - profiler wasn't running at all or the given times are invalid in relation to each other.
+	if p.startTime.After(endTime) || startTime.After(endTime) {
+		return nil
+	}
 
-	// Buffer to read the stack trace into. We should be good with a small buffer because we only need the first line.
-	var stacksBuffer = make([]byte, 100)
-	var n = runtime.Stack(stacksBuffer, false)
-	if n > 0 {
-		var traces = traceparser.Parse(stacksBuffer[0:n])
-		if traces.Length() > 0 {
-			var trace = traces.Item(0)
-			return trace.GoID()
-		}
+	var relativeStartNS = uint64(0)
+	if p.startTime.Before(startTime) {
+		relativeStartNS = uint64(startTime.Sub(p.startTime).Nanoseconds())
 	}
-	return 0
-}
+	var relativeEndNS = uint64(endTime.Sub(p.startTime).Nanoseconds())
 
-func newProfiler(startTime time.Time) *profileRecorder {
-	// Pre-allocate the profile trace for the currently active number of routines & 100 ms worth of samples.
-	// Other coefficients are just guesses of what might be a good starting point to avoid allocs on short runs.
-	numRoutines := runtime.NumGoroutine()
-	trace := &profileTrace{
-		Frames:         make([]*Frame, 0, 32),
-		Samples:        make([]*profileSample, 0, numRoutines*10), // 100 ms @ 101 Hz
-		Stacks:         make([]profileStack, 0, 8),
-		ThreadMetadata: make(map[string]profileThreadMetadata, numRoutines),
+	samplesCount, bucketsReversed, trace := p.getBuckets(relativeStartNS, relativeEndNS)
+	if samplesCount == 0 {
+		return nil
 	}
 
-	return &profileRecorder{
-		startTime:    startTime,
-		trace:        trace,
-		stackIndexes: make(map[string]int, cap(trace.Stacks)),
-		frameIndexes: make(map[string]int, cap(trace.Frames)),
-		// A buffer of 2 KiB per stack looks like a good starting point (empirically determined).
-		stacksBuffer: make([]byte, numRoutines*2048),
+	var result = &profilerResult{
+		callerGoID: getCurrentGoID(),
+		trace:      trace,
+	}
+
+	trace.Samples = make([]profileSample, samplesCount)
+	trace.ThreadMetadata = make(map[uint64]*profileThreadMetadata, len(bucketsReversed[0].goIDs))
+	var s = samplesCount - 1
+	for _, bucket := range bucketsReversed {
+		var elapsedSinceStartNS = bucket.relativeTimeNS - relativeStartNS
+		for i, goID := range bucket.goIDs {
+			trace.Samples[s].ElapsedSinceStartNS = elapsedSinceStartNS
+			trace.Samples[s].ThreadID = goID
+			trace.Samples[s].StackID = bucket.stackIDs[i]
+			s--
+
+			if _, goroutineExists := trace.ThreadMetadata[goID]; !goroutineExists {
+				trace.ThreadMetadata[goID] = &profileThreadMetadata{
+					Name: "Goroutine " + strconv.FormatUint(goID, 10),
+				}
+			}
+		}
 	}
+
+	return result
 }
 
-const profilerSamplingRate = time.Second / 101 // 101 Hz; not 100 Hz because of the lockstep sampling (https://stackoverflow.com/a/45471031/1181370)
-const stackBufferMaxGrowth = 512 * 1024
-const stackBufferLimit = 10 * 1024 * 1024
+// Collect all buckets of samples in the given time range while holding a read lock.
+func (p *profileRecorder) getBuckets(relativeStartNS, relativeEndNS uint64) (samplesCount int, buckets []*profileSamplesBucket, trace *profileTrace) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
 
-type profileRecorder struct {
-	startTime         time.Time
-	trace             *profileTrace
-	testProfilerPanic int64
+	// samplesBucketsHead points at the last stored bucket so it's a good starting point to search backwards for the end.
+	var end = p.samplesBucketsHead
+	for end.Value != nil && end.Value.(*profileSamplesBucket).relativeTimeNS > relativeEndNS {
+		end = end.Prev()
+	}
 
-	// Buffer to read current stacks - will grow automatically up to stackBufferLimit.
-	stacksBuffer []byte
+	// Edge case - no items stored before the given endTime.
+	if end.Value == nil {
+		return 0, nil, nil
+	}
 
-	// Map from runtime.StackRecord.Stack0 to an index trace.Stacks.
-	stackIndexes map[string]int
+	// Search for the first item after the given startTime.
+	var start = end
+	samplesCount = 0
+	buckets = make([]*profileSamplesBucket, 0, int64((relativeEndNS-relativeStartNS)/uint64(profilerSamplingRate.Nanoseconds()))+1)
+	for start.Value != nil {
+		var bucket = start.Value.(*profileSamplesBucket)
+		if bucket.relativeTimeNS < relativeStartNS {
+			break
+		}
+		samplesCount += len(bucket.goIDs)
+		buckets = append(buckets, bucket)
+		start = start.Prev()
+	}
 
-	// Map from runtime.Frame.PC to an index trace.Frames.
-	frameIndexes map[string]int
+	// Edge case - if the period requested was too short and we haven't collected enough samples.
+	if len(buckets) < 2 {
+		return 0, nil, nil
+	}
+
+	trace = &profileTrace{
+		Frames: p.frames,
+		Stacks: p.stacks,
+	}
+	return samplesCount, buckets, trace
 }
 
 func (p *profileRecorder) onTick() {
 	elapsedNs := time.Since(p.startTime).Nanoseconds()
 
-	if p.testProfilerPanic > 0 && int64(len(p.trace.Samples)) > p.testProfilerPanic {
-		panic("This is an expected panic in Profiler.OnTick() during tests")
+	if p.testProfilerPanic > 0 {
+		Logger.Printf("Profiler testProfilerPanic == %v\n", p.testProfilerPanic)
+		if p.testProfilerPanic == 1 {
+			Logger.Println("Profiler panicking onTick()")
+			panic("This is an expected panic in Profiler.OnTick() during tests")
+		}
+		p.testProfilerPanic--
 	}
 
 	records := p.collectRecords()
@@ -187,51 +308,76 @@ func (p *profileRecorder) collectRecords() []byte {
 
 func (p *profileRecorder) processRecords(elapsedNs uint64, stacksBuffer []byte) {
 	var traces = traceparser.Parse(stacksBuffer)
-	for i := traces.Length() - 1; i >= 0; i-- {
-		var stack = traces.Item(i)
-		threadIndex := p.addThread(stack.GoID())
-		stackIndex := p.addStackTrace(stack)
-		if stackIndex < 0 {
-			return
-		}
+	var length = traces.Length()
 
-		p.trace.Samples = append(p.trace.Samples, &profileSample{
-			ElapsedSinceStartNS: elapsedNs,
-			StackID:             stackIndex,
-			ThreadID:            threadIndex,
-		})
+	// Shouldn't happen, but let's be safe and not store empty buckets.
+	if length == 0 {
+		return
 	}
-}
 
-func (p *profileRecorder) addThread(id uint64) uint64 {
-	index := strconv.FormatUint(id, 10)
-	if _, exists := p.trace.ThreadMetadata[index]; !exists {
-		p.trace.ThreadMetadata[index] = profileThreadMetadata{
-			Name: "Goroutine " + index,
-		}
+	var bucket = &profileSamplesBucket{
+		relativeTimeNS: elapsedNs,
+		stackIDs:       make([]int, length),
+		goIDs:          make([]uint64, length),
+	}
+
+	// reset buffers
+	p.newFrames = p.newFrames[:0]
+	p.newStacks = p.newStacks[:0]
+
+	for i := 0; i < length; i++ {
+		var stack = traces.Item(i)
+		bucket.stackIDs[i] = p.addStackTrace(stack)
+		bucket.goIDs[i] = stack.GoID()
 	}
-	return id
+
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	p.stacks = append(p.stacks, p.newStacks...)
+	p.frames = append(p.frames, p.newFrames...)
+
+	p.samplesBucketsHead = p.samplesBucketsHead.Next()
+	p.samplesBucketsHead.Value = bucket
 }
 
 func (p *profileRecorder) addStackTrace(capturedStack traceparser.Trace) int {
-	// NOTE: Don't convert to string yet, it's expensive and compiler can avoid it when
-	//       indexing into a map (only needs a copy when adding a new key to the map).
-	var key = capturedStack.UniqueIdentifier()
+	iter := capturedStack.Frames()
+	stack := make(profileStack, 0, iter.LengthUpperBound())
+
+	// Originally, we used `capturedStack.UniqueIdentifier()` as the key, but that was incorrect because it also
+	// contains function arguments, whereas we want to group stacks by function name and file/line only.
+	// Instead, we parse the frames and use a list of their indexes as the key.
+	// We reuse the same buffer for each stack to avoid allocations; this is a hot spot.
+	var expectedBufferLen = cap(stack) * 5 // 4 bytes per frame + 1 byte for space
+	if cap(p.stackKeyBuffer) < expectedBufferLen {
+		p.stackKeyBuffer = make([]byte, 0, expectedBufferLen)
+	} else {
+		p.stackKeyBuffer = p.stackKeyBuffer[:0]
+	}
 
-	stackIndex, exists := p.stackIndexes[string(key)]
-	if !exists {
-		iter := capturedStack.Frames()
-		stack := make(profileStack, 0, iter.LengthUpperBound())
-		for iter.HasNext() {
-			var frame = iter.Next()
+	for iter.HasNext() {
+		var frame = iter.Next()
+		if frameIndex := p.addFrame(frame); frameIndex >= 0 {
+			stack = append(stack, frameIndex)
+
+			p.stackKeyBuffer = append(p.stackKeyBuffer, 0) // space
 
-			if frameIndex := p.addFrame(frame); frameIndex >= 0 {
-				stack = append(stack, frameIndex)
+			// The following code is just like binary.AppendUvarint() which isn't yet available in Go 1.18.
+			x := uint64(frameIndex) + 1
+			for x >= 0x80 {
+				p.stackKeyBuffer = append(p.stackKeyBuffer, byte(x)|0x80)
+				x >>= 7
 			}
+			p.stackKeyBuffer = append(p.stackKeyBuffer, byte(x))
 		}
-		stackIndex = len(p.trace.Stacks)
-		p.trace.Stacks = append(p.trace.Stacks, stack)
-		p.stackIndexes[string(key)] = stackIndex
+	}
+
+	stackIndex, exists := p.stackIndexes[string(p.stackKeyBuffer)]
+	if !exists {
+		stackIndex = len(p.stacks) + len(p.newStacks)
+		p.newStacks = append(p.newStacks, stack)
+		p.stackIndexes[string(p.stackKeyBuffer)] = stackIndex
 	}
 
 	return stackIndex
@@ -247,27 +393,41 @@ func (p *profileRecorder) addFrame(capturedFrame traceparser.Frame) int {
 		module, function := splitQualifiedFunctionName(string(capturedFrame.Func()))
 		file, line := capturedFrame.File()
 		frame := newFrame(module, function, string(file), line)
-		frameIndex = len(p.trace.Frames)
-		p.trace.Frames = append(p.trace.Frames, &frame)
+		frameIndex = len(p.frames) + len(p.newFrames)
+		p.newFrames = append(p.newFrames, &frame)
 		p.frameIndexes[string(key)] = frameIndex
 	}
 	return frameIndex
 }
 
+type profileSamplesBucket struct {
+	relativeTimeNS uint64
+	stackIDs       []int
+	goIDs          []uint64
+}
+
 // A Ticker holds a channel that delivers “ticks” of a clock at intervals.
 type profilerTicker interface {
+	// Stop turns off a ticker. After Stop, no more ticks will be sent.
 	Stop()
-	Channel() <-chan time.Time
+
+	// TickSource returns a read-only channel of ticks.
+	TickSource() <-chan time.Time
+
+	// Ticked is called by the Profiler after a tick is processed to notify the ticker. Used for testing.
+	Ticked()
 }
 
 type timeTicker struct {
 	*time.Ticker
 }
 
-func (t *timeTicker) Channel() <-chan time.Time {
+func (t *timeTicker) TickSource() <-chan time.Time {
 	return t.C
 }
 
+func (t *timeTicker) Ticked() {}
+
 func profilerTickerFactoryDefault(d time.Duration) profilerTicker {
 	return &timeTicker{time.NewTicker(d)}
 }
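
The rewritten profiler above keeps a fixed-size ring of per-tick sample buckets and walks it backwards to slice out a transaction's time window. A simplified sketch of that buffer discipline using container/ring; the bucket type and the constants are stand-ins mirroring profileSamplesBucket, profilerRuntimeLimit and profilerSamplingRateHz:

```go
package main

import (
	"container/ring"
	"fmt"
)

// bucket stands in for profileSamplesBucket: one entry per sampling tick.
type bucket struct {
	relativeTimeNS uint64
}

func main() {
	// 30 s of buffer at 101 Hz, like profilerRuntimeLimit * profilerSamplingRateHz.
	head := ring.New(30 * 101)

	// Writer side: advance the head and overwrite the oldest slot on every tick.
	for t := uint64(0); t < 5; t++ {
		head = head.Next()
		head.Value = &bucket{relativeTimeNS: t * 9_900_990} // ~101 Hz expressed in ns
	}

	// Reader side: walk backwards from the head, collecting buckets that fall
	// inside the requested window, which is the traversal getBuckets() performs.
	const windowStartNS = 20_000_000
	for p := head; p.Value != nil; p = p.Prev() {
		b := p.Value.(*bucket)
		if b.relativeTimeNS < windowStartNS {
			break
		}
		fmt.Println("bucket at", b.relativeTimeNS, "ns")
	}
}
```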
diff --git a/vendor/github.com/getsentry/sentry-go/profiler_windows.go b/vendor/github.com/getsentry/sentry-go/profiler_windows.go
index fccb425d..33279824 100644
--- a/vendor/github.com/getsentry/sentry-go/profiler_windows.go
+++ b/vendor/github.com/getsentry/sentry-go/profiler_windows.go
@@ -17,8 +17,8 @@ func setTimeTickerResolution() {
 	}
 }
 
-var runOnce sync.Once
+var setupTickerResolutionOnce sync.Once
 
 func onProfilerStart() {
-	runOnce.Do(setTimeTickerResolution)
+	setupTickerResolutionOnce.Do(setTimeTickerResolution)
 }
diff --git a/vendor/github.com/getsentry/sentry-go/sentry.go b/vendor/github.com/getsentry/sentry-go/sentry.go
index f5b3a6d1..91adce8c 100644
--- a/vendor/github.com/getsentry/sentry-go/sentry.go
+++ b/vendor/github.com/getsentry/sentry-go/sentry.go
@@ -5,22 +5,21 @@ import (
 	"time"
 )
 
-// Deprecated: Use SDKVersion instead.
+// The version of the SDK.
+// Deprecated: To be removed in 0.25.0. Use SDKVersion instead.
 const Version = SDKVersion
 
-// Version is the version of the SDK.
-const SDKVersion = "0.23.0"
+// The version of the SDK.
+const SDKVersion = "0.24.0"
 
 // The identifier of the SDK.
+// Deprecated: To be removed in 0.25.0. Use Client.GetSDKIdentifier() instead.
 const SDKIdentifier = "sentry.go"
 
 // apiVersion is the minimum version of the Sentry API compatible with the
 // sentry-go SDK.
 const apiVersion = "7"
 
-// userAgent is the User-Agent of outgoing HTTP requests.
-const userAgent = "sentry-go/" + SDKVersion
-
 // Init initializes the SDK with options. The returned error is non-nil if
 // options is invalid, for instance if a malformed DSN is provided.
 func Init(options ClientOptions) error {
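
With the SDKIdentifier constant deprecated, the SDK name reported on events is now per-client state via the new Client.SetSDKIdentifier/GetSDKIdentifier pair, which is how framework integrations (e.g. the Gin integration mentioned in the changelog) rename themselves. A rough usage sketch; the empty DSN and the "sentry.go.example" identifier are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/getsentry/sentry-go"
)

func main() {
	// An empty DSN yields a client that discards events, which is enough here.
	client, err := sentry.NewClient(sentry.ClientOptions{Dsn: ""})
	if err != nil {
		log.Fatalf("sentry.NewClient: %v", err)
	}

	fmt.Println(client.GetSDKIdentifier()) // "sentry.go" by default

	// A framework integration would call this with its own identifier.
	client.SetSDKIdentifier("sentry.go.example")
	fmt.Println(client.GetSDKIdentifier()) // "sentry.go.example"
}
```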
diff --git a/vendor/github.com/getsentry/sentry-go/traces_profiler.go b/vendor/github.com/getsentry/sentry-go/traces_profiler.go
index a983262c..2655fe84 100644
--- a/vendor/github.com/getsentry/sentry-go/traces_profiler.go
+++ b/vendor/github.com/getsentry/sentry-go/traces_profiler.go
@@ -1,31 +1,41 @@
 package sentry
 
+import (
+	"sync"
+	"time"
+)
+
 // Checks whether the transaction should be profiled (according to ProfilesSampleRate)
 // and starts a profiler if so.
 func (span *Span) sampleTransactionProfile() {
 	var sampleRate = span.clientOptions().ProfilesSampleRate
 	switch {
 	case sampleRate < 0.0 || sampleRate > 1.0:
-		Logger.Printf("Skipping transaction profiling: ProfilesSampleRate out of range [0.0, 1.0]: %f", sampleRate)
+		Logger.Printf("Skipping transaction profiling: ProfilesSampleRate out of range [0.0, 1.0]: %f\n", sampleRate)
 	case sampleRate == 0.0 || rng.Float64() >= sampleRate:
-		Logger.Printf("Skipping transaction profiling: ProfilesSampleRate is: %f", sampleRate)
+		Logger.Printf("Skipping transaction profiling: ProfilesSampleRate is: %f\n", sampleRate)
 	default:
-		span.profiler = &_transactionProfiler{
-			stopFunc: startProfiling(span.StartTime),
+		startProfilerOnce.Do(startGlobalProfiler)
+		if globalProfiler == nil {
+			Logger.Println("Skipping transaction profiling: the profiler couldn't be started")
+		} else {
+			span.collectProfile = collectTransactionProfile
 		}
 	}
 }
 
-type transactionProfiler interface {
-	Finish(span *Span) *profileInfo
-}
+// transactionProfiler collects a profile for a given span.
+type transactionProfiler func(span *Span) *profileInfo
+
+var startProfilerOnce sync.Once
+var globalProfiler profiler
 
-type _transactionProfiler struct {
-	stopFunc func() *profilerResult
+func startGlobalProfiler() {
+	globalProfiler = startProfiling(time.Now())
 }
 
-func (tp *_transactionProfiler) Finish(span *Span) *profileInfo {
-	result := tp.stopFunc()
+func collectTransactionProfile(span *Span) *profileInfo {
+	result := globalProfiler.GetSlice(span.StartTime, span.EndTime)
 	if result == nil || result.trace == nil {
 		return nil
 	}
diff --git a/vendor/github.com/getsentry/sentry-go/tracing.go b/vendor/github.com/getsentry/sentry-go/tracing.go
index 38b810c0..9b15b510 100644
--- a/vendor/github.com/getsentry/sentry-go/tracing.go
+++ b/vendor/github.com/getsentry/sentry-go/tracing.go
@@ -58,8 +58,8 @@ type Span struct { //nolint: maligned // prefer readability over optimal memory
 	recorder *spanRecorder
 	// span context, can only be set on transactions
 	contexts map[string]Context
-	// profiler instance if attached, nil otherwise.
-	profiler transactionProfiler
+	// collectProfile is a function that collects a profile of the current transaction. May be nil.
+	collectProfile transactionProfiler
 	// a Once instance to make sure that Finish() is only called once.
 	finishOnce sync.Once
 }
@@ -333,12 +333,6 @@ func (s *Span) SetDynamicSamplingContext(dsc DynamicSamplingContext) {
 
 // doFinish runs the actual Span.Finish() logic.
 func (s *Span) doFinish() {
-	// For the timing to be correct, the profiler must be stopped before s.EndTime.
-	var profile *profileInfo
-	if s.profiler != nil {
-		profile = s.profiler.Finish(s)
-	}
-
 	if s.EndTime.IsZero() {
 		s.EndTime = monotonicTimeSince(s.StartTime)
 	}
@@ -351,7 +345,9 @@ func (s *Span) doFinish() {
 		return
 	}
 
-	event.sdkMetaData.transactionProfile = profile
+	if s.collectProfile != nil {
+		event.sdkMetaData.transactionProfile = s.collectProfile(s)
+	}
 
 	// TODO(tracing): add breadcrumbs
 	// (see https://github.com/getsentry/sentry-python/blob/f6f3525f8812f609/sentry_sdk/tracing.py#L372)
diff --git a/vendor/github.com/getsentry/sentry-go/transport.go b/vendor/github.com/getsentry/sentry-go/transport.go
index b1be4421..3eee4584 100644
--- a/vendor/github.com/getsentry/sentry-go/transport.go
+++ b/vendor/github.com/getsentry/sentry-go/transport.go
@@ -209,7 +209,20 @@ func envelopeFromBody(event *Event, dsn *Dsn, sentAt time.Time, body json.RawMes
 func getRequestFromEvent(event *Event, dsn *Dsn) (r *http.Request, err error) {
 	defer func() {
 		if r != nil {
-			r.Header.Set("User-Agent", userAgent)
+			r.Header.Set("User-Agent", fmt.Sprintf("%s/%s", event.Sdk.Name, event.Sdk.Version))
+			r.Header.Set("Content-Type", "application/x-sentry-envelope")
+
+			auth := fmt.Sprintf("Sentry sentry_version=%s, "+
+				"sentry_client=%s/%s, sentry_key=%s", apiVersion, event.Sdk.Name, event.Sdk.Version, dsn.publicKey)
+
+			// The key sentry_secret is effectively deprecated and no longer needs to be set.
+			// However, since it was required in older self-hosted versions,
+			// it should still be passed through to Sentry if set.
+			if dsn.secretKey != "" {
+				auth = fmt.Sprintf("%s, sentry_secret=%s", auth, dsn.secretKey)
+			}
+
+			r.Header.Set("X-Sentry-Auth", auth)
 		}
 	}()
 	body := getRequestBodyFromEvent(event)
@@ -348,10 +361,6 @@ func (t *HTTPTransport) SendEvent(event *Event) {
 		return
 	}
 
-	for headerKey, headerValue := range t.dsn.RequestHeaders() {
-		request.Header.Set(headerKey, headerValue)
-	}
-
 	// <-t.buffer is equivalent to acquiring a lock to access the current batch.
 	// A few lines below, t.buffer <- b releases the lock.
 	//
@@ -573,10 +582,6 @@ func (t *HTTPSyncTransport) SendEvent(event *Event) {
 		return
 	}
 
-	for headerKey, headerValue := range t.dsn.RequestHeaders() {
-		request.Header.Set(headerKey, headerValue)
-	}
-
 	var eventType string
 	if event.Type == transactionType {
 		eventType = "transaction"
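
Since Dsn.RequestHeaders() is deprecated, the transport now builds the envelope auth header itself, as shown in the hunk above. A small sketch of that header format; the key values are placeholders:

```go
package main

import "fmt"

// buildAuthHeader mirrors the X-Sentry-Auth value the transport now sets on
// each envelope request instead of relying on Dsn.RequestHeaders().
func buildAuthHeader(sdkName, sdkVersion, publicKey, secretKey string) string {
	auth := fmt.Sprintf("Sentry sentry_version=%s, sentry_client=%s/%s, sentry_key=%s",
		"7", sdkName, sdkVersion, publicKey)
	if secretKey != "" {
		// sentry_secret is only required by very old self-hosted Sentry versions.
		auth = fmt.Sprintf("%s, sentry_secret=%s", auth, secretKey)
	}
	return auth
}

func main() {
	fmt.Println(buildAuthHeader("sentry.go", "0.24.0", "examplePublicKey", ""))
}
```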
diff --git a/vendor/github.com/getsentry/sentry-go/util.go b/vendor/github.com/getsentry/sentry-go/util.go
index e5717c63..47f81485 100644
--- a/vendor/github.com/getsentry/sentry-go/util.go
+++ b/vendor/github.com/getsentry/sentry-go/util.go
@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"os"
+	"runtime/debug"
 	"strings"
 	"time"
 
@@ -66,6 +67,13 @@ func defaultRelease() (release string) {
 		}
 	}
 
+	if info, ok := debug.ReadBuildInfo(); ok {
+		buildInfoVcsRevision := revisionFromBuildInfo(info)
+		if len(buildInfoVcsRevision) > 0 {
+			return buildInfoVcsRevision
+		}
+	}
+
 	// Derive a version string from Git. Example outputs:
 	// 	v1.0.1-0-g9de4
 	// 	v2.0-8-g77df-dirty
@@ -89,3 +97,14 @@ func defaultRelease() (release string) {
 	Logger.Printf("Using release from Git: %s", release)
 	return release
 }
+
+func revisionFromBuildInfo(info *debug.BuildInfo) string {
+	for _, setting := range info.Settings {
+		if setting.Key == "vcs.revision" && setting.Value != "" {
+			Logger.Printf("Using release from debug info: %s", setting.Value)
+			return setting.Value
+		}
+	}
+
+	return ""
+}
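
The defaultRelease() change above means a binary built inside a git checkout now picks up its release from the VCS stamp in the build info. This sketch prints the same vcs.revision value the SDK would use, assuming the binary was built with the toolchain's VCS stamping enabled (the default for `go build` in a repository):

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info available")
		return
	}
	for _, setting := range info.Settings {
		if setting.Key == "vcs.revision" && setting.Value != "" {
			fmt.Println("release:", setting.Value)
			return
		}
	}
	fmt.Println("no vcs.revision recorded (e.g. built outside a git checkout)")
}
```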
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a4410afa..043f17f3 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -7,7 +7,7 @@ github.com/cespare/xxhash/v2
 # github.com/davecgh/go-spew v1.1.1
 ## explicit
 github.com/davecgh/go-spew/spew
-# github.com/getsentry/sentry-go v0.23.0
+# github.com/getsentry/sentry-go v0.24.0
 ## explicit; go 1.18
 github.com/getsentry/sentry-go
 github.com/getsentry/sentry-go/internal/debug