go.mod: update sentry

Update sentry dep and code that was required.
This commit is contained in:
Lukas Zapletal 2025-07-08 15:35:59 +02:00 committed by Tomáš Hozza
parent d7686244cd
commit 15ec2fc431
46 changed files with 2472 additions and 1832 deletions

View file

@ -108,7 +108,7 @@ func main() {
panic(err)
}
sentryhook := sentrylogrus.NewFromClient([]logrus.Level{logrus.PanicLevel,
sentryhook := sentrylogrus.NewEventHookFromClient([]logrus.Level{logrus.PanicLevel,
logrus.FatalLevel, logrus.ErrorLevel},
sentry.CurrentHub().Client())
logrus.AddHook(sentryhook)

4
go.mod
View file

@ -27,7 +27,9 @@ require (
github.com/aws/smithy-go v1.22.2
github.com/coreos/go-systemd/v22 v22.5.0
github.com/getkin/kin-openapi v0.131.0
github.com/getsentry/sentry-go v0.28.1
github.com/getsentry/sentry-go v0.34.1
github.com/getsentry/sentry-go/echo v0.34.1
github.com/getsentry/sentry-go/logrus v0.34.1
github.com/gobwas/glob v0.2.3
github.com/golang-jwt/jwt/v4 v4.5.2
github.com/google/go-cmp v0.7.0

10
go.sum
View file

@ -256,8 +256,12 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/getkin/kin-openapi v0.131.0 h1:NO2UeHnFKRYhZ8wg6Nyh5Cq7dHk4suQQr72a4pMrDxE=
github.com/getkin/kin-openapi v0.131.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58=
github.com/getsentry/sentry-go v0.28.1 h1:zzaSm/vHmGllRM6Tpx1492r0YDzauArdBfkJRtY6P5k=
github.com/getsentry/sentry-go v0.28.1/go.mod h1:1fQZ+7l7eeJ3wYi82q5Hg8GqAPgefRq+FP/QhafYVgg=
github.com/getsentry/sentry-go v0.34.1 h1:HSjc1C/OsnZttohEPrrqKH42Iud0HuLCXpv8cU1pWcw=
github.com/getsentry/sentry-go v0.34.1/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE=
github.com/getsentry/sentry-go/echo v0.34.1 h1:QmRs8A6SK7YYbc6Dtuyh2RTFd4Fe9v9VH8Ty4h8wC8s=
github.com/getsentry/sentry-go/echo v0.34.1/go.mod h1:4kdQH/69jXiWE7Ve5nwkWa9U4A38FK/Eu/zSQ4tcaHc=
github.com/getsentry/sentry-go/logrus v0.34.1 h1:uHBGJDaOZMnSUtXqM0B+tZTJW3xbMEHG/Wq1Z0FMd0M=
github.com/getsentry/sentry-go/logrus v0.34.1/go.mod h1:ZwweyplbBHvInrnCVv9Gjly8tfGRx6vly1f1YTAzffU=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
@ -751,6 +755,8 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=

View file

@ -11,3 +11,5 @@ coverage:
default:
# Do not fail the commit status if the coverage was reduced up to this value
threshold: 0.5%
ignore:
- "log_fallback.go"

View file

@ -8,6 +8,33 @@ targets:
- name: github
tagPrefix: otel/v
tagOnly: true
- name: github
tagPrefix: echo/v
tagOnly: true
- name: github
tagPrefix: fasthttp/v
tagOnly: true
- name: github
tagPrefix: fiber/v
tagOnly: true
- name: github
tagPrefix: gin/v
tagOnly: true
- name: github
tagPrefix: iris/v
tagOnly: true
- name: github
tagPrefix: negroni/v
tagOnly: true
- name: github
tagPrefix: logrus/v
tagOnly: true
- name: github
tagPrefix: slog/v
tagOnly: true
- name: github
tagPrefix: zerolog/v
tagOnly: true
- name: registry
sdks:
github:getsentry/sentry-go:

View file

@ -1,20 +1,17 @@
version: "2"
linters:
disable-all: true
default: none
enable:
- bodyclose
- dogsled
- dupl
- errcheck
- exportloopref
- gochecknoinits
- goconst
- gocritic
- gocyclo
- godot
- gofmt
- goimports
- gosec
- gosimple
- govet
- ineffassign
- misspell
@ -22,25 +19,44 @@ linters:
- prealloc
- revive
- staticcheck
- typecheck
- unconvert
- unparam
- unused
- whitespace
issues:
exclude-rules:
- path: _test\.go
linters:
- goconst
- prealloc
- path: _test\.go
text: "G306:"
linters:
- gosec
- path: errors_test\.go
linters:
- unused
- path: http/example_test\.go
linters:
- errcheck
- bodyclose
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
- linters:
- goconst
- prealloc
path: _test\.go
- linters:
- gosec
path: _test\.go
text: 'G306:'
- linters:
- unused
path: errors_test\.go
- linters:
- bodyclose
- errcheck
path: http/example_test\.go
paths:
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofmt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

View file

@ -1,5 +1,307 @@
# Changelog
## 0.34.1
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.34.1.
### Bug Fixes
- Allow flush to be used multiple times without issues, particularly for the batch logger ([#1051](https://github.com/getsentry/sentry-go/pull/1051))
- Fix race condition in `Scope.GetSpan()` method by adding proper mutex locking ([#1044](https://github.com/getsentry/sentry-go/pull/1044))
- Guard transport on `Close()` to prevent panic when called multiple times ([#1044](https://github.com/getsentry/sentry-go/pull/1044))
## 0.34.0
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.34.0.
### Breaking Changes
- Logrus structured logging support replaces the `sentrylogrus.Hook` signature from a `*Hook` to an interface.
```go
var hook *sentrylogrus.Hook
hook = sentrylogrus.New(
// ... your setup
)
// should change the definition to
var hook sentrylogrus.Hook
hook = sentrylogrus.New(
// ... your setup
)
```
### Features
- Structured logging support for [slog](https://pkg.go.dev/log/slog). ([#1033](https://github.com/getsentry/sentry-go/pull/1033))
```go
ctx := context.Background()
handler := sentryslog.Option{
EventLevel: []slog.Level{slog.LevelError, sentryslog.LevelFatal}, // Only Error and Fatal as events
LogLevel: []slog.Level{slog.LevelWarn, slog.LevelInfo}, // Only Warn and Info as logs
}.NewSentryHandler(ctx)
logger := slog.New(handler)
logger.Info("hello")
```
- Structured logging support for [logrus](https://github.com/sirupsen/logrus). ([#1036](https://github.com/getsentry/sentry-go/pull/1036))
```go
logHook, _ := sentrylogrus.NewLogHook(
[]logrus.Level{logrus.InfoLevel, logrus.WarnLevel},
sentry.ClientOptions{
Dsn: "your-dsn",
EnableLogs: true, // Required for log entries
})
defer logHook.Flush(5 * time.Second)
logrus.RegisterExitHandler(func() {
logHook.Flush(5 * time.Second)
})
logger := logrus.New()
logger.AddHook(logHook)
logger.Infof("hello")
```
- Add support for flushing events with context using `FlushWithContext()`. ([#935](https://github.com/getsentry/sentry-go/pull/935))
```go
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if !sentry.FlushWithContext(ctx) {
// Handle timeout or cancellation
}
```
- Add support for custom fingerprints in slog integration. ([#1039](https://github.com/getsentry/sentry-go/pull/1039))
### Deprecations
- Slog structured logging support replaces `Level` option with `EventLevel` and `LogLevel` options, for specifying fine-grained levels for capturing events and logs.
```go
handler := sentryslog.Option{
EventLevel: []slog.Level{slog.LevelWarn, slog.LevelError, sentryslog.LevelFatal},
LogLevel: []slog.Level{slog.LevelDebug, slog.LevelInfo, slog.LevelWarn, slog.LevelError, sentryslog.LevelFatal},
}.NewSentryHandler(ctx)
```
- Logrus structured logging support replaces the `New` and `NewFromClient` functions with `NewEventHook` and `NewEventHookFromClient`, to match the newly added `NewLogHook` functions and to specify the hook type being created each time.
```go
logHook, err := sentrylogrus.NewLogHook(
[]logrus.Level{logrus.InfoLevel},
sentry.ClientOptions{})
eventHook, err := sentrylogrus.NewEventHook([]logrus.Level{
logrus.ErrorLevel,
logrus.FatalLevel,
logrus.PanicLevel,
}, sentry.ClientOptions{})
```
### Bug Fixes
- Fix issue where `ContinueTrace()` would panic when `sentry-trace` header does not exist. ([#1026](https://github.com/getsentry/sentry-go/pull/1026))
- Fix incorrect log level signature in structured logging. ([#1034](https://github.com/getsentry/sentry-go/pull/1034))
- Remove `sentry.origin` attribute from Sentry logger to prevent confusion in spans. ([#1038](https://github.com/getsentry/sentry-go/pull/1038))
- Don't gate user information behind `SendDefaultPII` flag for logs. ([#1032](https://github.com/getsentry/sentry-go/pull/1032))
### Misc
- Add more sensitive HTTP headers to the default list of headers that are scrubbed by default. ([#1008](https://github.com/getsentry/sentry-go/pull/1008))
## 0.33.0
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.33.0.
### Breaking Changes
- Rename the internal `Logger` to `DebugLogger`. This feature was only used when you set `Debug: True` in your `sentry.Init()` call. If you haven't used the Logger directly, no changes are necessary. ([#1012](https://github.com/getsentry/sentry-go/issues/1012))
### Features
- Add support for [Structured Logging](https://docs.sentry.io/product/explore/logs/). ([#1010](https://github.com/getsentry/sentry-go/issues/1010))
```go
logger := sentry.NewLogger(ctx)
logger.Info(ctx, "Hello, Logs!")
```
You can learn more about Sentry Logs on our [docs](https://docs.sentry.io/product/explore/logs/) and the [examples](https://github.com/getsentry/sentry-go/blob/master/_examples/logs/main.go).
- Add new attributes APIs, which are currently only exposed on logs. ([#1007](https://github.com/getsentry/sentry-go/issues/1007))
### Bug Fixes
- Do not push a new scope on `StartSpan`. ([#1013](https://github.com/getsentry/sentry-go/issues/1013))
- Fix an issue where the propagated sampling decision wasn't used. ([#995](https://github.com/getsentry/sentry-go/issues/995))
- [Otel] Prefer `httpRoute` over `httpTarget` for span descriptions. ([#1002](https://github.com/getsentry/sentry-go/issues/1002))
### Misc
- Update `github.com/stretchr/testify` to v1.8.4. ([#988](https://github.com/getsentry/sentry-go/issues/988))
## 0.32.0
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.32.0.
### Breaking Changes
- Bump the minimum Go version to 1.22. The supported versions are 1.22, 1.23 and 1.24. ([#967](https://github.com/getsentry/sentry-go/issues/967))
- Setting any values on `span.Extra` has no effect anymore. Use `SetData(name string, value interface{})` instead. ([#864](https://github.com/getsentry/sentry-go/pull/864))
### Features
- Add a `MockTransport` and `MockScope`. ([#972](https://github.com/getsentry/sentry-go/pull/972))
### Bug Fixes
- Fix writing `*http.Request` in the Logrus JSONFormatter. ([#955](https://github.com/getsentry/sentry-go/issues/955))
### Misc
- Transaction `data` attributes are now serialized as trace context data attributes, allowing you to query these attributes in the [Trace Explorer](https://docs.sentry.io/product/explore/traces/).
## 0.31.1
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.31.1.
### Bug Fixes
- Correct wrong module name for `sentry-go/logrus` ([#950](https://github.com/getsentry/sentry-go/pull/950))
## 0.31.0
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.31.0.
### Breaking Changes
- Remove support for metrics. Read more about the end of the Metrics beta [here](https://sentry.zendesk.com/hc/en-us/articles/26369339769883-Metrics-Beta-Ended-on-October-7th). ([#914](https://github.com/getsentry/sentry-go/pull/914))
- Remove support for profiling. ([#915](https://github.com/getsentry/sentry-go/pull/915))
- Remove `Segment` field from the `User` struct. This field is no longer used in the Sentry product. ([#928](https://github.com/getsentry/sentry-go/pull/928))
- Every integration is now a separate module, reducing the binary size and number of dependencies. Once you update `sentry-go` to latest version, you'll need to `go get` the integration you want to use. For example, if you want to use the `echo` integration, you'll need to run `go get github.com/getsentry/sentry-go/echo` ([#919](https://github.com/getsentry/sentry-go/pull/919)).
### Features
Add the ability to override `hub` in `context` for integrations that use custom context. ([#931](https://github.com/getsentry/sentry-go/pull/931))
- Add `HubProvider` Hook for `sentrylogrus`, enabling dynamic Sentry hub allocation for each log entry or goroutine. ([#936](https://github.com/getsentry/sentry-go/pull/936))
This change enhances compatibility with Sentry's recommendation of using separate hubs per goroutine. To ensure a separate Sentry hub for each goroutine, configure the `HubProvider` like this:
```go
hook, err := sentrylogrus.New(nil, sentry.ClientOptions{})
if err != nil {
log.Fatalf("Failed to initialize Sentry hook: %v", err)
}
// Set a custom HubProvider to generate a new hub for each goroutine or log entry
hook.SetHubProvider(func() *sentry.Hub {
client, _ := sentry.NewClient(sentry.ClientOptions{})
return sentry.NewHub(client, sentry.NewScope())
})
logrus.AddHook(hook)
```
### Bug Fixes
- Add support for closing worker goroutines started by the `HTTPTransport` to prevent goroutine leaks. ([#894](https://github.com/getsentry/sentry-go/pull/894))
```go
client, _ := sentry.NewClient()
defer client.Close()
```
Worker can be also closed by calling `Close()` method on the `HTTPTransport` instance. `Close` should be called after `Flush` and before terminating the program otherwise some events may be lost.
```go
transport := sentry.NewHTTPTransport()
defer transport.Close()
```
### Misc
- Bump [gin-gonic/gin](https://github.com/gin-gonic/gin) to v1.9.1. ([#946](https://github.com/getsentry/sentry-go/pull/946))
## 0.30.0
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.30.0.
### Features
- Add `sentryzerolog` integration ([#857](https://github.com/getsentry/sentry-go/pull/857))
- Add `sentryslog` integration ([#865](https://github.com/getsentry/sentry-go/pull/865))
- Always set Mechanism Type to generic ([#896](https://github.com/getsentry/sentry-go/pull/897))
### Bug Fixes
- Prevent panic in `fasthttp` and `fiber` integration in case a malformed URL has to be parsed ([#912](https://github.com/getsentry/sentry-go/pull/912))
### Misc
Drop support for Go 1.18, 1.19 and 1.20. The currently supported Go versions are the last 3 stable releases: 1.23, 1.22 and 1.21.
## 0.29.1
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.29.1.
### Bug Fixes
- Correlate errors to the current trace ([#886](https://github.com/getsentry/sentry-go/pull/886))
- Set the trace context when the transaction finishes ([#888](https://github.com/getsentry/sentry-go/pull/888))
### Misc
- Update the `sentrynegroni` integration to use the latest (v3.1.1) version of Negroni ([#885](https://github.com/getsentry/sentry-go/pull/885))
## 0.29.0
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.29.0.
### Breaking Changes
- Remove the `sentrymartini` integration ([#861](https://github.com/getsentry/sentry-go/pull/861))
- The `WrapResponseWriter` has been moved from the `sentryhttp` package to the `internal/httputils` package. If you've imported it previously, you'll need to copy the implementation in your project. ([#871](https://github.com/getsentry/sentry-go/pull/871))
### Features
- Add new convenience methods to continue a trace and propagate tracing headers for error-only use cases. ([#862](https://github.com/getsentry/sentry-go/pull/862))
If you are not using one of our integrations, you can manually continue an incoming trace by using `sentry.ContinueTrace()` by providing the `sentry-trace` and `baggage` header received from a downstream SDK.
```go
hub := sentry.CurrentHub()
sentry.ContinueTrace(hub, r.Header.Get(sentry.SentryTraceHeader), r.Header.Get(sentry.SentryBaggageHeader))
```
You can use `hub.GetTraceparent()` and `hub.GetBaggage()` to fetch the necessary header values for outgoing HTTP requests.
```go
hub := sentry.GetHubFromContext(ctx)
req, _ := http.NewRequest("GET", "http://localhost:3000", nil)
req.Header.Add(sentry.SentryTraceHeader, hub.GetTraceparent())
req.Header.Add(sentry.SentryBaggageHeader, hub.GetBaggage())
```
### Bug Fixes
- Initialize `HTTPTransport.limit` if `nil` ([#844](https://github.com/getsentry/sentry-go/pull/844))
- Fix `sentry.StartTransaction()` returning a transaction with an outdated context on existing transactions ([#854](https://github.com/getsentry/sentry-go/pull/854))
- Treat `Proxy-Authorization` as a sensitive header ([#859](https://github.com/getsentry/sentry-go/pull/859))
- Add support for the `http.Hijacker` interface to the `sentrynegroni` package ([#871](https://github.com/getsentry/sentry-go/pull/871))
- Go version >= 1.23: Use value from `http.Request.Pattern` for HTTP transaction names when using `sentryhttp` & `sentrynegroni` ([#875](https://github.com/getsentry/sentry-go/pull/875))
- Go version >= 1.21: Fix closure functions name grouping ([#877](https://github.com/getsentry/sentry-go/pull/877))
### Misc
- Collect `span` origins ([#849](https://github.com/getsentry/sentry-go/pull/849))
## 0.28.1
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.28.1.

View file

@ -58,9 +58,8 @@ test-coverage: $(COVERAGE_REPORT_DIR) clean-report-dir ## Test with coverage en
mod-tidy: ## Check go.mod tidiness
set -e ; \
for dir in $(ALL_GO_MOD_DIRS); do \
cd "$${dir}"; \
echo ">>> Running 'go mod tidy' for module: $${dir}"; \
go mod tidy -go=1.18 -compat=1.18; \
(cd "$${dir}" && go mod tidy -go=1.21 -compat=1.21); \
done; \
git diff --exit-code;
.PHONY: mod-tidy
@ -68,12 +67,12 @@ mod-tidy: ## Check go.mod tidiness
vet: ## Run "go vet"
set -e ; \
for dir in $(ALL_GO_MOD_DIRS); do \
cd "$${dir}"; \
echo ">>> Running 'go vet' for module: $${dir}"; \
go vet ./...; \
(cd "$${dir}" && go vet ./...); \
done;
.PHONY: vet
lint: ## Lint (using "golangci-lint")
golangci-lint run
.PHONY: lint

View file

@ -10,7 +10,7 @@
# Official Sentry SDK for Go
[![Build Status](https://github.com/getsentry/sentry-go/workflows/go-workflow/badge.svg)](https://github.com/getsentry/sentry-go/actions?query=workflow%3Ago-workflow)
[![Build Status](https://github.com/getsentry/sentry-go/actions/workflows/test.yml/badge.svg)](https://github.com/getsentry/sentry-go/actions/workflows/test.yml)
[![Go Report Card](https://goreportcard.com/badge/github.com/getsentry/sentry-go)](https://goreportcard.com/report/github.com/getsentry/sentry-go)
[![Discord](https://img.shields.io/discord/621778831602221064)](https://discord.gg/Ww9hbqr)
[![go.dev](https://img.shields.io/badge/go.dev-pkg-007d9c.svg?style=flat)](https://pkg.go.dev/github.com/getsentry/sentry-go)
@ -68,17 +68,20 @@ To get started, have a look at one of our [examples](_examples/):
We also provide a [complete API reference](https://pkg.go.dev/github.com/getsentry/sentry-go).
For more detailed information about how to get the most out of `sentry-go`,
checkout the official documentation:
check out the official documentation:
- [Sentry Go SDK documentation](https://docs.sentry.io/platforms/go/)
- Guides:
- [net/http](https://docs.sentry.io/platforms/go/guides/http/)
- [echo](https://docs.sentry.io/platforms/go/guides/echo/)
- [fasthttp](https://docs.sentry.io/platforms/go/guides/fasthttp/)
- [fiber](https://docs.sentry.io/platforms/go/guides/fiber/)
- [gin](https://docs.sentry.io/platforms/go/guides/gin/)
- [iris](https://docs.sentry.io/platforms/go/guides/iris/)
- [martini](https://docs.sentry.io/platforms/go/guides/martini/)
- [logrus](https://docs.sentry.io/platforms/go/guides/logrus/)
- [negroni](https://docs.sentry.io/platforms/go/guides/negroni/)
- [slog](https://docs.sentry.io/platforms/go/guides/slog/)
- [zerolog](https://docs.sentry.io/platforms/go/guides/zerolog/)
## Resources

View file

@ -0,0 +1,36 @@
package attribute
// Builder pairs an attribute Key with its typed Value.
type Builder struct {
	Key   string
	Value Value
}

// String returns a Builder for a string value.
func String(key, value string) Builder {
	return Builder{key, StringValue(value)}
}

// Int64 returns a Builder for an int64 value.
func Int64(key string, value int64) Builder {
	return Builder{key, Int64Value(value)}
}

// Int returns a Builder for an int value (stored internally as INT64).
func Int(key string, value int) Builder {
	return Builder{key, IntValue(value)}
}

// Float64 returns a Builder for a float64 value.
func Float64(key string, v float64) Builder {
	return Builder{key, Float64Value(v)}
}

// Bool returns a Builder for a boolean value.
func Bool(key string, v bool) Builder {
	return Builder{key, BoolValue(v)}
}

// Valid reports whether the Builder has a non-empty key and a value
// whose type is not INVALID.
func (b *Builder) Valid() bool {
	return len(b.Key) > 0 && b.Value.Type() != INVALID
}

View file

@ -0,0 +1,49 @@
// Copied from https://github.com/open-telemetry/opentelemetry-go/blob/cc43e01c27892252aac9a8f20da28cdde957a289/attribute/rawhelpers.go
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute
import (
"math"
)
// boolToRaw encodes b into the shared raw uint64 slot used by Value:
// 1 for true, 0 for false.
func boolToRaw(b bool) uint64 { // b is not a control flag.
	var raw uint64
	if b {
		raw = 1
	}
	return raw
}

// rawToBool decodes a raw uint64 back into a bool; any non-zero raw
// value decodes to true.
func rawToBool(r uint64) bool {
	return r != 0
}

// int64ToRaw reinterprets the two's-complement bits of i as a uint64.
// Assumes the original was a valid int64 (overflow not checked).
func int64ToRaw(i int64) uint64 {
	return uint64(i) // nolint: gosec
}

// rawToInt64 is the inverse of int64ToRaw.
// Assumes the original was a valid int64 (overflow not checked).
func rawToInt64(r uint64) int64 {
	return int64(r) // nolint: gosec
}

// float64ToRaw stores the IEEE-754 bit pattern of f in a uint64.
func float64ToRaw(f float64) uint64 {
	return math.Float64bits(f)
}

// rawToFloat64 is the inverse of float64ToRaw.
func rawToFloat64(r uint64) float64 {
	return math.Float64frombits(r)
}

View file

@ -0,0 +1,170 @@
// Adapted from https://github.com/open-telemetry/opentelemetry-go/blob/cc43e01c27892252aac9a8f20da28cdde957a289/attribute/value.go
//
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute
import (
"encoding/json"
"fmt"
"strconv"
)
// Type describes the type of the data Value holds.
type Type int // redefines builtin Type.

// Value represents the value part in key-value pairs. Numeric kinds
// (BOOL, INT64, FLOAT64) share the raw numeric field; strings use the
// dedicated stringly field.
type Value struct {
	vtype    Type
	numeric  uint64
	stringly string
}

const (
	// INVALID is used for a Value with no value set.
	INVALID Type = iota
	// BOOL is a boolean Type Value.
	BOOL
	// INT64 is a 64-bit signed integral Type Value.
	INT64
	// FLOAT64 is a 64-bit floating point Type Value.
	FLOAT64
	// STRING is a string Type Value.
	STRING
)

// BoolValue creates a BOOL Value.
func BoolValue(v bool) Value {
	return Value{
		vtype:   BOOL,
		numeric: boolToRaw(v),
	}
}

// IntValue creates an INT64 Value from a platform int.
func IntValue(v int) Value {
	return Int64Value(int64(v))
}

// Int64Value creates an INT64 Value.
func Int64Value(v int64) Value {
	return Value{
		vtype:   INT64,
		numeric: int64ToRaw(v),
	}
}

// Float64Value creates a FLOAT64 Value.
func Float64Value(v float64) Value {
	return Value{
		vtype:   FLOAT64,
		numeric: float64ToRaw(v),
	}
}

// StringValue creates a STRING Value.
func StringValue(v string) Value {
	return Value{
		vtype:    STRING,
		stringly: v,
	}
}

// Type returns a type of the Value.
func (v Value) Type() Type {
	return v.vtype
}

// AsBool returns the bool value. Make sure that the Value's type is
// BOOL; no check is performed here.
func (v Value) AsBool() bool {
	return rawToBool(v.numeric)
}

// AsInt64 returns the int64 value. Make sure that the Value's type is
// INT64; no check is performed here.
func (v Value) AsInt64() int64 {
	return rawToInt64(v.numeric)
}

// AsFloat64 returns the float64 value. Make sure that the Value's
// type is FLOAT64; no check is performed here.
func (v Value) AsFloat64() float64 {
	return rawToFloat64(v.numeric)
}

// AsString returns the string value. Make sure that the Value's type
// is STRING; no check is performed here.
func (v Value) AsString() string {
	return v.stringly
}

// unknownValueType is the sentinel returned by AsInterface for an
// INVALID (or unrecognized) Value type.
type unknownValueType struct{}

// AsInterface returns Value's data as interface{}, choosing the
// accessor matching the stored type.
func (v Value) AsInterface() interface{} {
	switch v.Type() {
	case BOOL:
		return v.AsBool()
	case INT64:
		return v.AsInt64()
	case FLOAT64:
		return v.AsFloat64()
	case STRING:
		return v.stringly
	}
	return unknownValueType{}
}

// String returns a string representation of Value's data.
func (v Value) String() string {
	switch v.Type() {
	case BOOL:
		return strconv.FormatBool(v.AsBool())
	case INT64:
		return strconv.FormatInt(v.AsInt64(), 10)
	case FLOAT64:
		return fmt.Sprint(v.AsFloat64())
	case STRING:
		return v.stringly
	default:
		return "unknown"
	}
}

// MarshalJSON returns the JSON encoding of the Value as an object with
// "Type" and "Value" fields.
func (v Value) MarshalJSON() ([]byte, error) {
	var jsonVal struct {
		Type  string
		Value interface{}
	}
	jsonVal.Type = v.Type().String()
	jsonVal.Value = v.AsInterface()
	return json.Marshal(jsonVal)
}

// String returns the lowercase name of the Type; unrecognized types
// (including INVALID) render as "invalid".
func (t Type) String() string {
	switch t {
	case BOOL:
		return "bool"
	case INT64:
		return "int64"
	case FLOAT64:
		return "float64"
	case STRING:
		return "string"
	}
	return "invalid"
}

131
vendor/github.com/getsentry/sentry-go/batch_logger.go generated vendored Normal file
View file

@ -0,0 +1,131 @@
package sentry
import (
"context"
"sync"
"time"
)
const (
	// batchSize is the maximum number of logs sent in one event and
	// also the buffer capacity of the incoming log channel.
	batchSize = 100
	// batchTimeout is the maximum time logs may sit in the buffer
	// before being sent even when the batch is not full.
	batchTimeout = 5 * time.Second
)

// BatchLogger accumulates Log entries and ships them to Sentry in
// batches: a batch is sent when batchSize entries are queued or after
// batchTimeout elapses, whichever comes first.
type BatchLogger struct {
	client *Client
	// logCh receives individual logs from producers.
	logCh chan Log
	// flushCh carries flush requests; the inner channel is closed by
	// the worker once the flush completes.
	flushCh chan chan struct{}
	// cancel stops the worker goroutine started by Start.
	cancel context.CancelFunc
	// wg tracks the worker goroutine so Shutdown can wait for it.
	wg sync.WaitGroup
	// startOnce and shutdownOnce make Start/Shutdown idempotent.
	startOnce    sync.Once
	shutdownOnce sync.Once
}
// NewBatchLogger creates a BatchLogger bound to client. The returned
// logger is inert until Start is called.
func NewBatchLogger(client *Client) *BatchLogger {
	l := &BatchLogger{client: client}
	l.logCh = make(chan Log, batchSize)
	l.flushCh = make(chan chan struct{})
	return l
}
// Start launches the background batching goroutine. It is safe to call
// Start multiple times; only the first call has an effect.
func (l *BatchLogger) Start() {
	l.startOnce.Do(func() {
		var ctx context.Context
		ctx, l.cancel = context.WithCancel(context.Background())
		l.wg.Add(1)
		go l.run(ctx)
	})
}
// Flush asks the background goroutine to drain and send all buffered
// logs, then waits until the flush completes or the timeout channel is
// closed. Flush never blocks indefinitely: if the worker is not
// receiving (e.g. not started) the request is abandoned when timeout
// fires.
func (l *BatchLogger) Flush(timeout <-chan struct{}) {
	// done is closed by run() once the pending logs have been sent.
	done := make(chan struct{})
	select {
	case l.flushCh <- done:
		// Request delivered; wait for completion or give up on timeout.
		select {
		case <-done:
		case <-timeout:
		}
	case <-timeout:
		// The worker never received the request; give up.
	}
}
// Shutdown cancels the background goroutine and waits for it to drain
// remaining logs and exit. Safe to call multiple times; only the first
// call acts, and it is a no-op if Start was never called.
func (l *BatchLogger) Shutdown() {
	l.shutdownOnce.Do(func() {
		if l.cancel == nil {
			return
		}
		l.cancel()
		l.wg.Wait()
	})
}
// run is the worker loop started by Start. It accumulates incoming
// logs and sends them as a single event when either batchSize entries
// have been collected or batchTimeout elapses. It also services
// explicit Flush requests and drains any remaining logs on shutdown.
func (l *BatchLogger) run(ctx context.Context) {
	defer l.wg.Done()
	var logs []Log
	timer := time.NewTimer(batchTimeout)
	defer timer.Stop()
	for {
		select {
		case log := <-l.logCh:
			logs = append(logs, log)
			if len(logs) >= batchSize {
				l.processEvent(logs)
				logs = nil
				// Restart the batch window; drain a pending fire first
				// so Reset does not leave a stale tick in timer.C.
				if !timer.Stop() {
					<-timer.C
				}
				timer.Reset(batchTimeout)
			}
		case <-timer.C:
			// Batch window elapsed: send whatever has accumulated.
			if len(logs) > 0 {
				l.processEvent(logs)
				logs = nil
			}
			timer.Reset(batchTimeout)
		case done := <-l.flushCh:
			// Flush request: non-blockingly pull every log already
			// queued, send the batch, then signal completion.
		flushDrain:
			for {
				select {
				case log := <-l.logCh:
					logs = append(logs, log)
				default:
					break flushDrain
				}
			}
			if len(logs) > 0 {
				l.processEvent(logs)
				logs = nil
			}
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(batchTimeout)
			close(done)
		case <-ctx.Done():
			// Shutdown: drain the queue best-effort and send one final
			// batch before exiting.
		drain:
			for {
				select {
				case log := <-l.logCh:
					logs = append(logs, log)
				default:
					break drain
				}
			}
			if len(logs) > 0 {
				l.processEvent(logs)
			}
			return
		}
	}
}
// processEvent wraps the accumulated logs into a single event and
// hands it to the client for delivery.
func (l *BatchLogger) processEvent(logs []Log) {
	event := NewEvent()
	event.Timestamp = time.Now()
	// NOTE(review): logEvent is defined elsewhere in this package —
	// presumably the marker carrying the log event type string.
	event.Type = logEvent.Type
	event.Logs = logs
	l.client.CaptureEvent(event, nil, nil)
}

View file

@ -77,9 +77,9 @@ type usageError struct {
error
}
// Logger is an instance of log.Logger that is use to provide debug information about running Sentry Client
// can be enabled by either using Logger.SetOutput directly or with Debug client option.
var Logger = log.New(io.Discard, "[Sentry] ", log.LstdFlags)
// DebugLogger is an instance of log.Logger that is used to provide debug information about running Sentry Client
// can be enabled by either using DebugLogger.SetOutput directly or with Debug client option.
var DebugLogger = log.New(io.Discard, "[Sentry] ", log.LstdFlags)
// EventProcessor is a function that processes an event.
// Event processors are used to change an event before it is sent to Sentry.
@ -90,7 +90,7 @@ type EventProcessor func(event *Event, hint *EventHint) *Event
// ApplyToEvent changes an event based on external data and/or
// an event hint.
type EventModifier interface {
ApplyToEvent(event *Event, hint *EventHint) *Event
ApplyToEvent(event *Event, hint *EventHint, client *Client) *Event
}
var globalEventProcessors []EventProcessor
@ -133,9 +133,6 @@ type ClientOptions struct {
TracesSampleRate float64
// Used to customize the sampling of traces, overrides TracesSampleRate.
TracesSampler TracesSampler
// The sample rate for profiling traces in the range [0.0, 1.0].
// This is relative to TracesSampleRate - it is a ratio of profiled traces out of all sampled traces.
ProfilesSampleRate float64
// List of regexp strings that will be used to match against event's message
// and if applicable, caught errors type and value.
// If the match is found, then a whole event will be dropped.
@ -147,8 +144,11 @@ type ClientOptions struct {
// By default, no such data is sent.
SendDefaultPII bool
// BeforeSend is called before error events are sent to Sentry.
// Use it to mutate the event or return nil to discard the event.
// You can use it to mutate the event or return nil to discard it.
BeforeSend func(event *Event, hint *EventHint) *Event
// BeforeSendLog is called before log events are sent to Sentry.
// You can use it to mutate the log event or return nil to discard it.
BeforeSendLog func(event *Log) *Log
// BeforeSendTransaction is called before transaction events are sent to Sentry.
// Use it to mutate the transaction or return nil to discard the transaction.
BeforeSendTransaction func(event *Event, hint *EventHint) *Event
@ -226,6 +226,8 @@ type ClientOptions struct {
MaxErrorDepth int
// Default event tags. These are overridden by tags set on a scope.
Tags map[string]string
// EnableLogs controls when logs should be emitted.
EnableLogs bool
}
// Client is the underlying processor that is used by the main API and Hub
@ -240,7 +242,8 @@ type Client struct {
sdkVersion string
// Transport is read-only. Replacing the transport of an existing client is
// not supported, create a new client instead.
Transport Transport
Transport Transport
batchLogger *BatchLogger
}
// NewClient creates and returns an instance of Client configured using
@ -278,7 +281,7 @@ func NewClient(options ClientOptions) (*Client, error) {
if debugWriter == nil {
debugWriter = os.Stderr
}
Logger.SetOutput(debugWriter)
DebugLogger.SetOutput(debugWriter)
}
if options.Dsn == "" {
@ -340,6 +343,11 @@ func NewClient(options ClientOptions) (*Client, error) {
sdkVersion: SDKVersion,
}
if options.EnableLogs {
client.batchLogger = NewBatchLogger(&client)
client.batchLogger.Start()
}
client.setupTransport()
client.setupIntegrations()
@ -354,15 +362,7 @@ func (client *Client) setupTransport() {
if opts.Dsn == "" {
transport = new(noopTransport)
} else {
httpTransport := NewHTTPTransport()
// When tracing is enabled, use larger buffer to
// accommodate more concurrent events.
// TODO(tracing): consider using separate buffers per
// event type.
if opts.EnableTracing {
httpTransport.BufferSize = 1000
}
transport = httpTransport
transport = NewHTTPTransport()
}
}
@ -386,12 +386,12 @@ func (client *Client) setupIntegrations() {
for _, integration := range integrations {
if client.integrationAlreadyInstalled(integration.Name()) {
Logger.Printf("Integration %s is already installed\n", integration.Name())
DebugLogger.Printf("Integration %s is already installed\n", integration.Name())
continue
}
client.integrations = append(client.integrations, integration)
integration.SetupOnce(client)
Logger.Printf("Integration installed: %s\n", integration.Name())
DebugLogger.Printf("Integration installed: %s\n", integration.Name())
}
sort.Slice(client.integrations, func(i, j int) bool {
@ -510,9 +510,49 @@ func (client *Client) RecoverWithContext(
// the network synchronously, configure it to use the HTTPSyncTransport in the
// call to Init.
func (client *Client) Flush(timeout time.Duration) bool {
	if client.batchLogger != nil {
		start := time.Now()
		timeoutCh := make(chan struct{})
		// Signal the batch logger to stop waiting once the timeout elapses.
		// Keep the *Timer so it can be stopped on return instead of lingering
		// for the full timeout after the flush already completed.
		timer := time.AfterFunc(timeout, func() {
			close(timeoutCh)
		})
		defer timer.Stop()
		client.batchLogger.Flush(timeoutCh)
		// Reduce the remaining budget by the time spent flushing logs.
		timeout -= time.Since(start)
		if timeout <= 0 {
			return false
		}
	}
	return client.Transport.Flush(timeout)
}
// FlushWithContext works like Flush, but blocks at most until the given
// context is canceled: it waits for the underlying Transport to send any
// buffered events to the Sentry server and returns false if the context was
// canceled first, in which case some events may not have been delivered.
//
// Call FlushWithContext before terminating the program to make sure no events
// are unintentionally dropped.
//
// Avoid calling FlushWithContext indiscriminately after each call to
// CaptureEvent, CaptureException, or CaptureMessage. To send events
// synchronously over the network, configure the SDK to use HTTPSyncTransport
// during initialization with Init.
func (client *Client) FlushWithContext(ctx context.Context) bool {
	if bl := client.batchLogger; bl != nil {
		bl.Flush(ctx.Done())
	}
	return client.Transport.FlushWithContext(ctx)
}
// Close cleans up underlying Transport resources.
//
// Close should be called after Flush and before terminating the program,
// otherwise some events may be lost.
//
// NOTE(review): the batch logger (when logs are enabled) is not shut down
// here — confirm whether it needs explicit teardown beyond Flush.
func (client *Client) Close() {
	client.Transport.Close()
}
// EventFromMessage creates an event from the given message string.
func (client *Client) EventFromMessage(message string, level Level) *Event {
if message == "" {
@ -590,14 +630,6 @@ func (client *Client) GetSDKIdentifier() string {
return client.sdkIdentifier
}
// reverse reverses the slice a in place.
func reverse(a []Exception) {
for i := len(a)/2 - 1; i >= 0; i-- {
opp := len(a) - 1 - i
a[i], a[opp] = a[opp], a[i]
}
}
func (client *Client) processEvent(event *Event, hint *EventHint, scope EventModifier) *EventID {
if event == nil {
err := usageError{fmt.Errorf("%s called with nil event", callerFunctionName())}
@ -608,7 +640,7 @@ func (client *Client) processEvent(event *Event, hint *EventHint, scope EventMod
// options.TracesSampler when they are started. Other events
// (errors, messages) are sampled here. Does not apply to check-ins.
if event.Type != transactionType && event.Type != checkInType && !sample(client.options.SampleRate) {
Logger.Println("Event dropped due to SampleRate hit.")
DebugLogger.Println("Event dropped due to SampleRate hit.")
return nil
}
@ -620,17 +652,21 @@ func (client *Client) processEvent(event *Event, hint *EventHint, scope EventMod
if hint == nil {
hint = &EventHint{}
}
if event.Type == transactionType && client.options.BeforeSendTransaction != nil {
// Transaction events
if event = client.options.BeforeSendTransaction(event, hint); event == nil {
Logger.Println("Transaction dropped due to BeforeSendTransaction callback.")
return nil
switch event.Type {
case transactionType:
if client.options.BeforeSendTransaction != nil {
if event = client.options.BeforeSendTransaction(event, hint); event == nil {
DebugLogger.Println("Transaction dropped due to BeforeSendTransaction callback.")
return nil
}
}
} else if event.Type != transactionType && event.Type != checkInType && client.options.BeforeSend != nil {
// All other events
if event = client.options.BeforeSend(event, hint); event == nil {
Logger.Println("Event dropped due to BeforeSend callback.")
return nil
case checkInType: // not a default case, since we shouldn't apply BeforeSend on check-in events
default:
if client.options.BeforeSend != nil {
if event = client.options.BeforeSend(event, hint); event == nil {
DebugLogger.Println("Event dropped due to BeforeSend callback.")
return nil
}
}
}
@ -685,7 +721,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
}
if scope != nil {
event = scope.ApplyToEvent(event, hint)
event = scope.ApplyToEvent(event, hint, client)
if event == nil {
return nil
}
@ -695,7 +731,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Client EventProcessors: %s\n", id)
DebugLogger.Printf("Event dropped by one of the Client EventProcessors: %s\n", id)
return nil
}
}
@ -704,15 +740,11 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Global EventProcessors: %s\n", id)
DebugLogger.Printf("Event dropped by one of the Global EventProcessors: %s\n", id)
return nil
}
}
if event.sdkMetaData.transactionProfile != nil {
event.sdkMetaData.transactionProfile.UpdateFromEvent(event)
}
return event
}

View file

@ -89,8 +89,8 @@ func NewDsn(rawURL string) (*Dsn, error) {
// Port
var port int
if parsedURL.Port() != "" {
port, err = strconv.Atoi(parsedURL.Port())
if p := parsedURL.Port(); p != "" {
port, err = strconv.Atoi(p)
if err != nil {
return nil, &DsnParseError{"invalid port"}
}

View file

@ -39,8 +39,6 @@ func DynamicSamplingContextFromHeader(header []byte) (DynamicSamplingContext, er
}
func DynamicSamplingContextFromTransaction(span *Span) DynamicSamplingContext {
entries := map[string]string{}
hub := hubFromContext(span.Context())
scope := hub.Scope()
client := hub.Client()
@ -52,6 +50,8 @@ func DynamicSamplingContextFromTransaction(span *Span) DynamicSamplingContext {
}
}
entries := make(map[string]string)
if traceID := span.TraceID.String(); traceID != "" {
entries["trace_id"] = traceID
}
@ -78,20 +78,9 @@ func DynamicSamplingContextFromTransaction(span *Span) DynamicSamplingContext {
}
}
if userSegment := scope.user.Segment; userSegment != "" {
entries["user_segment"] = userSegment
}
entries["sampled"] = strconv.FormatBool(span.Sampled.Bool())
if span.Sampled.Bool() {
entries["sampled"] = "true"
} else {
entries["sampled"] = "false"
}
return DynamicSamplingContext{
Entries: entries,
Frozen: true,
}
return DynamicSamplingContext{Entries: entries, Frozen: true}
}
func (d DynamicSamplingContext) HasEntries() bool {
@ -111,13 +100,55 @@ func (d DynamicSamplingContext) String() string {
}
members = append(members, member)
}
if len(members) > 0 {
baggage, err := baggage.New(members...)
if err != nil {
return ""
}
return baggage.String()
if len(members) == 0 {
return ""
}
return ""
baggage, err := baggage.New(members...)
if err != nil {
return ""
}
return baggage.String()
}
// DynamicSamplingContextFromScope constructs a new DynamicSamplingContext from
// a scope and client. Accessing fields on the scope is not thread safe, so this
// function should only be called within scope methods.
func DynamicSamplingContextFromScope(scope *Scope, client *Client) DynamicSamplingContext {
	if client == nil || scope == nil {
		// Without both a scope and a client there is nothing to derive;
		// return an empty, unfrozen context.
		return DynamicSamplingContext{Entries: map[string]string{}, Frozen: false}
	}

	entries := make(map[string]string)
	if traceID := scope.propagationContext.TraceID.String(); traceID != "" {
		entries["trace_id"] = traceID
	}
	if rate := client.options.TracesSampleRate; rate != 0 {
		entries["sample_rate"] = strconv.FormatFloat(rate, 'f', -1, 64)
	}
	if client.dsn != nil && client.dsn.publicKey != "" {
		entries["public_key"] = client.dsn.publicKey
	}
	if release := client.options.Release; release != "" {
		entries["release"] = release
	}
	if environment := client.options.Environment; environment != "" {
		entries["environment"] = environment
	}

	return DynamicSamplingContext{Entries: entries, Frozen: true}
}

21
vendor/github.com/getsentry/sentry-go/echo/LICENSE generated vendored Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Functional Software, Inc. dba Sentry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -10,11 +10,19 @@ import (
"github.com/labstack/echo/v4"
)
// The identifier of the Echo SDK.
const sdkIdentifier = "sentry.go.echo"
const (
// sdkIdentifier is the identifier of the Echo SDK.
sdkIdentifier = "sentry.go.echo"
const valuesKey = "sentry"
const transactionKey = "sentry_transaction"
// valuesKey is used as a key to store the Sentry Hub instance on the echo.Context.
valuesKey = "sentry"
// transactionKey is used as a key to store the Sentry transaction on the echo.Context.
transactionKey = "sentry_transaction"
// errorKey is used as a key to store the error on the echo.Context.
errorKey = "error"
)
type handler struct {
repanic bool
@ -24,7 +32,7 @@ type handler struct {
type Options struct {
// Repanic configures whether Sentry should repanic after recovery, in most cases it should be set to true,
// as echo includes its own Recover middleware what handles http responses.
// as Echo includes its own Recover middleware that handles HTTP responses.
Repanic bool
// WaitForDelivery configures whether you want to block the request before moving forward with the response.
// Because Echo's Recover handler doesn't restart the application,
@ -37,20 +45,20 @@ type Options struct {
// New returns a function that satisfies echo.HandlerFunc interface
// It can be used with Use() methods.
func New(options Options) echo.MiddlewareFunc {
timeout := options.Timeout
if timeout == 0 {
timeout = 2 * time.Second
if options.Timeout == 0 {
options.Timeout = 2 * time.Second
}
return (&handler{
repanic: options.Repanic,
timeout: timeout,
timeout: options.Timeout,
waitForDelivery: options.WaitForDelivery,
}).handle
}
func (h *handler) handle(next echo.HandlerFunc) echo.HandlerFunc {
return func(ctx echo.Context) error {
hub := sentry.GetHubFromContext(ctx.Request().Context())
hub := GetHubFromContext(ctx)
if hub == nil {
hub = sentry.CurrentHub().Clone()
}
@ -70,9 +78,10 @@ func (h *handler) handle(next echo.HandlerFunc) echo.HandlerFunc {
}
options := []sentry.SpanOption{
sentry.ContinueTrace(hub, r.Header.Get(sentry.SentryTraceHeader), r.Header.Get(sentry.SentryBaggageHeader)),
sentry.WithOpName("http.server"),
sentry.ContinueFromRequest(r),
sentry.WithTransactionSource(transactionSource),
sentry.WithSpanOrigin(sentry.SpanOriginEcho),
}
transaction := sentry.StartTransaction(
@ -81,11 +90,11 @@ func (h *handler) handle(next echo.HandlerFunc) echo.HandlerFunc {
options...,
)
transaction.SetData("http.request.method", ctx.Request().Method)
transaction.SetData("http.request.method", r.Method)
defer func() {
status := ctx.Response().Status
if err := ctx.Get("error"); err != nil {
if err := ctx.Get(errorKey); err != nil {
if httpError, ok := err.(*echo.HTTPError); ok {
status = httpError.Code
}
@ -104,7 +113,7 @@ func (h *handler) handle(next echo.HandlerFunc) echo.HandlerFunc {
err := next(ctx)
if err != nil {
// Store the error so it can be used in the deferred function
ctx.Set("error", err)
ctx.Set(errorKey, err)
}
return err
@ -134,6 +143,11 @@ func GetHubFromContext(ctx echo.Context) *sentry.Hub {
return nil
}
// SetHubOnContext attaches *sentry.Hub instance to echo.Context.
// The hub can later be retrieved with GetHubFromContext.
func SetHubOnContext(ctx echo.Context, hub *sentry.Hub) {
	ctx.Set(valuesKey, hub)
}
// GetSpanFromContext retrieves attached *sentry.Span instance from echo.Context.
// If there is no transaction on echo.Context, it will return nil.
func GetSpanFromContext(ctx echo.Context) *sentry.Span {

View file

@ -2,6 +2,7 @@ package sentry
import (
"context"
"fmt"
"sync"
"time"
)
@ -292,9 +293,14 @@ func (hub *Hub) AddBreadcrumb(breadcrumb *Breadcrumb, hint *BreadcrumbHint) {
return
}
max := client.options.MaxBreadcrumbs
if max < 0 {
limit := client.options.MaxBreadcrumbs
switch {
case limit < 0:
return
case limit == 0:
limit = defaultMaxBreadcrumbs
case limit > maxBreadcrumbs:
limit = maxBreadcrumbs
}
if client.options.BeforeBreadcrumb != nil {
@ -302,18 +308,12 @@ func (hub *Hub) AddBreadcrumb(breadcrumb *Breadcrumb, hint *BreadcrumbHint) {
hint = &BreadcrumbHint{}
}
if breadcrumb = client.options.BeforeBreadcrumb(breadcrumb, hint); breadcrumb == nil {
Logger.Println("breadcrumb dropped due to BeforeBreadcrumb callback.")
DebugLogger.Println("breadcrumb dropped due to BeforeBreadcrumb callback.")
return
}
}
if max == 0 {
max = defaultMaxBreadcrumbs
} else if max > maxBreadcrumbs {
max = maxBreadcrumbs
}
hub.Scope().AddBreadcrumb(breadcrumb, max)
hub.Scope().AddBreadcrumb(breadcrumb, limit)
}
// Recover calls the method of a same name on currently bound Client instance
@ -365,6 +365,56 @@ func (hub *Hub) Flush(timeout time.Duration) bool {
return client.Flush(timeout)
}
// FlushWithContext works like Flush but blocks at most until the provided
// context is canceled: it waits for the underlying Transport to send any
// buffered events to the Sentry server, returning false if the context was
// canceled first (in which case some events may not have been delivered).
//
// Call it before terminating the program so events are not unintentionally
// dropped. Avoid calling it after every capture; to send events synchronously
// over the network, configure the SDK with HTTPSyncTransport during Init.
func (hub *Hub) FlushWithContext(ctx context.Context) bool {
	if client := hub.Client(); client != nil {
		return client.FlushWithContext(ctx)
	}
	return false
}
// GetTraceparent returns the current Sentry traceparent string, suitable for
// use as an HTTP header or HTML meta tag value. It is context aware: the value
// comes from the current span when one is active, and otherwise from the
// scope's propagation context.
func (hub *Hub) GetTraceparent() string {
	scope := hub.Scope()
	if span := scope.span; span != nil {
		return span.ToSentryTrace()
	}
	pc := scope.propagationContext
	return fmt.Sprintf("%s-%s", pc.TraceID, pc.SpanID)
}
// GetBaggage returns the current Sentry baggage string, suitable for use as an
// HTTP header or HTML meta tag value. It is context aware: the value comes
// from the current span when one is active, and otherwise from the scope's
// propagation context.
func (hub *Hub) GetBaggage() string {
	scope := hub.Scope()
	if span := scope.span; span != nil {
		return span.ToBaggage()
	}
	return scope.propagationContext.DynamicSamplingContext.String()
}
// HasHubOnContext checks whether Hub instance is bound to a given Context struct.
func HasHubOnContext(ctx context.Context) bool {
_, ok := ctx.Value(HubContextKey).(*Hub)

View file

@ -32,7 +32,7 @@ func (mi *modulesIntegration) processor(event *Event, _ *EventHint) *Event {
mi.once.Do(func() {
info, ok := debug.ReadBuildInfo()
if !ok {
Logger.Print("The Modules integration is not available in binaries built without module support.")
DebugLogger.Print("The Modules integration is not available in binaries built without module support.")
return
}
mi.modules = extractModules(info)
@ -141,7 +141,7 @@ func (iei *ignoreErrorsIntegration) processor(event *Event, _ *EventHint) *Event
for _, suspect := range suspects {
for _, pattern := range iei.ignoreErrors {
if pattern.Match([]byte(suspect)) || strings.Contains(suspect, pattern.String()) {
Logger.Printf("Event dropped due to being matched by `IgnoreErrors` option."+
DebugLogger.Printf("Event dropped due to being matched by `IgnoreErrors` option."+
"| Value matched: %s | Filter used: %s", suspect, pattern)
return nil
}
@ -203,7 +203,7 @@ func (iei *ignoreTransactionsIntegration) processor(event *Event, _ *EventHint)
for _, pattern := range iei.ignoreTransactions {
if pattern.Match([]byte(suspect)) || strings.Contains(suspect, pattern.String()) {
Logger.Printf("Transaction dropped due to being matched by `IgnoreTransactions` option."+
DebugLogger.Printf("Transaction dropped due to being matched by `IgnoreTransactions` option."+
"| Value matched: %s | Filter used: %s", suspect, pattern)
return nil
}

View file

@ -8,25 +8,24 @@ import (
"net"
"net/http"
"reflect"
"slices"
"strings"
"time"
"github.com/getsentry/sentry-go/attribute"
)
// eventType is the type of an error event.
const eventType = "event"
// transactionType is the type of a transaction event.
const transactionType = "transaction"
// profileType is the type of a profile event.
// currently, profiles are always sent as part of a transaction event.
const profileType = "profile"
// checkInType is the type of a check in event.
const checkInType = "check_in"
// metricType is the type of a metric event.
const metricType = "statsd"
// logEvent holds the envelope item type and content type used when sending
// batched log records to Sentry.
var logEvent = struct {
	Type        string
	ContentType string
}{
	"log",
	"application/vnd.sentry.items.log+json",
}
// Level marks the severity of the event.
type Level string
@ -40,7 +39,7 @@ const (
LevelFatal Level = "fatal"
)
// SdkInfo contains all metadata about about the SDK being used.
// SdkInfo contains all metadata about the SDK.
type SdkInfo struct {
Name string `json:"name,omitempty"`
Version string `json:"version,omitempty"`
@ -102,6 +101,60 @@ func (b *Breadcrumb) MarshalJSON() ([]byte, error) {
return json.Marshal((*breadcrumb)(b))
}
// Logger is Sentry's structured-logging interface. It provides a Print-style
// and a Printf-style emitter for each log level, plus an io.Writer adapter and
// an attribute API; every level method takes a context used for trace
// correlation.
type Logger interface {
	// Write implements the io.Writer interface. Currently, the [sentry.Hub] is
	// context aware, in order to get the correct trace correlation. Using this
	// might result in incorrect span association on logs. If you need to use
	// Write it is recommended to create a NewLogger so that the associated context
	// is passed correctly.
	Write(p []byte) (n int, err error)
	// Trace emits a [LogLevelTrace] log to Sentry.
	// Arguments are handled in the manner of [fmt.Print].
	Trace(ctx context.Context, v ...interface{})
	// Debug emits a [LogLevelDebug] log to Sentry.
	// Arguments are handled in the manner of [fmt.Print].
	Debug(ctx context.Context, v ...interface{})
	// Info emits a [LogLevelInfo] log to Sentry.
	// Arguments are handled in the manner of [fmt.Print].
	Info(ctx context.Context, v ...interface{})
	// Warn emits a [LogLevelWarn] log to Sentry.
	// Arguments are handled in the manner of [fmt.Print].
	Warn(ctx context.Context, v ...interface{})
	// Error emits a [LogLevelError] log to Sentry.
	// Arguments are handled in the manner of [fmt.Print].
	Error(ctx context.Context, v ...interface{})
	// Fatal emits a [LogLevelFatal] log to Sentry followed by a call to [os.Exit](1).
	// Arguments are handled in the manner of [fmt.Print].
	Fatal(ctx context.Context, v ...interface{})
	// Panic emits a [LogLevelFatal] log to Sentry followed by a call to panic().
	// Arguments are handled in the manner of [fmt.Print].
	Panic(ctx context.Context, v ...interface{})
	// Tracef emits a [LogLevelTrace] log to Sentry.
	// Arguments are handled in the manner of [fmt.Printf].
	Tracef(ctx context.Context, format string, v ...interface{})
	// Debugf emits a [LogLevelDebug] log to Sentry.
	// Arguments are handled in the manner of [fmt.Printf].
	Debugf(ctx context.Context, format string, v ...interface{})
	// Infof emits a [LogLevelInfo] log to Sentry.
	// Arguments are handled in the manner of [fmt.Printf].
	Infof(ctx context.Context, format string, v ...interface{})
	// Warnf emits a [LogLevelWarn] log to Sentry.
	// Arguments are handled in the manner of [fmt.Printf].
	Warnf(ctx context.Context, format string, v ...interface{})
	// Errorf emits a [LogLevelError] log to Sentry.
	// Arguments are handled in the manner of [fmt.Printf].
	Errorf(ctx context.Context, format string, v ...interface{})
	// Fatalf emits a [LogLevelFatal] log to Sentry followed by a call to [os.Exit](1).
	// Arguments are handled in the manner of [fmt.Printf].
	Fatalf(ctx context.Context, format string, v ...interface{})
	// Panicf emits a [LogLevelFatal] log to Sentry followed by a call to panic().
	// Arguments are handled in the manner of [fmt.Printf].
	Panicf(ctx context.Context, format string, v ...interface{})
	// SetAttributes allows attaching parameters to the log message using the attribute API.
	SetAttributes(...attribute.Builder)
}
// Attachment allows associating files with your events to aid in investigation.
// An event may contain one or more attachments.
type Attachment struct {
@ -118,32 +171,27 @@ type User struct {
IPAddress string `json:"ip_address,omitempty"`
Username string `json:"username,omitempty"`
Name string `json:"name,omitempty"`
Segment string `json:"segment,omitempty"`
Data map[string]string `json:"data,omitempty"`
}
func (u User) IsEmpty() bool {
if len(u.ID) > 0 {
if u.ID != "" {
return false
}
if len(u.Email) > 0 {
if u.Email != "" {
return false
}
if len(u.IPAddress) > 0 {
if u.IPAddress != "" {
return false
}
if len(u.Username) > 0 {
if u.Username != "" {
return false
}
if len(u.Name) > 0 {
return false
}
if len(u.Segment) > 0 {
if u.Name != "" {
return false
}
@ -166,10 +214,36 @@ type Request struct {
}
var sensitiveHeaders = map[string]struct{}{
"Authorization": {},
"Cookie": {},
"X-Forwarded-For": {},
"X-Real-Ip": {},
"_csrf": {},
"_csrf_token": {},
"_session": {},
"_xsrf": {},
"Api-Key": {},
"Apikey": {},
"Auth": {},
"Authorization": {},
"Cookie": {},
"Credentials": {},
"Csrf": {},
"Csrf-Token": {},
"Csrftoken": {},
"Ip-Address": {},
"Passwd": {},
"Password": {},
"Private-Key": {},
"Privatekey": {},
"Proxy-Authorization": {},
"Remote-Addr": {},
"Secret": {},
"Session": {},
"Sessionid": {},
"Token": {},
"User-Session": {},
"X-Api-Key": {},
"X-Csrftoken": {},
"X-Forwarded-For": {},
"X-Real-Ip": {},
"XSRF-TOKEN": {},
}
// NewRequest returns a new Sentry Request from the given http.Request.
@ -194,6 +268,7 @@ func NewRequest(r *http.Request) *Request {
// attach more than one Cookie header field.
cookies = r.Header.Get("Cookie")
headers = make(map[string]string, len(r.Header))
for k, v := range r.Header {
headers[k] = strings.Join(v, ",")
}
@ -237,8 +312,7 @@ type Mechanism struct {
// SetUnhandled indicates that the exception is an unhandled exception, i.e.
// from a panic.
func (m *Mechanism) SetUnhandled() {
h := false
m.Handled = &h
m.Handled = Pointer(false)
}
// Exception specifies an error that occurred.
@ -254,8 +328,7 @@ type Exception struct {
// SDKMetaData is a struct to stash data which is needed at some point in the SDK's event processing pipeline
// but which shouldn't get send to Sentry.
type SDKMetaData struct {
dsc DynamicSamplingContext
transactionProfile *profileInfo
dsc DynamicSamplingContext
}
// Contains information about how the name of the transaction was determined.
@ -323,7 +396,6 @@ type Event struct {
Exception []Exception `json:"exception,omitempty"`
DebugMeta *DebugMeta `json:"debug_meta,omitempty"`
Attachments []*Attachment `json:"-"`
Metrics []Metric `json:"-"`
// The fields below are only relevant for transactions.
@ -337,6 +409,9 @@ type Event struct {
CheckIn *CheckIn `json:"check_in,omitempty"`
MonitorConfig *MonitorConfig `json:"monitor_config,omitempty"`
// The fields below are only relevant for logs
Logs []Log `json:"items,omitempty"`
// The fields below are not part of the final JSON payload.
sdkMetaData SDKMetaData
@ -397,12 +472,13 @@ func (e *Event) SetException(exception error, maxErrorDepth int) {
}
// event.Exception should be sorted such that the most recent error is last.
reverse(e.Exception)
slices.Reverse(e.Exception)
for i := range e.Exception {
e.Exception[i].Mechanism = &Mechanism{
IsExceptionGroup: true,
ExceptionID: i,
Type: "generic",
}
if i == 0 {
continue
@ -428,7 +504,9 @@ func (e *Event) MarshalJSON() ([]byte, error) {
// and a few type tricks.
if e.Type == transactionType {
return e.transactionMarshalJSON()
} else if e.Type == checkInType {
}
if e.Type == checkInType {
return e.checkInMarshalJSON()
}
return e.defaultMarshalJSON()
@ -556,3 +634,17 @@ type EventHint struct {
Request *http.Request
Response *http.Response
}
// Log is a single structured log record sent to Sentry as part of a log
// envelope item (see the Logs field on Event).
type Log struct {
	Timestamp  time.Time            `json:"timestamp,omitempty"`
	TraceID    TraceID              `json:"trace_id,omitempty"`
	Level      LogLevel             `json:"level"`
	Severity   int                  `json:"severity_number,omitempty"`
	Body       string               `json:"body,omitempty"`
	Attributes map[string]Attribute `json:"attributes,omitempty"`
}

// Attribute is a typed value attached to a Log record; Type names the wire
// type of Value (e.g. "string", "integer").
type Attribute struct {
	Value any    `json:"value"`
	Type  string `json:"type"`
}

View file

@ -1,15 +0,0 @@
## Benchmark results
```
goos: windows
goarch: amd64
pkg: github.com/getsentry/sentry-go/internal/trace
cpu: 12th Gen Intel(R) Core(TM) i7-12700K
BenchmarkEqualBytes-20 44323621 26.08 ns/op
BenchmarkStringEqual-20 60980257 18.27 ns/op
BenchmarkEqualPrefix-20 41369181 31.12 ns/op
BenchmarkFullParse-20 702012 1507 ns/op 1353.42 MB/s 1024 B/op 6 allocs/op
BenchmarkFramesIterator-20 1229971 969.3 ns/op 896 B/op 5 allocs/op
BenchmarkFramesReversedIterator-20 1271061 944.5 ns/op 896 B/op 5 allocs/op
BenchmarkSplitOnly-20 2250800 534.0 ns/op 3818.23 MB/s 128 B/op 1 allocs/op
```

View file

@ -1,217 +0,0 @@
package traceparser
import (
"bytes"
"strconv"
)
// blockSeparator and lineSeparator delimit goroutine blocks and stack lines in
// the output of runtime.Stack(buf, true).
var (
	blockSeparator = []byte("\n\n")
	lineSeparator  = []byte("\n")
)

// Parse parses a multi-stacktrace text dump produced by
// runtime.Stack([]byte, all=true). The parser prioritizes performance but
// requires the input to be well-formed in order to return correct data.
// See https://github.com/golang/go/blob/go1.20.4/src/runtime/mprof.go#L1191
func Parse(data []byte) TraceCollection {
	if len(data) == 0 {
		return TraceCollection{}
	}
	return TraceCollection{blocks: bytes.Split(data, blockSeparator)}
}
// TraceCollection is the set of per-goroutine stacktrace blocks parsed from a
// runtime.Stack dump.
type TraceCollection struct {
	// blocks holds the raw text of each goroutine block, header included.
	blocks [][]byte
}

// Length returns the number of stacktrace blocks in the collection.
func (it TraceCollection) Length() int {
	return len(it.blocks)
}
// Item returns the stacktrace block at the given index.
func (it *TraceCollection) Item(i int) Trace {
	// The first block may carry a leading separator and the last one a
	// trailing separator. Note: Trim() doesn't copy for a single-character
	// cutset under 0x80 — it just re-slices the original.
	block := it.blocks[i]
	switch i {
	case 0:
		block = bytes.TrimLeft(block, "\n")
	case len(it.blocks) - 1:
		block = bytes.TrimRight(block, "\n")
	}

	newline := bytes.IndexByte(block, '\n')
	if newline < 0 {
		// Header-only block: no frame lines follow.
		return Trace{header: block}
	}
	return Trace{
		header: block[:newline],
		data:   block[newline+1:],
	}
}
// Trace represents a single stacktrace block, identified by a Goroutine ID and
// a sequence of Frames.
type Trace struct {
	header []byte
	data   []byte
}

var goroutinePrefix = []byte("goroutine ")

// GoID parses the Goroutine ID from the header, returning 0 when the header
// does not have the expected "goroutine N ..." shape.
func (t *Trace) GoID() (id uint64) {
	if !bytes.HasPrefix(t.header, goroutinePrefix) {
		return 0
	}
	rest := t.header[len(goroutinePrefix):]
	if space := bytes.IndexByte(rest, ' '); space >= 0 {
		id, _ = strconv.ParseUint(string(rest[:space]), 10, 64)
	}
	return id
}

// UniqueIdentifier can be used as a map key to identify the trace.
func (t *Trace) UniqueIdentifier() []byte {
	return t.data
}
// Frames returns an iterator over the stack frames in top-down order.
func (t *Trace) Frames() FrameIterator {
	lines := bytes.Split(t.data, lineSeparator)
	return FrameIterator{lines: lines, i: 0, len: len(lines)}
}

// FramesReversed returns an iterator over the stack frames in bottom-up order.
func (t *Trace) FramesReversed() ReverseFrameIterator {
	lines := bytes.Split(t.data, lineSeparator)
	return ReverseFrameIterator{lines: lines, i: len(lines)}
}
// framesElided is the marker line the runtime emits when a stack is truncated.
const framesElided = "...additional frames elided..."

// FrameIterator iterates over stack frames.
type FrameIterator struct {
	lines [][]byte
	i     int
	len   int
}

// Next returns the next frame, or nil if there are none.
func (it *FrameIterator) Next() Frame {
	return Frame{it.popLine(), it.popLine()}
}

// popLine returns the next line, skipping the elided-frames marker.
func (it *FrameIterator) popLine() []byte {
	if it.i >= it.len {
		return nil
	}
	line := it.lines[it.i]
	it.i++
	if string(line) == framesElided {
		return it.popLine()
	}
	return line
}

// HasNext returns true if there are values to be read.
func (it *FrameIterator) HasNext() bool {
	return it.i < it.len
}

// LengthUpperBound returns the maximum number of elements this stack may
// contain. The actual number may be lower because of elided frames. As such,
// the returned value cannot be used to iterate over the frames but may be used
// to reserve capacity.
func (it *FrameIterator) LengthUpperBound() int {
	return it.len / 2
}
// ReverseFrameIterator iterates over stack frames in reverse order.
type ReverseFrameIterator struct {
	lines [][]byte
	i     int
}

// Next returns the next frame, or nil if there are none.
func (it *ReverseFrameIterator) Next() Frame {
	line2 := it.popLine()
	return Frame{it.popLine(), line2}
}

// popLine returns the previous line, skipping the elided-frames marker.
func (it *ReverseFrameIterator) popLine() []byte {
	it.i--
	if it.i < 0 {
		return nil
	}
	line := it.lines[it.i]
	if string(line) == framesElided {
		return it.popLine()
	}
	return line
}

// HasNext returns true if there are values to be read.
func (it *ReverseFrameIterator) HasNext() bool {
	return it.i > 1
}

// LengthUpperBound returns the maximum number of elements this stack may
// contain. The actual number may be lower because of elided frames. As such,
// the returned value cannot be used to iterate over the frames but may be used
// to reserve capacity.
func (it *ReverseFrameIterator) LengthUpperBound() int {
	return len(it.lines) / 2
}
// Frame is a single stack frame: the function line followed by the
// file/line/offset line from a runtime.Stack dump.
type Frame struct {
	line1 []byte
	line2 []byte
}

// UniqueIdentifier can be used as a map key to identify the frame.
func (f *Frame) UniqueIdentifier() []byte {
	// line2 contains file path, line number and program-counter offset from
	// the beginning of a function, e.g.
	// C:/Users/name/scoop/apps/go/current/src/testing/testing.go:1906 +0x63a
	return f.line2
}

var createdByPrefix = []byte("created by ")

// Func extracts the function name from the first line of the frame.
func (f *Frame) Func() []byte {
	if bytes.HasPrefix(f.line1, createdByPrefix) {
		// Since go1.21, the line may end with " in goroutine X", saying which
		// goroutine created this one. We currently have no use for that, so
		// cut it off.
		name := f.line1[len(createdByPrefix):]
		if space := bytes.IndexByte(name, ' '); space >= 0 {
			return name[:space]
		}
		return name
	}
	if open := bytes.LastIndexByte(f.line1, '('); open >= 0 {
		return f.line1[:open]
	}
	return f.line1
}

// File returns the source path and line number parsed from the second line of
// the frame; lineNumber is 0 when no line number is present.
func (f *Frame) File() (path []byte, lineNumber int) {
	line := f.line2
	if len(line) > 0 && line[0] == '\t' {
		line = line[1:]
	}
	if space := bytes.IndexByte(line, ' '); space >= 0 {
		line = line[:space]
	}
	colon := bytes.LastIndexByte(line, ':')
	if colon < 0 {
		return line, 0
	}
	lineNumber, _ = strconv.Atoi(string(line[colon+1:]))
	return line[:colon], lineNumber
}

230
vendor/github.com/getsentry/sentry-go/log.go generated vendored Normal file
View file

@ -0,0 +1,230 @@
package sentry
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/getsentry/sentry-go/attribute"
)
type LogLevel string
const (
LogLevelTrace LogLevel = "trace"
LogLevelDebug LogLevel = "debug"
LogLevelInfo LogLevel = "info"
LogLevelWarn LogLevel = "warn"
LogLevelError LogLevel = "error"
LogLevelFatal LogLevel = "fatal"
)
// Numeric severities paired with the log levels above; the spacing of four
// between levels presumably mirrors OpenTelemetry SeverityNumber ranges —
// TODO confirm.
const (
	LogSeverityTrace   int = 1
	LogSeverityDebug   int = 5
	LogSeverityInfo    int = 9
	LogSeverityWarning int = 13
	LogSeverityError   int = 17
	LogSeverityFatal   int = 21
)
// mapTypesToStr maps attribute value types to the wire names Sentry expects.
// The empty string marks a type that cannot be serialized.
var mapTypesToStr = map[attribute.Type]string{
	attribute.INVALID: "",
	attribute.BOOL:    "boolean",
	attribute.INT64:   "integer",
	attribute.FLOAT64: "double",
	attribute.STRING:  "string",
}

// sentryLogger emits structured logs through the client's batch logger.
type sentryLogger struct {
	client     *Client
	attributes map[string]Attribute // pending attributes from SetAttributes, consumed by the next log call
}
// NewLogger returns a Logger that emits logs to Sentry. If logging is turned off, all logs get discarded.
func NewLogger(ctx context.Context) Logger {
	hub := GetHubFromContext(ctx)
	if hub == nil {
		hub = CurrentHub()
	}
	// Structured logging only works when the client was built with
	// EnableLogs, i.e. it owns a batch logger.
	if client := hub.Client(); client != nil && client.batchLogger != nil {
		return &sentryLogger{client: client, attributes: make(map[string]Attribute)}
	}
	DebugLogger.Println("fallback to noopLogger: enableLogs disabled")
	return &noopLogger{} // fallback: does nothing
}
// Write implements io.Writer by logging p at info level; it always reports
// the full length of p as written.
func (l *sentryLogger) Write(p []byte) (int, error) {
	// Trim trailing newlines so Sentry does not receive blank lines.
	message := strings.TrimRight(string(p), "\n")
	l.log(context.Background(), LogLevelInfo, LogSeverityInfo, message)
	return len(p), nil
}
// log assembles one Sentry log entry and hands it to the client's batch
// logger. message is either a pre-formatted string (args empty) or an fmt
// format string expanded with args. Empty messages are dropped.
func (l *sentryLogger) log(ctx context.Context, level LogLevel, severity int, message string, args ...interface{}) {
	if message == "" {
		return
	}
	hub := GetHubFromContext(ctx)
	if hub == nil {
		hub = CurrentHub()
	}
	var traceID TraceID
	var spanID SpanID
	span := hub.Scope().span
	if span != nil {
		traceID = span.TraceID
		spanID = span.SpanID
	} else {
		// No active span: correlate the log via the scope's propagation context.
		traceID = hub.Scope().propagationContext.TraceID
	}
	attrs := map[string]Attribute{}
	if len(args) > 0 {
		// Record the raw template plus each parameter so Sentry can group
		// formatted messages together.
		attrs["sentry.message.template"] = Attribute{
			Value: message, Type: "string",
		}
		for i, p := range args {
			attrs[fmt.Sprintf("sentry.message.parameters.%d", i)] = Attribute{
				Value: fmt.Sprint(p), Type: "string",
			}
		}
	}
	// If `log` was called with SetAttributes, pass the attributes to attrs
	if len(l.attributes) > 0 {
		for k, v := range l.attributes {
			attrs[k] = v
		}
		// flush attributes from logger after send
		clear(l.attributes)
	}
	// Set default attributes
	if release := l.client.options.Release; release != "" {
		attrs["sentry.release"] = Attribute{Value: release, Type: "string"}
	}
	if environment := l.client.options.Environment; environment != "" {
		attrs["sentry.environment"] = Attribute{Value: environment, Type: "string"}
	}
	if serverName := l.client.options.ServerName; serverName != "" {
		attrs["sentry.server.address"] = Attribute{Value: serverName, Type: "string"}
	} else if serverAddr, err := os.Hostname(); err == nil {
		// Fall back to the machine hostname when no ServerName is configured.
		attrs["sentry.server.address"] = Attribute{Value: serverAddr, Type: "string"}
	}
	scope := hub.Scope()
	if scope != nil {
		user := scope.user
		if !user.IsEmpty() {
			if user.ID != "" {
				attrs["user.id"] = Attribute{Value: user.ID, Type: "string"}
			}
			if user.Name != "" {
				attrs["user.name"] = Attribute{Value: user.Name, Type: "string"}
			}
			if user.Email != "" {
				attrs["user.email"] = Attribute{Value: user.Email, Type: "string"}
			}
		}
	}
	// An all-zero span ID means there is no parent span to reference.
	if spanID.String() != "0000000000000000" {
		attrs["sentry.trace.parent_span_id"] = Attribute{Value: spanID.String(), Type: "string"}
	}
	if sdkIdentifier := l.client.sdkIdentifier; sdkIdentifier != "" {
		attrs["sentry.sdk.name"] = Attribute{Value: sdkIdentifier, Type: "string"}
	}
	if sdkVersion := l.client.sdkVersion; sdkVersion != "" {
		attrs["sentry.sdk.version"] = Attribute{Value: sdkVersion, Type: "string"}
	}
	log := &Log{
		Timestamp:  time.Now(),
		TraceID:    traceID,
		Level:      level,
		Severity:   severity,
		Body:       fmt.Sprintf(message, args...),
		Attributes: attrs,
	}
	// BeforeSendLog may rewrite the entry or drop it by returning nil.
	if l.client.options.BeforeSendLog != nil {
		log = l.client.options.BeforeSendLog(log)
	}
	if log != nil {
		l.client.batchLogger.logCh <- *log
	}
	if l.client.options.Debug {
		// NOTE(review): message is used as a Printf format even when args is
		// empty, so a plain message containing '%' is mangled here — confirm.
		DebugLogger.Printf(message, args...)
	}
}
// SetAttributes stages attributes that will be attached to (and cleared by)
// the next emitted log entry. Attributes with unsupported value types are
// dropped with a debug message.
func (l *sentryLogger) SetAttributes(attrs ...attribute.Builder) {
	for _, builder := range attrs {
		typeName, known := mapTypesToStr[builder.Value.Type()]
		if !known || typeName == "" {
			DebugLogger.Printf("invalid attribute type set: %v", typeName)
			continue
		}
		l.attributes[builder.Key] = Attribute{
			Value: builder.Value.AsInterface(),
			Type:  typeName,
		}
	}
}
// Trace logs v at trace level using fmt.Sprint formatting.
func (l *sentryLogger) Trace(ctx context.Context, v ...interface{}) {
	l.log(ctx, LogLevelTrace, LogSeverityTrace, fmt.Sprint(v...))
}

// Debug logs v at debug level.
func (l *sentryLogger) Debug(ctx context.Context, v ...interface{}) {
	l.log(ctx, LogLevelDebug, LogSeverityDebug, fmt.Sprint(v...))
}

// Info logs v at info level.
func (l *sentryLogger) Info(ctx context.Context, v ...interface{}) {
	l.log(ctx, LogLevelInfo, LogSeverityInfo, fmt.Sprint(v...))
}

// Warn logs v at warn level.
func (l *sentryLogger) Warn(ctx context.Context, v ...interface{}) {
	l.log(ctx, LogLevelWarn, LogSeverityWarning, fmt.Sprint(v...))
}

// Error logs v at error level.
func (l *sentryLogger) Error(ctx context.Context, v ...interface{}) {
	l.log(ctx, LogLevelError, LogSeverityError, fmt.Sprint(v...))
}

// Fatal logs v at fatal level and then terminates the process.
func (l *sentryLogger) Fatal(ctx context.Context, v ...interface{}) {
	l.log(ctx, LogLevelFatal, LogSeverityFatal, fmt.Sprint(v...))
	os.Exit(1)
}

// Panic logs v at fatal level and then panics with the same message.
func (l *sentryLogger) Panic(ctx context.Context, v ...interface{}) {
	l.log(ctx, LogLevelFatal, LogSeverityFatal, fmt.Sprint(v...))
	panic(fmt.Sprint(v...))
}
// Tracef logs a formatted message at trace level; format and v are expanded
// by log and also recorded as template/parameter attributes.
func (l *sentryLogger) Tracef(ctx context.Context, format string, v ...interface{}) {
	l.log(ctx, LogLevelTrace, LogSeverityTrace, format, v...)
}

// Debugf logs a formatted message at debug level.
func (l *sentryLogger) Debugf(ctx context.Context, format string, v ...interface{}) {
	l.log(ctx, LogLevelDebug, LogSeverityDebug, format, v...)
}

// Infof logs a formatted message at info level.
func (l *sentryLogger) Infof(ctx context.Context, format string, v ...interface{}) {
	l.log(ctx, LogLevelInfo, LogSeverityInfo, format, v...)
}

// Warnf logs a formatted message at warn level.
func (l *sentryLogger) Warnf(ctx context.Context, format string, v ...interface{}) {
	l.log(ctx, LogLevelWarn, LogSeverityWarning, format, v...)
}

// Errorf logs a formatted message at error level.
func (l *sentryLogger) Errorf(ctx context.Context, format string, v ...interface{}) {
	l.log(ctx, LogLevelError, LogSeverityError, format, v...)
}

// Fatalf logs a formatted message at fatal level and terminates the process.
func (l *sentryLogger) Fatalf(ctx context.Context, format string, v ...interface{}) {
	l.log(ctx, LogLevelFatal, LogSeverityFatal, format, v...)
	os.Exit(1)
}

// Panicf logs a formatted message at fatal level and then panics; note the
// panic value is the unformatted fmt.Sprint of v, not the expanded format.
func (l *sentryLogger) Panicf(ctx context.Context, format string, v ...interface{}) {
	l.log(ctx, LogLevelFatal, LogSeverityFatal, format, v...)
	panic(fmt.Sprint(v...))
}

65
vendor/github.com/getsentry/sentry-go/log_fallback.go generated vendored Normal file
View file

@ -0,0 +1,65 @@
package sentry
import (
"context"
"fmt"
"os"
"github.com/getsentry/sentry-go/attribute"
)
// Fallback, no-op logger if logging is disabled.
//
// Every method only reports via DebugLogger that the line is being dropped.
// Fatal/Panic still terminate, preserving the contract of those levels, and
// Write rejects all bytes with an error.
type noopLogger struct{}

func (*noopLogger) Trace(_ context.Context, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelTrace)
}

func (*noopLogger) Debug(_ context.Context, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelDebug)
}

func (*noopLogger) Info(_ context.Context, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelInfo)
}

func (*noopLogger) Warn(_ context.Context, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelWarn)
}

func (*noopLogger) Error(_ context.Context, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelError)
}

// Fatal still exits even though nothing is sent.
func (*noopLogger) Fatal(_ context.Context, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelFatal)
	os.Exit(1)
}

// Panic still panics even though nothing is sent.
func (*noopLogger) Panic(_ context.Context, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelFatal)
	panic(fmt.Sprintf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelFatal))
}

func (*noopLogger) Tracef(_ context.Context, _ string, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelTrace)
}

func (*noopLogger) Debugf(_ context.Context, _ string, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelDebug)
}

func (*noopLogger) Infof(_ context.Context, _ string, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelInfo)
}

func (*noopLogger) Warnf(_ context.Context, _ string, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelWarn)
}

func (*noopLogger) Errorf(_ context.Context, _ string, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelError)
}

func (*noopLogger) Fatalf(_ context.Context, _ string, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelFatal)
	os.Exit(1)
}

func (*noopLogger) Panicf(_ context.Context, _ string, _ ...interface{}) {
	DebugLogger.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelFatal)
	panic(fmt.Sprintf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelFatal))
}

// SetAttributes is a no-op; attributes are only meaningful with logging on.
func (*noopLogger) SetAttributes(...attribute.Builder) {
	DebugLogger.Printf("No attributes attached. Turn on logging via EnableLogs")
}

// Write reports 0 bytes written plus an error, so io.Writer users notice.
func (*noopLogger) Write(_ []byte) (n int, err error) {
	return 0, fmt.Errorf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelInfo)
}

21
vendor/github.com/getsentry/sentry-go/logrus/LICENSE generated vendored Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Functional Software, Inc. dba Sentry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

152
vendor/github.com/getsentry/sentry-go/logrus/README.md generated vendored Normal file
View file

@ -0,0 +1,152 @@
<p align="center">
<a href="https://sentry.io" target="_blank" align="center">
<img src="https://sentry-brand.storage.googleapis.com/sentry-logo-black.png" width="280">
</a>
<br />
</p>
# Official Sentry Logrus Hook for Sentry-go SDK
**Go.dev Documentation:** https://pkg.go.dev/github.com/getsentry/sentry-go/logrus
**Example Usage:** https://github.com/getsentry/sentry-go/tree/master/_examples/logrus
## Installation
```sh
go get github.com/getsentry/sentry-go/logrus
```
## Usage
```go
import (
"fmt"
"os"
"time"
"github.com/sirupsen/logrus"
"github.com/getsentry/sentry-go"
sentrylogrus "github.com/getsentry/sentry-go/logrus"
)
func main() {
// Initialize Logrus
logger := logrus.New()
// Log DEBUG and higher level logs to STDERR
logger.Level = logrus.DebugLevel
logger.Out = os.Stderr
// send logs on InfoLevel
logHook, err := sentrylogrus.NewLogHook(
[]logrus.Level{logrus.InfoLevel},
sentry.ClientOptions{
Dsn: "your-public-dsn",
BeforeSend: func(event *sentry.Event, hint *sentry.EventHint) *sentry.Event {
if hint.Context != nil {
if req, ok := hint.Context.Value(sentry.RequestContextKey).(*http.Request); ok {
// You have access to the original Request
fmt.Println(req)
}
}
fmt.Println(event)
return event
},
// need to have logs enabled
EnableLogs: true,
Debug: true,
AttachStacktrace: true,
})
if err != nil {
	panic(err)
}
// send events on Error, Fatal, Panic levels
eventHook, err := sentrylogrus.NewEventHook([]logrus.Level{
logrus.ErrorLevel,
logrus.FatalLevel,
logrus.PanicLevel,
}, sentry.ClientOptions{
Dsn: "your-public-dsn",
BeforeSend: func(event *sentry.Event, hint *sentry.EventHint) *sentry.Event {
if hint.Context != nil {
if req, ok := hint.Context.Value(sentry.RequestContextKey).(*http.Request); ok {
// You have access to the original Request
fmt.Println(req)
}
}
fmt.Println(event)
return event
},
Debug: true,
AttachStacktrace: true,
})
if err != nil {
panic(err)
}
defer eventHook.Flush(5 * time.Second)
defer logHook.Flush(5 * time.Second)
logger.AddHook(eventHook)
logger.AddHook(logHook)
// Flushes before calling os.Exit(1) when using logger.Fatal
// (else all defers are not called, and Sentry does not have time to send the event)
logrus.RegisterExitHandler(func() {
eventHook.Flush(5 * time.Second)
logHook.Flush(5 * time.Second)
})
// Log a InfoLevel entry STDERR which is sent as a log to Sentry
logger.Infof("Application has started")
// Log an error to STDERR which is also sent to Sentry
logger.Errorf("oh no!")
// Log a fatal error to STDERR, which sends an event to Sentry and terminates the application
logger.Fatalf("can't continue...")
// Example of logging with attributes
logger.WithField("user", "test-user").Error("An error occurred")
}
```
## Configuration
The `sentrylogrus` package accepts an array of `logrus.Level` and a struct of `sentry.ClientOptions` that allows you to configure how the hook will behave.
The `logrus.Level` array defines which log levels should be sent to Sentry.
In addition, the `Hook` returned by the constructors (`NewEventHook`, `NewLogHook`, or the deprecated `New`) can be configured with the following options:
- Fallback Functionality: Configure a fallback for handling errors during log transmission.
```go
sentryHook.Fallback = func(entry *logrus.Entry, err error) {
// Handle error
}
```
- Setting default tags for all events sent to Sentry
```go
sentryHook.AddTags(map[string]string{
"key": "value",
})
```
- Using `hubProvider` for Scoped Sentry Hubs
The hubProvider allows you to configure the Sentry hook to use a custom Sentry hub. This can be particularly useful when you want to scope logs to specific goroutines or operations, enabling more precise grouping and context in Sentry.
You can set a custom hubProvider function using the SetHubProvider method:
```go
sentryHook.SetHubProvider(func() *sentry.Hub {
// Create or return a specific Sentry hub
return sentry.NewHub(sentry.GetCurrentHub().Client(), sentry.NewScope())
})
```
This ensures that logs from specific contexts or threads use the appropriate Sentry hub and scope.
## Notes
- Always call `Flush` or `FlushWithContext` to ensure all events are sent to Sentry before program termination

View file

@ -2,17 +2,26 @@
package sentrylogrus
import (
"context"
"errors"
"fmt"
"math"
"net/http"
"reflect"
"strconv"
"time"
"github.com/getsentry/sentry-go"
"github.com/getsentry/sentry-go/attribute"
"github.com/sirupsen/logrus"
sentry "github.com/getsentry/sentry-go"
)
// The identifier of the Logrus SDK.
const sdkIdentifier = "sentry.go.logrus"
const (
// sdkIdentifier is the identifier of the Logrus SDK.
sdkIdentifier = "sentry.go.logrus"
// the name of the logger.
name = "logrus"
)
// These default log field keys are used to pass specific metadata in a way that
// Sentry understands. If they are found in the log fields, and the value is of
@ -36,67 +45,79 @@ const (
FieldMaxProcs = "go_maxprocs"
)
var levelMap = map[logrus.Level]sentry.Level{
logrus.TraceLevel: sentry.LevelDebug,
logrus.DebugLevel: sentry.LevelDebug,
logrus.InfoLevel: sentry.LevelInfo,
logrus.WarnLevel: sentry.LevelWarning,
logrus.ErrorLevel: sentry.LevelError,
logrus.FatalLevel: sentry.LevelFatal,
logrus.PanicLevel: sentry.LevelFatal,
}
// Hook is the logrus hook for Sentry.
//
// It is not safe to configure the hook while logging is happening. Please
// perform all configuration before using it.
type Hook struct {
hub *sentry.Hub
fallback FallbackFunc
keys map[string]string
levels []logrus.Level
type Hook interface {
// SetHubProvider sets a function to provide a hub for each log entry.
SetHubProvider(provider func() *sentry.Hub)
// AddTags adds tags to the hook's scope.
AddTags(tags map[string]string)
// SetFallback sets a fallback function for the eventHook.
SetFallback(fb FallbackFunc)
// SetKey sets an alternate field key for the eventHook.
SetKey(oldKey, newKey string)
// Levels returns the list of logging levels that will be sent to Sentry as events.
Levels() []logrus.Level
// Fire sends entry to Sentry as an event.
Fire(entry *logrus.Entry) error
// Flush waits until the underlying Sentry transport sends any buffered events.
Flush(timeout time.Duration) bool
// FlushWithContext waits for the underlying Sentry transport to send any buffered
// events, blocking until the context's deadline is reached or the context is canceled.
// It returns false if the context is canceled or its deadline expires before the events
// are sent, meaning some events may not have been sent.
FlushWithContext(ctx context.Context) bool
}
var _ logrus.Hook = &Hook{}
// New initializes a new Logrus hook which sends logs to a new Sentry client
// configured according to opts.
func New(levels []logrus.Level, opts sentry.ClientOptions) (*Hook, error) {
client, err := sentry.NewClient(opts)
if err != nil {
return nil, err
}
client.SetSDKIdentifier(sdkIdentifier)
return NewFromClient(levels, client), nil
// Deprecated: New just makes an underlying call to NewEventHook.
func New(levels []logrus.Level, opts sentry.ClientOptions) (Hook, error) {
return NewEventHook(levels, opts)
}
// NewFromClient initializes a new Logrus hook which sends logs to the provided
// sentry client.
func NewFromClient(levels []logrus.Level, client *sentry.Client) *Hook {
h := &Hook{
levels: levels,
hub: sentry.NewHub(client, sentry.NewScope()),
keys: make(map[string]string),
}
return h
}
// AddTags adds tags to the hook's scope.
func (h *Hook) AddTags(tags map[string]string) {
h.hub.Scope().SetTags(tags)
// Deprecated: NewFromClient just makes an underlying call to NewEventHookFromClient.
func NewFromClient(levels []logrus.Level, client *sentry.Client) Hook {
return NewEventHookFromClient(levels, client)
}
// A FallbackFunc can be used to attempt to handle any errors in logging, before
// resorting to Logrus's standard error reporting.
type FallbackFunc func(*logrus.Entry) error
// SetFallback sets a fallback function, which will be called in case logging to
// sentry fails. In case of a logging failure in the Fire() method, the
// fallback function is called with the original logrus entry. If the
// fallback function returns nil, the error is considered handled. If it returns
// an error, that error is passed along to logrus as the return value from the
// Fire() call. If no fallback function is defined, a default error message is
// returned to Logrus in case of failure to send to Sentry.
func (h *Hook) SetFallback(fb FallbackFunc) {
type eventHook struct {
hubProvider func() *sentry.Hub
fallback FallbackFunc
keys map[string]string
levels []logrus.Level
}
var _ Hook = &eventHook{}
var _ logrus.Hook = &eventHook{} // eventHook still needs to be a logrus.Hook
func (h *eventHook) SetHubProvider(provider func() *sentry.Hub) {
h.hubProvider = provider
}
func (h *eventHook) AddTags(tags map[string]string) {
h.hubProvider().Scope().SetTags(tags)
}
func (h *eventHook) SetFallback(fb FallbackFunc) {
h.fallback = fb
}
// SetKey sets an alternate field key. Use this if the default values conflict
// with other loggers, for instance. You may pass "" for new, to unset an
// existing alternate.
func (h *Hook) SetKey(oldKey, newKey string) {
func (h *eventHook) SetKey(oldKey, newKey string) {
if oldKey == "" {
return
}
@ -108,23 +129,21 @@ func (h *Hook) SetKey(oldKey, newKey string) {
h.keys[oldKey] = newKey
}
func (h *Hook) key(key string) string {
func (h *eventHook) key(key string) string {
if val := h.keys[key]; val != "" {
return val
}
return key
}
// Levels returns the list of logging levels that will be sent to
// Sentry.
func (h *Hook) Levels() []logrus.Level {
func (h *eventHook) Levels() []logrus.Level {
return h.levels
}
// Fire sends entry to Sentry.
func (h *Hook) Fire(entry *logrus.Entry) error {
func (h *eventHook) Fire(entry *logrus.Entry) error {
hub := h.hubProvider()
event := h.entryToEvent(entry)
if id := h.hub.CaptureEvent(event); id == nil {
if id := hub.CaptureEvent(event); id == nil {
if h.fallback != nil {
return h.fallback(entry)
}
@ -133,17 +152,7 @@ func (h *Hook) Fire(entry *logrus.Entry) error {
return nil
}
var levelMap = map[logrus.Level]sentry.Level{
logrus.TraceLevel: sentry.LevelDebug,
logrus.DebugLevel: sentry.LevelDebug,
logrus.InfoLevel: sentry.LevelInfo,
logrus.WarnLevel: sentry.LevelWarning,
logrus.ErrorLevel: sentry.LevelError,
logrus.FatalLevel: sentry.LevelFatal,
logrus.PanicLevel: sentry.LevelFatal,
}
func (h *Hook) entryToEvent(l *logrus.Entry) *sentry.Event {
func (h *eventHook) entryToEvent(l *logrus.Entry) *sentry.Event {
data := make(logrus.Fields, len(l.Data))
for k, v := range l.Data {
data[k] = v
@ -153,43 +162,246 @@ func (h *Hook) entryToEvent(l *logrus.Entry) *sentry.Event {
Extra: data,
Message: l.Message,
Timestamp: l.Time,
Logger: name,
}
key := h.key(FieldRequest)
if req, ok := s.Extra[key].(*http.Request); ok {
switch request := s.Extra[key].(type) {
case *http.Request:
delete(s.Extra, key)
s.Request = sentry.NewRequest(req)
s.Request = sentry.NewRequest(request)
case sentry.Request:
delete(s.Extra, key)
s.Request = &request
case *sentry.Request:
delete(s.Extra, key)
s.Request = request
}
if err, ok := s.Extra[logrus.ErrorKey].(error); ok {
delete(s.Extra, logrus.ErrorKey)
s.SetException(err, -1)
}
key = h.key(FieldUser)
if user, ok := s.Extra[key].(sentry.User); ok {
switch user := s.Extra[key].(type) {
case sentry.User:
delete(s.Extra, key)
s.User = user
}
if user, ok := s.Extra[key].(*sentry.User); ok {
case *sentry.User:
delete(s.Extra, key)
s.User = *user
}
key = h.key(FieldTransaction)
if txn, ok := s.Extra[key].(string); ok {
delete(s.Extra, key)
s.Transaction = txn
}
key = h.key(FieldFingerprint)
if fp, ok := s.Extra[key].([]string); ok {
delete(s.Extra, key)
s.Fingerprint = fp
}
delete(s.Extra, FieldGoVersion)
delete(s.Extra, FieldMaxProcs)
return s
}
// Flush waits until the underlying Sentry transport sends any buffered events,
// blocking for at most the given timeout. It returns false if the timeout was
// reached, in which case some events may not have been sent.
func (h *Hook) Flush(timeout time.Duration) bool {
return h.hub.Client().Flush(timeout)
// Flush waits until the underlying Sentry transport sends any buffered
// events, blocking for at most timeout.
func (h *eventHook) Flush(timeout time.Duration) bool {
	return h.hubProvider().Client().Flush(timeout)
}

// FlushWithContext is like Flush but bounded by ctx's cancellation/deadline.
func (h *eventHook) FlushWithContext(ctx context.Context) bool {
	return h.hubProvider().Client().FlushWithContext(ctx)
}
// NewEventHook initializes a new Logrus hook which sends events to a new Sentry client
// configured according to opts.
func NewEventHook(levels []logrus.Level, opts sentry.ClientOptions) (Hook, error) {
	client, err := sentry.NewClient(opts)
	if err != nil {
		return nil, err
	}
	// Mark outgoing events as coming from the Logrus integration.
	client.SetSDKIdentifier(sdkIdentifier)
	return NewEventHookFromClient(levels, client), nil
}
// NewEventHookFromClient initializes a new Logrus hook which sends events to
// the provided sentry client.
func NewEventHookFromClient(levels []logrus.Level, client *sentry.Client) Hook {
	hub := sentry.NewHub(client, sentry.NewScope())
	hook := &eventHook{
		levels: levels,
		keys:   make(map[string]string),
	}
	// Until SetHubProvider is called, every entry goes through this hub.
	hook.hubProvider = func() *sentry.Hub { return hub }
	return hook
}
// logHook forwards matching logrus entries to Sentry structured logging
// (unlike eventHook, which captures entries as Sentry events).
type logHook struct {
	hubProvider func() *sentry.Hub
	fallback    FallbackFunc
	keys        map[string]string // alternate field-key names set via SetKey
	levels      []logrus.Level
	logger      sentry.Logger
}

// Compile-time interface checks.
var _ Hook = &logHook{}
var _ logrus.Hook = &logHook{} // logHook also needs to be a logrus.Hook
// SetHubProvider sets a function to provide a hub for each log entry.
func (h *logHook) SetHubProvider(provider func() *sentry.Hub) {
	h.hubProvider = provider
}

// AddTags attaches tags to the hook; for logs they become attributes on the
// underlying sentry.Logger rather than scope tags.
func (h *logHook) AddTags(tags map[string]string) {
	// for logs convert tags to attributes
	for k, v := range tags {
		h.logger.SetAttributes(attribute.String(k, v))
	}
}

// SetFallback sets the function invoked when Fire cannot deliver an entry.
func (h *logHook) SetFallback(fb FallbackFunc) {
	h.fallback = fb
}

// SetKey maps a default field key to an alternate name; passing newKey == ""
// removes an existing mapping.
func (h *logHook) SetKey(oldKey, newKey string) {
	if oldKey == "" {
		return
	}
	if newKey == "" {
		delete(h.keys, oldKey)
		return
	}
	delete(h.keys, newKey)
	h.keys[oldKey] = newKey
}

// key resolves a field key through the alternate-name table.
func (h *logHook) key(key string) string {
	if val := h.keys[key]; val != "" {
		return val
	}
	return key
}
// Fire implements logrus.Hook: it converts entry into a Sentry structured
// log line, attaching the entry's data fields as typed attributes and
// dispatching at the severity matching entry.Level.
func (h *logHook) Fire(entry *logrus.Entry) error {
	ctx := context.Background()
	if entry.Context != nil {
		ctx = entry.Context
	}
	for k, v := range entry.Data {
		// Skip specific fields that might be handled separately
		if k == h.key(FieldRequest) || k == h.key(FieldUser) ||
			k == h.key(FieldFingerprint) || k == FieldGoVersion ||
			k == FieldMaxProcs || k == logrus.ErrorKey {
			continue
		}
		switch val := v.(type) {
		case int8:
			h.logger.SetAttributes(attribute.Int(k, int(val)))
		case int16:
			h.logger.SetAttributes(attribute.Int(k, int(val)))
		case int32:
			h.logger.SetAttributes(attribute.Int(k, int(val)))
		case int64:
			// Keep the full 64-bit value: converting through int would
			// truncate on 32-bit platforms.
			h.logger.SetAttributes(attribute.Int64(k, val))
		case int:
			h.logger.SetAttributes(attribute.Int(k, val))
		case uint, uint8, uint16, uint32, uint64:
			uval := reflect.ValueOf(val).Convert(reflect.TypeOf(uint64(0))).Uint()
			if uval <= math.MaxInt64 {
				h.logger.SetAttributes(attribute.Int64(k, int64(uval)))
			} else {
				// For values larger than int64 can handle, we are using string.
				h.logger.SetAttributes(attribute.String(k, strconv.FormatUint(uval, 10)))
			}
		case string:
			h.logger.SetAttributes(attribute.String(k, val))
		case float32:
			h.logger.SetAttributes(attribute.Float64(k, float64(val)))
		case float64:
			h.logger.SetAttributes(attribute.Float64(k, val))
		case bool:
			h.logger.SetAttributes(attribute.Bool(k, val))
		default:
			// can't drop argument, fallback to string conversion
			h.logger.SetAttributes(attribute.String(k, fmt.Sprint(v)))
		}
	}
	h.logger.SetAttributes(attribute.String("sentry.origin", "auto.logger.logrus"))
	switch entry.Level {
	case logrus.TraceLevel:
		h.logger.Trace(ctx, entry.Message)
	case logrus.DebugLevel:
		h.logger.Debug(ctx, entry.Message)
	case logrus.InfoLevel:
		h.logger.Info(ctx, entry.Message)
	case logrus.WarnLevel:
		h.logger.Warn(ctx, entry.Message)
	case logrus.ErrorLevel:
		h.logger.Error(ctx, entry.Message)
	case logrus.FatalLevel:
		h.logger.Fatal(ctx, entry.Message)
	case logrus.PanicLevel:
		h.logger.Panic(ctx, entry.Message)
	default:
		sentry.DebugLogger.Printf("Invalid logrus logging level: %v. Dropping log.", entry.Level)
		if h.fallback != nil {
			return h.fallback(entry)
		}
		return errors.New("invalid log level")
	}
	return nil
}
// Levels returns the logrus levels this hook is registered for.
func (h *logHook) Levels() []logrus.Level {
	return h.levels
}

// Flush waits until the underlying Sentry transport sends any buffered
// payloads, blocking for at most timeout.
func (h *logHook) Flush(timeout time.Duration) bool {
	return h.hubProvider().Client().Flush(timeout)
}

// FlushWithContext is like Flush but bounded by ctx's cancellation/deadline.
func (h *logHook) FlushWithContext(ctx context.Context) bool {
	return h.hubProvider().Client().FlushWithContext(ctx)
}
// NewLogHook initializes a new Logrus hook which sends logs to a new Sentry client
// configured according to opts. opts.EnableLogs must be true, otherwise the
// client cannot emit structured logs and an error is returned.
func NewLogHook(levels []logrus.Level, opts sentry.ClientOptions) (Hook, error) {
	if !opts.EnableLogs {
		return nil, errors.New("cannot create log hook, EnableLogs is set to false")
	}
	client, err := sentry.NewClient(opts)
	if err != nil {
		return nil, err
	}
	// Mark outgoing payloads as coming from the Logrus integration.
	client.SetSDKIdentifier(sdkIdentifier)
	return NewLogHookFromClient(levels, client), nil
}
// NewLogHookFromClient initializes a new Logrus hook which sends logs to the
// provided sentry client.
func NewLogHookFromClient(levels []logrus.Level, client *sentry.Client) Hook {
	hub := sentry.NewHub(client, sentry.NewScope())
	hook := &logHook{
		// The sentry logger resolves its hub from the context it is built with.
		logger: sentry.NewLogger(sentry.SetHubOnContext(context.Background(), hub)),
		levels: levels,
		keys:   make(map[string]string),
	}
	// Until SetHubProvider is called, every entry goes through this hub.
	hook.hubProvider = func() *sentry.Hub { return hub }
	return hook
}

View file

@ -1,427 +0,0 @@
package sentry
import (
"fmt"
"hash/crc32"
"math"
"regexp"
"sort"
"strings"
)
type (
	// NumberOrString constrains the element types a set metric can hold.
	NumberOrString interface {
		int | string
	}
	// void is a zero-byte placeholder used as the set-membership value.
	void struct{}
)

var (
	member void
	// Characters stripped from keys, tag values and units during serialization.
	keyRegex   = regexp.MustCompile(`[^a-zA-Z0-9_/.-]+`)
	valueRegex = regexp.MustCompile(`[^\w\d\s_:/@\.{}\[\]$-]+`)
	unitRegex  = regexp.MustCompile(`[^a-z]+`)
)
// MetricUnit wraps the unit name attached to a metric (e.g. "second", "byte").
type MetricUnit struct {
	unit string
}

// toString returns the raw unit name for serialization.
func (m MetricUnit) toString() string {
	return m.unit
}
// Predefined units usable when constructing metrics. Duration units:

// NanoSecond returns the "nanosecond" unit.
func NanoSecond() MetricUnit { return MetricUnit{"nanosecond"} }

// MicroSecond returns the "microsecond" unit.
func MicroSecond() MetricUnit { return MetricUnit{"microsecond"} }

// MilliSecond returns the "millisecond" unit.
func MilliSecond() MetricUnit { return MetricUnit{"millisecond"} }

// Second returns the "second" unit.
func Second() MetricUnit { return MetricUnit{"second"} }

// Minute returns the "minute" unit.
func Minute() MetricUnit { return MetricUnit{"minute"} }

// Hour returns the "hour" unit.
func Hour() MetricUnit { return MetricUnit{"hour"} }

// Day returns the "day" unit.
func Day() MetricUnit { return MetricUnit{"day"} }

// Week returns the "week" unit.
func Week() MetricUnit { return MetricUnit{"week"} }

// Information units:

// Bit returns the "bit" unit.
func Bit() MetricUnit { return MetricUnit{"bit"} }

// Byte returns the "byte" unit.
func Byte() MetricUnit { return MetricUnit{"byte"} }

// KiloByte returns the "kilobyte" unit.
func KiloByte() MetricUnit { return MetricUnit{"kilobyte"} }

// KibiByte returns the "kibibyte" unit.
func KibiByte() MetricUnit { return MetricUnit{"kibibyte"} }

// MegaByte returns the "megabyte" unit.
func MegaByte() MetricUnit { return MetricUnit{"megabyte"} }

// MebiByte returns the "mebibyte" unit.
func MebiByte() MetricUnit { return MetricUnit{"mebibyte"} }

// GigaByte returns the "gigabyte" unit.
func GigaByte() MetricUnit { return MetricUnit{"gigabyte"} }

// GibiByte returns the "gibibyte" unit.
func GibiByte() MetricUnit { return MetricUnit{"gibibyte"} }

// TeraByte returns the "terabyte" unit.
func TeraByte() MetricUnit { return MetricUnit{"terabyte"} }

// TebiByte returns the "tebibyte" unit.
func TebiByte() MetricUnit { return MetricUnit{"tebibyte"} }

// PetaByte returns the "petabyte" unit.
func PetaByte() MetricUnit { return MetricUnit{"petabyte"} }

// PebiByte returns the "pebibyte" unit.
func PebiByte() MetricUnit { return MetricUnit{"pebibyte"} }

// ExaByte returns the "exabyte" unit.
func ExaByte() MetricUnit { return MetricUnit{"exabyte"} }

// ExbiByte returns the "exbibyte" unit.
func ExbiByte() MetricUnit { return MetricUnit{"exbibyte"} }

// Fraction units:

// Ratio returns the "ratio" unit.
func Ratio() MetricUnit { return MetricUnit{"ratio"} }

// Percent returns the "percent" unit.
func Percent() MetricUnit { return MetricUnit{"percent"} }

// CustomUnit returns a user-defined unit, stripped of any character outside
// [a-z].
func CustomUnit(unit string) MetricUnit { return MetricUnit{unitRegex.ReplaceAllString(unit, "")} }
// Metric is the interface all concrete metric kinds (counter, distribution,
// gauge, set) implement for statsd-style serialization.
type Metric interface {
	GetType() string
	GetTags() map[string]string
	GetKey() string
	GetUnit() string
	GetTimestamp() int64
	SerializeValue() string
	SerializeTags() string
}

// abstractMetric holds the fields shared by every metric kind.
type abstractMetric struct {
	key  string
	unit MetricUnit
	tags map[string]string
	// A unix timestamp (full seconds elapsed since 1970-01-01 00:00 UTC).
	timestamp int64
}
// GetTags returns the metric's tag map.
func (am abstractMetric) GetTags() map[string]string {
	return am.tags
}

// GetKey returns the metric's key.
func (am abstractMetric) GetKey() string {
	return am.key
}

// GetUnit returns the metric's unit name.
func (am abstractMetric) GetUnit() string {
	return am.unit.toString()
}

// GetTimestamp returns the metric's unix timestamp in seconds.
func (am abstractMetric) GetTimestamp() int64 {
	return am.timestamp
}
// SerializeTags renders the tags as a deterministic "key:value,key:value"
// string, sanitizing keys and values and sorting by key.
func (am abstractMetric) SerializeTags() string {
	keys := make([]string, 0, len(am.tags))
	for k := range am.tags {
		keys = append(keys, k)
	}
	sortSlice(keys)
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, sanitizeKey(k)+":"+sanitizeValue(am.tags[k]))
	}
	return strings.Join(parts, ",")
}
// Counter Metric: accumulates a running sum of added values.
type CounterMetric struct {
	value float64
	abstractMetric
}

// Add folds value into the running total.
func (c *CounterMetric) Add(value float64) {
	c.value += value
}

// GetType returns the statsd type tag for counters.
func (c CounterMetric) GetType() string {
	return "c"
}

// SerializeValue renders the total as ":<value>".
func (c CounterMetric) SerializeValue() string {
	return fmt.Sprintf(":%v", c.value)
}

// NewCounterMetric creates a counter primed with a single value.
// timestamp: A unix timestamp (full seconds elapsed since 1970-01-01 00:00 UTC).
func NewCounterMetric(key string, unit MetricUnit, tags map[string]string, timestamp int64, value float64) CounterMetric {
	am := abstractMetric{
		key,
		unit,
		tags,
		timestamp,
	}
	return CounterMetric{
		value,
		am,
	}
}
// Distribution Metric: records every added value individually.
type DistributionMetric struct {
	values []float64
	abstractMetric
}

// Add appends one observation to the distribution.
func (d *DistributionMetric) Add(value float64) {
	d.values = append(d.values, value)
}

// GetType returns the statsd type tag for distributions.
func (d DistributionMetric) GetType() string {
	return "d"
}

// SerializeValue renders all observations as ":<v1>:<v2>...".
func (d DistributionMetric) SerializeValue() string {
	var sb strings.Builder
	for _, el := range d.values {
		sb.WriteString(fmt.Sprintf(":%v", el))
	}
	return sb.String()
}

// NewDistributionMetric creates a distribution primed with one observation.
// timestamp: A unix timestamp (full seconds elapsed since 1970-01-01 00:00 UTC).
func NewDistributionMetric(key string, unit MetricUnit, tags map[string]string, timestamp int64, value float64) DistributionMetric {
	am := abstractMetric{
		key,
		unit,
		tags,
		timestamp,
	}
	return DistributionMetric{
		[]float64{value},
		am,
	}
}
// Gauge Metric.
type GaugeMetric struct {
	last  float64
	min   float64
	max   float64
	sum   float64
	count float64
	abstractMetric
}

// Add folds another observation into the gauge aggregate, updating
// last/min/max/sum and incrementing the sample count.
func (g *GaugeMetric) Add(value float64) {
	g.last = value
	g.min = math.Min(g.min, value)
	g.max = math.Max(g.max, value)
	g.sum += value
	g.count++
}

// GetType returns the statsd type tag for gauges.
func (g GaugeMetric) GetType() string {
	return "g"
}

// SerializeValue renders the aggregate as ":last:min:max:sum:count".
func (g GaugeMetric) SerializeValue() string {
	return fmt.Sprintf(":%v:%v:%v:%v:%v", g.last, g.min, g.max, g.sum, g.count)
}
// NewGaugeMetric builds a GaugeMetric seeded with a single observation:
// last/min/max/sum all start at value and the sample count starts at 1.
// timestamp: A unix timestamp (full seconds elapsed since 1970-01-01 00:00 UTC).
func NewGaugeMetric(key string, unit MetricUnit, tags map[string]string, timestamp int64, value float64) GaugeMetric {
	am := abstractMetric{
		key,
		unit,
		tags,
		timestamp,
	}

	return GaugeMetric{
		value, // last
		value, // min
		value, // max
		value, // sum
		// count: exactly one observation so far. Seeding this with `value`
		// (as before) mis-reported the sample count, since Add() increments
		// it by 1 per observation.
		1,
		am,
	}
}
// Set Metric.
type SetMetric[T NumberOrString] struct {
	values map[T]void
	abstractMetric
}

// Add inserts value into the set; duplicates are collapsed.
func (s *SetMetric[T]) Add(value T) {
	s.values[value] = member
}

// GetType returns the statsd type tag for sets.
func (s SetMetric[T]) GetType() string {
	return "s"
}

// SerializeValue renders the set elements in sorted order, each prefixed
// with ':' — ints verbatim, strings as their CRC32 checksum.
// NOTE(review): the switch matches only the exact types int and string;
// any other type admitted by NumberOrString would serialize to nothing —
// confirm the constraint's members elsewhere in this file.
func (s SetMetric[T]) SerializeValue() string {
	_hash := func(s string) uint32 {
		return crc32.ChecksumIEEE([]byte(s))
	}

	values := make([]T, 0, len(s.values))
	for k := range s.values {
		values = append(values, k)
	}
	sortSlice(values)

	var sb strings.Builder
	for _, el := range values {
		switch any(el).(type) {
		case int:
			sb.WriteString(fmt.Sprintf(":%v", el))
		case string:
			s := fmt.Sprintf("%v", el)
			sb.WriteString(fmt.Sprintf(":%d", _hash(s)))
		}
	}

	return sb.String()
}
// NewSetMetric builds a SetMetric containing the single element value.
// timestamp: A unix timestamp (full seconds elapsed since 1970-01-01 00:00 UTC).
func NewSetMetric[T NumberOrString](key string, unit MetricUnit, tags map[string]string, timestamp int64, value T) SetMetric[T] {
	return SetMetric[T]{
		values: map[T]void{value: member},
		abstractMetric: abstractMetric{
			key:       key,
			unit:      unit,
			tags:      tags,
			timestamp: timestamp,
		},
	}
}
// sanitizeKey normalizes a metric or tag key by replacing every run matched
// by the package-level keyRegex with an underscore.
func sanitizeKey(s string) string {
	return keyRegex.ReplaceAllString(s, "_")
}

// sanitizeValue strips every run matched by the package-level valueRegex
// from a tag value.
func sanitizeValue(s string) string {
	return valueRegex.ReplaceAllString(s, "")
}
// Ordered enumerates the primitive kinds that support the < operator and is
// used as the constraint for sortSlice.
type Ordered interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 | ~string
}

// sortSlice sorts s in ascending order, in place.
func sortSlice[T Ordered](s []T) {
	ascending := func(i, j int) bool {
		return s[i] < s[j]
	}
	sort.Slice(s, ascending)
}

49
vendor/github.com/getsentry/sentry-go/mocks.go generated vendored Normal file
View file

@ -0,0 +1,49 @@
package sentry
import (
"context"
"sync"
"time"
)
// MockScope implements [Scope] for use in tests.
type MockScope struct {
	breadcrumb      *Breadcrumb // last breadcrumb passed to AddBreadcrumb
	shouldDropEvent bool        // when true, ApplyToEvent drops every event
}

// AddBreadcrumb records the breadcrumb; the limit argument is ignored.
func (scope *MockScope) AddBreadcrumb(breadcrumb *Breadcrumb, _ int) {
	scope.breadcrumb = breadcrumb
}

// ApplyToEvent returns the event unchanged, or nil (dropping it) when
// shouldDropEvent is set. Hint and client are ignored.
func (scope *MockScope) ApplyToEvent(event *Event, _ *EventHint, _ *Client) *Event {
	if scope.shouldDropEvent {
		return nil
	}
	return event
}
// MockTransport implements [Transport] for use in tests.
type MockTransport struct {
	mu        sync.Mutex
	events    []*Event // every event passed to SendEvent, in order
	lastEvent *Event   // most recent event passed to SendEvent
}

// Configure is a no-op; the mock needs no client options.
func (t *MockTransport) Configure(_ ClientOptions) {}

// SendEvent records the event instead of sending it anywhere.
func (t *MockTransport) SendEvent(event *Event) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.events = append(t.events, event)
	t.lastEvent = event
}

// Flush always reports success immediately; nothing is buffered.
func (t *MockTransport) Flush(_ time.Duration) bool {
	return true
}

// FlushWithContext always reports success immediately.
func (t *MockTransport) FlushWithContext(_ context.Context) bool { return true }

// Events returns the events recorded so far.
// NOTE(review): the internal slice is returned directly (not a copy);
// callers should not mutate it.
func (t *MockTransport) Events() []*Event {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.events
}

// Close is a no-op.
func (t *MockTransport) Close() {}

View file

@ -1,73 +0,0 @@
package sentry
// Based on https://github.com/getsentry/vroom/blob/d11c26063e802d66b9a592c4010261746ca3dfa4/internal/sample/sample.go
import (
"time"
)
type (
	// profileDevice describes the host device in the profile envelope.
	profileDevice struct {
		Architecture   string `json:"architecture"`
		Classification string `json:"classification"`
		Locale         string `json:"locale"`
		Manufacturer   string `json:"manufacturer"`
		Model          string `json:"model"`
	}

	// profileOS describes the host operating system.
	profileOS struct {
		BuildNumber string `json:"build_number"`
		Name        string `json:"name"`
		Version     string `json:"version"`
	}

	// profileRuntime describes the runtime that produced the profile.
	profileRuntime struct {
		Name    string `json:"name"`
		Version string `json:"version"`
	}

	// profileSample is one stack capture of one goroutine at a point in time.
	profileSample struct {
		ElapsedSinceStartNS uint64 `json:"elapsed_since_start_ns"`
		StackID             int    `json:"stack_id"`
		ThreadID            uint64 `json:"thread_id"`
	}

	// profileThreadMetadata carries optional display data for a goroutine.
	profileThreadMetadata struct {
		Name     string `json:"name,omitempty"`
		Priority int    `json:"priority,omitempty"`
	}

	// profileStack is a call stack expressed as indexes into profileTrace.Frames.
	profileStack []int

	// profileTrace is the sampled-profile payload: deduplicated frames and
	// stacks, the timed samples, and per-goroutine metadata.
	profileTrace struct {
		Frames         []*Frame                          `json:"frames"`
		Samples        []profileSample                   `json:"samples"`
		Stacks         []profileStack                    `json:"stacks"`
		ThreadMetadata map[uint64]*profileThreadMetadata `json:"thread_metadata"`
	}

	// profileInfo is the top-level profile envelope item sent to Sentry.
	profileInfo struct {
		DebugMeta   *DebugMeta         `json:"debug_meta,omitempty"`
		Device      profileDevice      `json:"device"`
		Environment string             `json:"environment,omitempty"`
		EventID     string             `json:"event_id"`
		OS          profileOS          `json:"os"`
		Platform    string             `json:"platform"`
		Release     string             `json:"release"`
		Dist        string             `json:"dist"`
		Runtime     profileRuntime     `json:"runtime"`
		Timestamp   time.Time          `json:"timestamp"`
		Trace       *profileTrace      `json:"profile"`
		Transaction profileTransaction `json:"transaction"`
		Version     string             `json:"version"`
	}

	// see https://github.com/getsentry/vroom/blob/a91e39416723ec44fc54010257020eeaf9a77cbd/internal/transaction/transaction.go
	profileTransaction struct {
		ActiveThreadID uint64  `json:"active_thread_id"`
		DurationNS     uint64  `json:"duration_ns,omitempty"`
		ID             EventID `json:"id"`
		Name           string  `json:"name"`
		TraceID        string  `json:"trace_id"`
	}
)

View file

@ -1,451 +0,0 @@
package sentry
import (
"container/ring"
"strconv"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/getsentry/sentry-go/internal/traceparser"
)
// Start a profiler that collects samples continuously, with a buffer of up to 30 seconds.
// Later, you can collect a slice from this buffer, producing a Trace.
// Returns nil if the profiler goroutine panicked during startup.
func startProfiling(startTime time.Time) profiler {
	onProfilerStart()

	p := newProfiler(startTime)

	// Wait for the profiler to finish setting up before returning to the caller.
	started := make(chan struct{})
	go p.run(started)

	// ok is false when run() panicked during startup: its deferred recovery
	// closes the channel without sending, so no profiler is available.
	if _, ok := <-started; ok {
		return p
	}
	return nil
}

// profiler abstracts the continuous profiler so it can be replaced in tests.
type profiler interface {
	// GetSlice returns a slice of the profiled data between the given times.
	GetSlice(startTime, endTime time.Time) *profilerResult

	Stop(wait bool)
}

// profilerResult couples a collected trace with the goroutine ID of the
// caller that requested the slice.
type profilerResult struct {
	callerGoID uint64
	trace      *profileTrace
}
// getCurrentGoID parses the current goroutine's ID from the first line of
// its stack dump. Returns 0 if the ID cannot be determined.
func getCurrentGoID() uint64 {
	// We shouldn't panic but let's be super safe.
	defer func() {
		if err := recover(); err != nil {
			Logger.Printf("Profiler panic in getCurrentGoID(): %v\n", err)
		}
	}()

	// Buffer to read the stack trace into. We should be good with a small buffer because we only need the first line.
	var stacksBuffer = make([]byte, 100)
	var n = runtime.Stack(stacksBuffer, false)
	if n > 0 {
		var traces = traceparser.Parse(stacksBuffer[0:n])
		if traces.Length() > 0 {
			var trace = traces.Item(0)
			return trace.GoID()
		}
	}
	return 0
}
const profilerSamplingRateHz = 101 // 101 Hz; not 100 Hz because of the lockstep sampling (https://stackoverflow.com/a/45471031/1181370)
const profilerSamplingRate = time.Second / profilerSamplingRateHz

// Growth step and hard cap for the goroutine-stack read buffer.
const stackBufferMaxGrowth = 512 * 1024
const stackBufferLimit = 10 * 1024 * 1024

// Length of the sample ring buffer.
const profilerRuntimeLimit = 30 // seconds
// profileRecorder implements profiler: it samples all goroutine stacks at
// profilerSamplingRateHz into a 30-second ring buffer, deduplicating frames
// and stacks as it goes.
type profileRecorder struct {
	startTime         time.Time
	stopSignal        chan struct{}
	stopped           int64
	mutex             sync.RWMutex
	testProfilerPanic int64

	// Map from runtime.StackRecord.Stack0 to an index in stacks.
	stackIndexes   map[string]int
	stacks         []profileStack
	newStacks      []profileStack // New stacks created in the current iteration.
	stackKeyBuffer []byte

	// Map from runtime.Frame.PC to an index in frames.
	frameIndexes map[string]int
	frames       []*Frame
	newFrames    []*Frame // New frames created in the current iteration.

	// We keep a ring buffer of 30 seconds worth of samples, so that we can later slice it.
	// Each bucket is a slice of samples all taken at the same time.
	samplesBucketsHead *ring.Ring

	// Buffer to read current stacks - will grow automatically up to stackBufferLimit.
	stacksBuffer []byte
}
// newProfiler builds an idle profileRecorder; call run() to start sampling.
func newProfiler(startTime time.Time) *profileRecorder {
	// Pre-allocate the profile trace for the currently active number of routines & 100 ms worth of samples.
	// Other coefficients are just guesses of what might be a good starting point to avoid allocs on short runs.
	return &profileRecorder{
		startTime:  startTime,
		stopSignal: make(chan struct{}, 1),

		stackIndexes: make(map[string]int, 32),
		stacks:       make([]profileStack, 0, 32),
		newStacks:    make([]profileStack, 0, 32),

		frameIndexes: make(map[string]int, 128),
		frames:       make([]*Frame, 0, 128),
		newFrames:    make([]*Frame, 0, 128),

		samplesBucketsHead: ring.New(profilerRuntimeLimit * profilerSamplingRateHz),

		// A buffer of 2 KiB per goroutine stack looks like a good starting point (empirically determined).
		stacksBuffer: make([]byte, runtime.NumGoroutine()*2048),
	}
}

// This allows us to test whether panic during profiling are handled correctly and don't block execution.
// If the number is lower than 0, profilerGoroutine() will panic immediately.
// If the number is higher than 0, profiler.onTick() will panic when the given samples-set index is being collected.
var testProfilerPanic int64
var profilerRunning int64
// run is the profiler goroutine body: it takes a first sample, signals
// readiness on started, then samples on every tick until Stop() is called.
// All panics are recovered so profiling can never crash the host program.
func (p *profileRecorder) run(started chan struct{}) {
	// Code backup for manual test debugging:
	// if !atomic.CompareAndSwapInt64(&profilerRunning, 0, 1) {
	// 	panic("Only one profiler can be running at a time")
	// }

	// We shouldn't panic but let's be super safe.
	defer func() {
		if err := recover(); err != nil {
			Logger.Printf("Profiler panic in run(): %v\n", err)
		}
		atomic.StoreInt64(&testProfilerPanic, 0)
		// Closing `started` unblocks startProfiling(); a send to stopSignal
		// unblocks a waiting Stop(wait=true).
		close(started)
		p.stopSignal <- struct{}{}
		atomic.StoreInt64(&p.stopped, 1)
		atomic.StoreInt64(&profilerRunning, 0)
	}()

	p.testProfilerPanic = atomic.LoadInt64(&testProfilerPanic)
	if p.testProfilerPanic < 0 {
		Logger.Printf("Profiler panicking during startup because testProfilerPanic == %v\n", p.testProfilerPanic)
		panic("This is an expected panic in profilerGoroutine() during tests")
	}

	// Collect the first sample immediately.
	p.onTick()

	// Periodically collect stacks, starting after profilerSamplingRate has passed.
	collectTicker := profilerTickerFactory(profilerSamplingRate)
	defer collectTicker.Stop()
	var tickerChannel = collectTicker.TickSource()

	started <- struct{}{}

	for {
		select {
		case <-tickerChannel:
			p.onTick()
			collectTicker.Ticked()
		case <-p.stopSignal:
			return
		}
	}
}
// Stop asks the run() goroutine to exit. When wait is true it blocks until
// run()'s deferred cleanup sends the acknowledgment back on stopSignal.
// Calling Stop after the profiler already stopped is a no-op.
func (p *profileRecorder) Stop(wait bool) {
	if atomic.LoadInt64(&p.stopped) == 1 {
		return
	}
	p.stopSignal <- struct{}{}
	if wait {
		<-p.stopSignal
	}
}
// GetSlice returns the samples captured between startTime and endTime, or
// nil when the range is invalid or fewer than two sample buckets fall in it.
func (p *profileRecorder) GetSlice(startTime, endTime time.Time) *profilerResult {
	// Unlikely edge cases - profiler wasn't running at all or the given times are invalid in relation to each other.
	if p.startTime.After(endTime) || startTime.After(endTime) {
		return nil
	}

	var relativeStartNS = uint64(0)
	if p.startTime.Before(startTime) {
		relativeStartNS = uint64(startTime.Sub(p.startTime).Nanoseconds())
	}
	var relativeEndNS = uint64(endTime.Sub(p.startTime).Nanoseconds())

	samplesCount, bucketsReversed, trace := p.getBuckets(relativeStartNS, relativeEndNS)
	if samplesCount == 0 {
		return nil
	}

	var result = &profilerResult{
		callerGoID: getCurrentGoID(),
		trace:      trace,
	}

	trace.Samples = make([]profileSample, samplesCount)
	trace.ThreadMetadata = make(map[uint64]*profileThreadMetadata, len(bucketsReversed[0].goIDs))
	// Buckets come newest-first, so fill the samples array from the back.
	var s = samplesCount - 1
	for _, bucket := range bucketsReversed {
		var elapsedSinceStartNS = bucket.relativeTimeNS - relativeStartNS
		for i, goID := range bucket.goIDs {
			trace.Samples[s].ElapsedSinceStartNS = elapsedSinceStartNS
			trace.Samples[s].ThreadID = goID
			trace.Samples[s].StackID = bucket.stackIDs[i]
			s--

			// Register each goroutine once with a human-readable name.
			if _, goroutineExists := trace.ThreadMetadata[goID]; !goroutineExists {
				trace.ThreadMetadata[goID] = &profileThreadMetadata{
					Name: "Goroutine " + strconv.FormatUint(goID, 10),
				}
			}
		}
	}

	return result
}
// Collect all buckets of samples in the given time range while holding a read lock.
// Buckets are returned newest-first; the returned trace aliases the
// recorder's frames and stacks slices (do not mutate them).
func (p *profileRecorder) getBuckets(relativeStartNS, relativeEndNS uint64) (samplesCount int, buckets []*profileSamplesBucket, trace *profileTrace) {
	p.mutex.RLock()
	defer p.mutex.RUnlock()

	// sampleBucketsHead points at the last stored bucket so it's a good starting point to search backwards for the end.
	var end = p.samplesBucketsHead
	for end.Value != nil && end.Value.(*profileSamplesBucket).relativeTimeNS > relativeEndNS {
		end = end.Prev()
	}

	// Edge case - no items stored before the given endTime.
	if end.Value == nil {
		return 0, nil, nil
	}

	{ // Find the first item after the given startTime.
		var start = end
		var prevBucket *profileSamplesBucket
		samplesCount = 0
		buckets = make([]*profileSamplesBucket, 0, int64((relativeEndNS-relativeStartNS)/uint64(profilerSamplingRate.Nanoseconds()))+1)
		for start.Value != nil {
			var bucket = start.Value.(*profileSamplesBucket)

			// If this bucket's time is before the requests start time, don't collect it (and stop iterating further).
			if bucket.relativeTimeNS < relativeStartNS {
				break
			}

			// If this bucket time is greater than previous the bucket's time, we have exhausted the whole ring buffer
			// before we were able to find the start time. That means the start time is not present and we must break.
			// This happens if the slice duration exceeds the ring buffer capacity.
			if prevBucket != nil && bucket.relativeTimeNS > prevBucket.relativeTimeNS {
				break
			}

			samplesCount += len(bucket.goIDs)
			buckets = append(buckets, bucket)

			start = start.Prev()
			prevBucket = bucket
		}
	}

	// Edge case - if the period requested was too short and we haven't collected enough samples.
	if len(buckets) < 2 {
		return 0, nil, nil
	}

	trace = &profileTrace{
		Frames: p.frames,
		Stacks: p.stacks,
	}
	return samplesCount, buckets, trace
}
// onTick captures one sample of all goroutine stacks and stores it as a new
// bucket in the ring buffer.
func (p *profileRecorder) onTick() {
	elapsedNs := time.Since(p.startTime).Nanoseconds()

	// Test hook: count down and panic at the requested tick (see testProfilerPanic).
	if p.testProfilerPanic > 0 {
		Logger.Printf("Profiler testProfilerPanic == %v\n", p.testProfilerPanic)
		if p.testProfilerPanic == 1 {
			Logger.Println("Profiler panicking onTick()")
			panic("This is an expected panic in Profiler.OnTick() during tests")
		}
		p.testProfilerPanic--
	}

	records := p.collectRecords()
	p.processRecords(uint64(elapsedNs), records)

	// Free up some memory if we don't need such a large buffer anymore.
	if len(p.stacksBuffer) > len(records)*3 {
		p.stacksBuffer = make([]byte, len(records)*3)
	}
}
// collectRecords reads the stack traces of all goroutines into the reusable
// buffer, growing it (up to stackBufferLimit) until the dump fits.
func (p *profileRecorder) collectRecords() []byte {
	for {
		// Capture stacks for all existing goroutines.
		// Note: runtime.GoroutineProfile() would be better but we can't use it at the moment because
		// it doesn't give us `gid` for each routine, see https://github.com/golang/go/issues/59663
		n := runtime.Stack(p.stacksBuffer, true)

		// If we couldn't read everything, increase the buffer and try again.
		if n >= len(p.stacksBuffer) && n < stackBufferLimit {
			var newSize = n * 2
			if newSize > n+stackBufferMaxGrowth {
				newSize = n + stackBufferMaxGrowth
			}
			if newSize > stackBufferLimit {
				newSize = stackBufferLimit
			}
			p.stacksBuffer = make([]byte, newSize)
		} else {
			return p.stacksBuffer[0:n]
		}
	}
}
// processRecords parses the captured stack dump into a samples bucket and
// commits it (plus any newly seen stacks/frames) under the write lock.
func (p *profileRecorder) processRecords(elapsedNs uint64, stacksBuffer []byte) {
	var traces = traceparser.Parse(stacksBuffer)
	var length = traces.Length()

	// Shouldn't happen but let's be safe and don't store empty buckets.
	if length == 0 {
		return
	}

	var bucket = &profileSamplesBucket{
		relativeTimeNS: elapsedNs,
		stackIDs:       make([]int, length),
		goIDs:          make([]uint64, length),
	}

	// reset buffers
	p.newFrames = p.newFrames[:0]
	p.newStacks = p.newStacks[:0]

	for i := 0; i < length; i++ {
		var stack = traces.Item(i)
		bucket.stackIDs[i] = p.addStackTrace(stack)
		bucket.goIDs[i] = stack.GoID()
	}

	p.mutex.Lock()
	defer p.mutex.Unlock()

	p.stacks = append(p.stacks, p.newStacks...)
	p.frames = append(p.frames, p.newFrames...)

	// Advance the ring and overwrite its oldest slot with the new bucket.
	p.samplesBucketsHead = p.samplesBucketsHead.Next()
	p.samplesBucketsHead.Value = bucket
}
// addStackTrace deduplicates a parsed stack and returns its index into
// stacks; newly seen stacks are staged in newStacks until processRecords
// commits them under the write lock.
func (p *profileRecorder) addStackTrace(capturedStack traceparser.Trace) int {
	iter := capturedStack.Frames()
	stack := make(profileStack, 0, iter.LengthUpperBound())

	// Originally, we've used `capturedStack.UniqueIdentifier()` as a key but that was incorrect because it also
	// contains function arguments and we want to group stacks by function name and file/line only.
	// Instead, we need to parse frames and we use a list of their indexes as a key.
	// We reuse the same buffer for each stack to avoid allocations; this is a hot spot.
	var expectedBufferLen = cap(stack) * 5 // 4 bytes per frame + 1 byte for space
	if cap(p.stackKeyBuffer) < expectedBufferLen {
		p.stackKeyBuffer = make([]byte, 0, expectedBufferLen)
	} else {
		p.stackKeyBuffer = p.stackKeyBuffer[:0]
	}

	for iter.HasNext() {
		var frame = iter.Next()
		if frameIndex := p.addFrame(frame); frameIndex >= 0 {
			stack = append(stack, frameIndex)

			p.stackKeyBuffer = append(p.stackKeyBuffer, 0) // space

			// The following code is just like binary.AppendUvarint() which isn't yet available in Go 1.18.
			x := uint64(frameIndex) + 1
			for x >= 0x80 {
				p.stackKeyBuffer = append(p.stackKeyBuffer, byte(x)|0x80)
				x >>= 7
			}
			p.stackKeyBuffer = append(p.stackKeyBuffer, byte(x))
		}
	}

	stackIndex, exists := p.stackIndexes[string(p.stackKeyBuffer)]
	if !exists {
		stackIndex = len(p.stacks) + len(p.newStacks)
		p.newStacks = append(p.newStacks, stack)
		p.stackIndexes[string(p.stackKeyBuffer)] = stackIndex
	}

	return stackIndex
}
// addFrame deduplicates a parsed frame and returns its index into frames;
// newly seen frames are staged in newFrames until processRecords commits them.
func (p *profileRecorder) addFrame(capturedFrame traceparser.Frame) int {
	// NOTE: Don't convert to string yet, it's expensive and compiler can avoid it when
	// indexing into a map (only needs a copy when adding a new key to the map).
	var key = capturedFrame.UniqueIdentifier()

	frameIndex, exists := p.frameIndexes[string(key)]
	if !exists {
		module, function := splitQualifiedFunctionName(string(capturedFrame.Func()))
		file, line := capturedFrame.File()
		frame := newFrame(module, function, string(file), line)
		frameIndex = len(p.frames) + len(p.newFrames)
		p.newFrames = append(p.newFrames, &frame)
		p.frameIndexes[string(key)] = frameIndex
	}

	return frameIndex
}
// profileSamplesBucket holds all goroutine samples captured at one tick.
type profileSamplesBucket struct {
	relativeTimeNS uint64
	stackIDs       []int
	goIDs          []uint64
}

// A Ticker holds a channel that delivers “ticks” of a clock at intervals.
type profilerTicker interface {
	// Stop turns off a ticker. After Stop, no more ticks will be sent.
	Stop()

	// TickSource returns a read-only channel of ticks.
	TickSource() <-chan time.Time

	// Ticked is called by the Profiler after a tick is processed to notify the ticker. Used for testing.
	Ticked()
}

// timeTicker adapts time.Ticker to the profilerTicker interface.
type timeTicker struct {
	*time.Ticker
}

func (t *timeTicker) TickSource() <-chan time.Time {
	return t.C
}

// Ticked is a no-op for the real clock-backed ticker.
func (t *timeTicker) Ticked() {}

// profilerTickerFactoryDefault creates the production, clock-backed ticker.
func profilerTickerFactoryDefault(d time.Duration) profilerTicker {
	return &timeTicker{time.NewTicker(d)}
}

// We allow overriding the ticker for tests. CI is terribly flaky
// because the time.Ticker doesn't guarantee regular ticks - they may come (a lot) later than the given interval.
var profilerTickerFactory = profilerTickerFactoryDefault

View file

@ -1,5 +0,0 @@
//go:build !windows
package sentry
// onProfilerStart is a no-op on non-Windows platforms (see the Windows
// variant, which raises the system timer resolution).
func onProfilerStart() {}

View file

@ -1,24 +0,0 @@
package sentry
import (
"sync"
"syscall"
)
// This works around the ticker resolution on Windows being ~15ms by default.
// See https://github.com/golang/go/issues/44343
// setTimeTickerResolution asks Windows (via winmm.dll's timeBeginPeriod) for
// a 1 ms timer resolution for this process.
func setTimeTickerResolution() {
	var winmmDLL = syscall.NewLazyDLL("winmm.dll")
	if winmmDLL != nil {
		var timeBeginPeriod = winmmDLL.NewProc("timeBeginPeriod")
		if timeBeginPeriod != nil {
			timeBeginPeriod.Call(uintptr(1))
		}
	}
}

var setupTickerResolutionOnce sync.Once

// onProfilerStart raises the Windows timer resolution once per process so
// the ~10 ms sampling ticker can fire on time.
func onProfilerStart() {
	setupTickerResolutionOnce.Do(setTimeTickerResolution)
}

View file

@ -0,0 +1,90 @@
package sentry
import (
"crypto/rand"
"encoding/json"
)
// PropagationContext carries the tracing information (trace ID, span IDs and
// dynamic sampling context) propagated across service boundaries through the
// sentry-trace and baggage headers.
type PropagationContext struct {
	TraceID      TraceID `json:"trace_id"`
	SpanID       SpanID  `json:"span_id"`
	ParentSpanID SpanID  `json:"parent_span_id"`
	// DynamicSamplingContext travels in the baggage header, not in JSON.
	DynamicSamplingContext DynamicSamplingContext `json:"-"`
}
// MarshalJSON serializes the context, omitting parent_span_id entirely when
// it is the zero span ID.
func (p PropagationContext) MarshalJSON() ([]byte, error) {
	// The local alias type drops this MarshalJSON method, avoiding infinite
	// recursion when delegating to json.Marshal.
	type propagationContext PropagationContext
	var parentSpanID string
	if p.ParentSpanID != zeroSpanID {
		parentSpanID = p.ParentSpanID.String()
	}

	return json.Marshal(struct {
		*propagationContext
		ParentSpanID string `json:"parent_span_id,omitempty"`
	}{
		propagationContext: (*propagationContext)(&p),
		ParentSpanID:       parentSpanID,
	})
}
// Map renders the context as an event-context map: trace_id and span_id are
// always present, parent_span_id only when it is non-zero.
func (p PropagationContext) Map() map[string]interface{} {
	m := make(map[string]interface{}, 3)
	m["trace_id"] = p.TraceID
	m["span_id"] = p.SpanID
	if p.ParentSpanID != zeroSpanID {
		m["parent_span_id"] = p.ParentSpanID
	}
	return m
}
// NewPropagationContext returns a PropagationContext with freshly generated
// random trace and span IDs. It panics if the secure random source fails.
func NewPropagationContext() PropagationContext {
	p := PropagationContext{}

	if _, err := rand.Read(p.TraceID[:]); err != nil {
		panic(err)
	}

	if _, err := rand.Read(p.SpanID[:]); err != nil {
		panic(err)
	}

	return p
}
// PropagationContextFromHeaders builds a PropagationContext from incoming
// sentry-trace and baggage header values. An invalid or absent trace header
// leaves freshly generated IDs in place; an unparsable baggage header is the
// only error condition.
func PropagationContextFromHeaders(trace, baggage string) (PropagationContext, error) {
	p := NewPropagationContext()

	// NOTE(review): NewPropagationContext already randomized SpanID, so this
	// second read looks redundant — presumably harmless; confirm upstream intent.
	if _, err := rand.Read(p.SpanID[:]); err != nil {
		panic(err)
	}

	hasTrace := false
	if trace != "" {
		if tpc, valid := ParseTraceParentContext([]byte(trace)); valid {
			hasTrace = true
			p.TraceID = tpc.TraceID
			p.ParentSpanID = tpc.ParentSpanID
		}
	}

	if baggage != "" {
		dsc, err := DynamicSamplingContextFromHeader([]byte(baggage))
		if err != nil {
			return PropagationContext{}, err
		}
		p.DynamicSamplingContext = dsc
	}

	// In case a sentry-trace header is present but there are no sentry-related
	// values in the baggage, create an empty, frozen DynamicSamplingContext.
	if hasTrace && !p.DynamicSamplingContext.HasEntries() {
		p.DynamicSamplingContext = DynamicSamplingContext{
			Frozen: true,
		}
	}

	return p, nil
}

View file

@ -43,20 +43,22 @@ type Scope struct {
Overflow() bool
}
eventProcessors []EventProcessor
propagationContext PropagationContext
span *Span
}
// NewScope creates a new Scope.
func NewScope() *Scope {
scope := Scope{
breadcrumbs: make([]*Breadcrumb, 0),
attachments: make([]*Attachment, 0),
tags: make(map[string]string),
contexts: make(map[string]Context),
extra: make(map[string]interface{}),
fingerprint: make([]string, 0),
return &Scope{
breadcrumbs: make([]*Breadcrumb, 0),
attachments: make([]*Attachment, 0),
tags: make(map[string]string),
contexts: make(map[string]Context),
extra: make(map[string]interface{}),
fingerprint: make([]string, 0),
propagationContext: NewPropagationContext(),
}
return &scope
}
// AddBreadcrumb adds new breadcrumb to the current scope
@ -292,6 +294,30 @@ func (scope *Scope) SetLevel(level Level) {
scope.level = level
}
// SetPropagationContext sets the propagation context for the current scope.
func (scope *Scope) SetPropagationContext(propagationContext PropagationContext) {
scope.mu.Lock()
defer scope.mu.Unlock()
scope.propagationContext = propagationContext
}
// GetSpan returns the span from the current scope.
func (scope *Scope) GetSpan() *Span {
scope.mu.RLock()
defer scope.mu.RUnlock()
return scope.span
}
// SetSpan sets a span for the current scope.
func (scope *Scope) SetSpan(span *Span) {
scope.mu.Lock()
defer scope.mu.Unlock()
scope.span = span
}
// Clone returns a copy of the current scope with all data copied over.
func (scope *Scope) Clone() *Scope {
scope.mu.RLock()
@ -318,6 +344,8 @@ func (scope *Scope) Clone() *Scope {
clone.request = scope.request
clone.requestBody = scope.requestBody
clone.eventProcessors = scope.eventProcessors
clone.propagationContext = scope.propagationContext
clone.span = scope.span
return clone
}
@ -335,7 +363,7 @@ func (scope *Scope) AddEventProcessor(processor EventProcessor) {
}
// ApplyToEvent takes the data from the current scope and attaches it to the event.
func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint) *Event {
func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint, client *Client) *Event {
scope.mu.RLock()
defer scope.mu.RUnlock()
@ -379,6 +407,29 @@ func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint) *Event {
}
}
if event.Contexts == nil {
event.Contexts = make(map[string]Context)
}
if scope.span != nil {
if _, ok := event.Contexts["trace"]; !ok {
event.Contexts["trace"] = scope.span.traceContext().Map()
}
transaction := scope.span.GetTransaction()
if transaction != nil {
event.sdkMetaData.dsc = DynamicSamplingContextFromTransaction(transaction)
}
} else {
event.Contexts["trace"] = scope.propagationContext.Map()
dsc := scope.propagationContext.DynamicSamplingContext
if !dsc.HasEntries() && client != nil {
dsc = DynamicSamplingContextFromScope(scope, client)
}
event.sdkMetaData.dsc = dsc
}
if len(scope.extra) > 0 {
if event.Extra == nil {
event.Extra = make(map[string]interface{}, len(scope.extra))
@ -421,7 +472,7 @@ func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint) *Event {
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Scope EventProcessors: %s\n", id)
DebugLogger.Printf("Event dropped by one of the Scope EventProcessors: %s\n", id)
return nil
}
}
@ -435,7 +486,7 @@ func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint) *Event {
// a proper deep copy: if some context values are pointer types (e.g. maps),
// they won't be properly copied.
func cloneContext(c Context) Context {
res := Context{}
res := make(Context, len(c))
for k, v := range c {
res[k] = v
}

View file

@ -6,7 +6,7 @@ import (
)
// The version of the SDK.
const SDKVersion = "0.28.1"
const SDKVersion = "0.34.1"
// apiVersion is the minimum version of the Sentry API compatible with the
// sentry-go SDK.
@ -125,6 +125,23 @@ func Flush(timeout time.Duration) bool {
return hub.Flush(timeout)
}
// FlushWithContext waits until the underlying Transport sends any buffered events
// to the Sentry server, blocking for at most the duration specified by the context.
// It returns false if the context is canceled before the events are sent. In such a case,
// some events may not be delivered.
//
// FlushWithContext should be called before terminating the program to ensure no
// events are unintentionally dropped.
//
// Avoid calling FlushWithContext indiscriminately after each call to CaptureEvent,
// CaptureException, or CaptureMessage. To send events synchronously over the network,
// configure the SDK to use HTTPSyncTransport during initialization with Init.
func FlushWithContext(ctx context.Context) bool {
hub := CurrentHub()
return hub.FlushWithContext(ctx)
}
// LastEventID returns an ID of last captured event.
func LastEventID() EventID {
hub := CurrentHub()

View file

@ -24,7 +24,7 @@ func (r *spanRecorder) record(s *Span) {
if len(r.spans) >= maxSpans {
r.overflowOnce.Do(func() {
root := r.spans[0]
Logger.Printf("Too many spans: dropping spans from transaction with TraceID=%s SpanID=%s limit=%d",
DebugLogger.Printf("Too many spans: dropping spans from transaction with TraceID=%s SpanID=%s limit=%d",
root.TraceID, root.SpanID, maxSpans)
})
// TODO(tracing): mark the transaction event in some way to

View file

@ -4,6 +4,7 @@ import (
"go/build"
"reflect"
"runtime"
"slices"
"strings"
)
@ -277,12 +278,7 @@ func extractFrames(pcs []uintptr) []runtime.Frame {
}
}
// TODO don't append and reverse, put in the right place from the start.
// reverse
for i, j := 0, len(frames)-1; i < j; i, j = i+1, j-1 {
frames[i], frames[j] = frames[j], frames[i]
}
slices.Reverse(frames)
return frames
}
@ -307,6 +303,9 @@ func createFrames(frames []runtime.Frame) []Frame {
}
}
// Fix issues grouping errors with the new fully qualified function names
// introduced from Go 1.21
result = cleanupFunctionNamePrefix(result)
return result
}
@ -333,12 +332,10 @@ func shouldSkipFrame(module string) bool {
var goRoot = strings.ReplaceAll(build.Default.GOROOT, "\\", "/")
func setInAppFrame(frame *Frame) {
if strings.HasPrefix(frame.AbsPath, goRoot) ||
strings.Contains(frame.Module, "vendor") ||
frame.InApp = true
if strings.HasPrefix(frame.AbsPath, goRoot) || strings.Contains(frame.Module, "vendor") ||
strings.Contains(frame.Module, "third_party") {
frame.InApp = false
} else {
frame.InApp = true
}
}
@ -379,3 +376,32 @@ func baseName(name string) string {
}
return name
}
// isCompilerGeneratedSymbol reports whether name is a compiler-generated
// symbol that belongs to no package. In Go 1.20 and above these carry a
// "type:" or "go:" prefix (see variable reservedimports in
// cmd/compile/internal/gc/subr.go).
func isCompilerGeneratedSymbol(name string) bool {
	for _, prefix := range []string{"go:", "type:"} {
		if strings.HasPrefix(name, prefix) {
			return true
		}
	}
	return false
}
// cleanupFunctionNamePrefix walks the frames from the innermost outwards and,
// whenever a function name is fully qualified with its parent frame's name
// (the Go 1.21+ naming scheme), strips that parent prefix so only the local
// name remains. This keeps error grouping stable across Go versions.
func cleanupFunctionNamePrefix(f []Frame) []Frame {
	for i := len(f) - 1; i > 0; i-- {
		prefix := f[i-1].Function + "."
		if strings.HasPrefix(f[i].Function, prefix) {
			f[i].Function = strings.TrimPrefix(f[i].Function, prefix)
		}
	}
	return f
}

View file

@ -1,15 +0,0 @@
//go:build !go1.20
package sentry
import "strings"
// isCompilerGeneratedSymbol reports whether name is a compiler-generated
// symbol that belongs to no package. In Go versions below 1.20 these carry a
// "type." or "go." prefix (see variable reservedimports in
// cmd/compile/internal/gc/subr.go).
func isCompilerGeneratedSymbol(name string) bool {
	for _, prefix := range []string{"go.", "type."} {
		if strings.HasPrefix(name, prefix) {
			return true
		}
	}
	return false
}

View file

@ -1,15 +0,0 @@
//go:build go1.20
package sentry
import "strings"
// isCompilerGeneratedSymbol reports whether name is a compiler-generated
// symbol that belongs to no package. In Go 1.20 and above these carry a
// "type:" or "go:" prefix (see variable reservedimports in
// cmd/compile/internal/gc/subr.go).
func isCompilerGeneratedSymbol(name string) bool {
	switch {
	case strings.HasPrefix(name, "go:"):
		return true
	case strings.HasPrefix(name, "type:"):
		return true
	default:
		return false
	}
}

View file

@ -1,95 +0,0 @@
package sentry
import (
"sync"
"time"
)
// Checks whether the transaction should be profiled (according to ProfilesSampleRate)
// and starts a profiler if so.
func (s *Span) sampleTransactionProfile() {
	var sampleRate = s.clientOptions().ProfilesSampleRate
	switch {
	case sampleRate < 0.0 || sampleRate > 1.0:
		Logger.Printf("Skipping transaction profiling: ProfilesSampleRate out of range [0.0, 1.0]: %f\n", sampleRate)
	case sampleRate == 0.0 || rng.Float64() >= sampleRate:
		Logger.Printf("Skipping transaction profiling: ProfilesSampleRate is: %f\n", sampleRate)
	default:
		// Start the process-wide profiler at most once, on first demand.
		startProfilerOnce.Do(startGlobalProfiler)
		if globalProfiler == nil {
			Logger.Println("Skipping transaction profiling: the profiler couldn't be started")
		} else {
			// Defer the actual slice collection until the span finishes.
			s.collectProfile = collectTransactionProfile
		}
	}
}
// transactionProfiler collects a profile for a given span.
type transactionProfiler func(span *Span) *profileInfo

var startProfilerOnce sync.Once
var globalProfiler profiler

// startGlobalProfiler lazily starts the single process-wide profiler; it is
// invoked exactly once via startProfilerOnce.
func startGlobalProfiler() {
	globalProfiler = startProfiling(time.Now())
}
// collectTransactionProfile slices the global profiler's ring buffer over the
// span's lifetime and wraps it into a profile envelope item; returns nil when
// no samples were collected for that window.
func collectTransactionProfile(span *Span) *profileInfo {
	result := globalProfiler.GetSlice(span.StartTime, span.EndTime)
	if result == nil || result.trace == nil {
		return nil
	}

	info := &profileInfo{
		Version: "1",
		EventID: uuid(),
		// See https://github.com/getsentry/sentry-go/pull/626#discussion_r1204870340 for explanation why we use the Transaction time.
		Timestamp: span.StartTime,
		Trace:     result.trace,
		Transaction: profileTransaction{
			DurationNS: uint64(span.EndTime.Sub(span.StartTime).Nanoseconds()),
			Name:       span.Name,
			TraceID:    span.TraceID.String(),
		},
	}
	if len(info.Transaction.Name) == 0 {
		// Name is required by Relay so use the operation name if the span name is empty.
		info.Transaction.Name = span.Op
	}
	if result.callerGoID > 0 {
		info.Transaction.ActiveThreadID = result.callerGoID
	}
	return info
}
// UpdateFromEvent copies environment, platform, release and runtime/OS/device
// metadata from the transaction event into the profile envelope item.
func (info *profileInfo) UpdateFromEvent(event *Event) {
	info.Environment = event.Environment
	info.Platform = event.Platform
	info.Release = event.Release
	info.Dist = event.Dist
	info.Transaction.ID = event.EventID

	// getStringFromContext returns context[key] when it holds a string,
	// otherwise originalValue.
	getStringFromContext := func(context map[string]interface{}, originalValue, key string) string {
		v, ok := context[key]
		if !ok {
			return originalValue
		}

		if s, ok := v.(string); ok {
			return s
		}

		return originalValue
	}

	if runtimeContext, ok := event.Contexts["runtime"]; ok {
		info.Runtime.Name = getStringFromContext(runtimeContext, info.Runtime.Name, "name")
		info.Runtime.Version = getStringFromContext(runtimeContext, info.Runtime.Version, "version")
	}
	if osContext, ok := event.Contexts["os"]; ok {
		info.OS.Name = getStringFromContext(osContext, info.OS.Name, "name")
	}
	if deviceContext, ok := event.Contexts["device"]; ok {
		info.Device.Architecture = getStringFromContext(deviceContext, info.Device.Architecture, "arch")
	}
}

View file

@ -8,6 +8,7 @@ import (
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
"sync"
"time"
@ -18,25 +19,42 @@ const (
SentryBaggageHeader = "baggage"
)
// SpanOrigin indicates what created a trace or a span. See: https://develop.sentry.dev/sdk/performance/trace-origin/
type SpanOrigin string
const (
SpanOriginManual = "manual"
SpanOriginEcho = "auto.http.echo"
SpanOriginFastHTTP = "auto.http.fasthttp"
SpanOriginFiber = "auto.http.fiber"
SpanOriginGin = "auto.http.gin"
SpanOriginStdLib = "auto.http.stdlib"
SpanOriginIris = "auto.http.iris"
SpanOriginNegroni = "auto.http.negroni"
)
// A Span is the building block of a Sentry transaction. Spans build up a tree
// structure of timed operations. The span tree makes up a transaction event
// that is sent to Sentry when the root span is finished.
//
// Spans must be started with either StartSpan or Span.StartChild.
type Span struct { //nolint: maligned // prefer readability over optimal memory layout (see note below *)
TraceID TraceID `json:"trace_id"`
SpanID SpanID `json:"span_id"`
ParentSpanID SpanID `json:"parent_span_id"`
Name string `json:"name,omitempty"`
Op string `json:"op,omitempty"`
Description string `json:"description,omitempty"`
Status SpanStatus `json:"status,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
StartTime time.Time `json:"start_timestamp"`
EndTime time.Time `json:"timestamp"`
Data map[string]interface{} `json:"data,omitempty"`
Sampled Sampled `json:"-"`
Source TransactionSource `json:"-"`
TraceID TraceID `json:"trace_id"`
SpanID SpanID `json:"span_id"`
ParentSpanID SpanID `json:"parent_span_id"`
Name string `json:"name,omitempty"`
Op string `json:"op,omitempty"`
Description string `json:"description,omitempty"`
Status SpanStatus `json:"status,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
StartTime time.Time `json:"start_timestamp"`
EndTime time.Time `json:"timestamp"`
// Deprecated: use Data instead. To be removed in 0.33.0
Extra map[string]interface{} `json:"-"`
Data map[string]interface{} `json:"data,omitempty"`
Sampled Sampled `json:"-"`
Source TransactionSource `json:"-"`
Origin SpanOrigin `json:"origin,omitempty"`
// mu protects concurrent writes to map fields
mu sync.RWMutex
@ -53,10 +71,10 @@ type Span struct { //nolint: maligned // prefer readability over optimal memory
recorder *spanRecorder
// span context, can only be set on transactions
contexts map[string]Context
// collectProfile is a function that collects a profile of the current transaction. May be nil.
collectProfile transactionProfiler
// a Once instance to make sure that Finish() is only called once.
finishOnce sync.Once
// explicitSampled is a flag for configuring sampling by using `WithSpanSampled` option.
explicitSampled Sampled
}
// TraceParentContext describes the context of a (remote) parent span.
@ -113,11 +131,19 @@ func StartSpan(ctx context.Context, operation string, options ...SpanOption) *Sp
parent: parent,
}
_, err := rand.Read(span.SpanID[:])
if err != nil {
panic(err)
}
if hasParent {
span.TraceID = parent.TraceID
span.ParentSpanID = parent.SpanID
span.Origin = parent.Origin
} else {
// Only set the Source if this is a transaction
span.Source = SourceCustom
span.Origin = SpanOriginManual
// Implementation note:
//
@ -154,13 +180,6 @@ func StartSpan(ctx context.Context, operation string, options ...SpanOption) *Sp
panic(err)
}
}
_, err := rand.Read(span.SpanID[:])
if err != nil {
panic(err)
}
if hasParent {
span.ParentSpanID = parent.SpanID
}
// Apply options to override defaults.
for _, option := range options {
@ -176,15 +195,10 @@ func StartSpan(ctx context.Context, operation string, options ...SpanOption) *Sp
span.recorder.record(&span)
hub := hubFromContext(ctx)
// Update scope so that all events include a trace context, allowing
// Sentry to correlate errors to transactions/spans.
hub.Scope().SetContext("trace", span.traceContext().Map())
// Start profiling only if it's a sampled root transaction.
if span.IsTransaction() && span.Sampled.Bool() {
span.sampleTransactionProfile()
clientOptions := span.clientOptions()
if clientOptions.EnableTracing {
hub := hubFromContext(ctx)
hub.Scope().SetSpan(&span)
}
return &span
@ -287,7 +301,7 @@ func (s *Span) GetTransaction() *Span {
// func (s *Span) TransactionName() string
// func (s *Span) SetTransactionName(name string)
// ToSentryTrace returns the seralized TraceParentContext from a transaction/span.
// ToSentryTrace returns the serialized TraceParentContext from a transaction/span.
// Use this function to propagate the TraceParentContext to a downstream SDK,
// either as the value of the "sentry-trace" HTTP header, or as an html "sentry-trace" meta tag.
func (s *Span) ToSentryTrace() string {
@ -308,17 +322,21 @@ func (s *Span) ToSentryTrace() string {
// Use this function to propagate the DynamicSamplingContext to a downstream SDK,
// either as the value of the "baggage" HTTP header, or as an html "baggage" meta tag.
func (s *Span) ToBaggage() string {
if containingTransaction := s.GetTransaction(); containingTransaction != nil {
// In case there is currently no frozen DynamicSamplingContext attached to the transaction,
// create one from the properties of the transaction.
if !s.dynamicSamplingContext.IsFrozen() {
// This will return a frozen DynamicSamplingContext.
s.dynamicSamplingContext = DynamicSamplingContextFromTransaction(containingTransaction)
}
return containingTransaction.dynamicSamplingContext.String()
t := s.GetTransaction()
if t == nil {
return ""
}
return ""
// In case there is currently no frozen DynamicSamplingContext attached to the transaction,
// create one from the properties of the transaction.
if !s.dynamicSamplingContext.IsFrozen() {
// This will return a frozen DynamicSamplingContext.
if dsc := DynamicSamplingContextFromTransaction(t); dsc.HasEntries() {
t.dynamicSamplingContext = dsc
}
}
return t.dynamicSamplingContext.String()
}
// SetDynamicSamplingContext sets the given dynamic sampling context on the
@ -335,6 +353,13 @@ func (s *Span) doFinish() {
s.EndTime = monotonicTimeSince(s.StartTime)
}
hub := hubFromContext(s.ctx)
if !s.IsTransaction() {
if s.parent != nil {
hub.Scope().SetSpan(s.parent)
}
}
if !s.Sampled.Bool() {
return
}
@ -343,14 +368,9 @@ func (s *Span) doFinish() {
return
}
if s.collectProfile != nil {
event.sdkMetaData.transactionProfile = s.collectProfile(s)
}
// TODO(tracing): add breadcrumbs
// (see https://github.com/getsentry/sentry-python/blob/f6f3525f8812f609/sentry_sdk/tracing.py#L372)
hub := hubFromContext(s.ctx)
hub.CaptureEvent(event)
}
@ -429,21 +449,21 @@ func (s *Span) sample() Sampled {
// https://develop.sentry.dev/sdk/performance/#sampling
// #1 tracing is not enabled.
if !clientOptions.EnableTracing {
Logger.Printf("Dropping transaction: EnableTracing is set to %t", clientOptions.EnableTracing)
DebugLogger.Printf("Dropping transaction: EnableTracing is set to %t", clientOptions.EnableTracing)
s.sampleRate = 0.0
return SampledFalse
}
// #2 explicit sampling decision via StartSpan/StartTransaction options.
if s.Sampled != SampledUndefined {
Logger.Printf("Using explicit sampling decision from StartSpan/StartTransaction: %v", s.Sampled)
switch s.Sampled {
if s.explicitSampled != SampledUndefined {
DebugLogger.Printf("Using explicit sampling decision from StartSpan/StartTransaction: %v", s.explicitSampled)
switch s.explicitSampled {
case SampledTrue:
s.sampleRate = 1.0
case SampledFalse:
s.sampleRate = 0.0
}
return s.Sampled
return s.explicitSampled
}
// Variant for non-transaction spans: they inherit the parent decision.
@ -464,42 +484,52 @@ func (s *Span) sample() Sampled {
if sampler != nil {
tracesSamplerSampleRate := sampler.Sample(samplingContext)
s.sampleRate = tracesSamplerSampleRate
// tracesSampler can update the sample_rate on frozen DSC
if s.dynamicSamplingContext.HasEntries() {
s.dynamicSamplingContext.Entries["sample_rate"] = strconv.FormatFloat(tracesSamplerSampleRate, 'f', -1, 64)
}
if tracesSamplerSampleRate < 0.0 || tracesSamplerSampleRate > 1.0 {
Logger.Printf("Dropping transaction: Returned TracesSampler rate is out of range [0.0, 1.0]: %f", tracesSamplerSampleRate)
DebugLogger.Printf("Dropping transaction: Returned TracesSampler rate is out of range [0.0, 1.0]: %f", tracesSamplerSampleRate)
return SampledFalse
}
if tracesSamplerSampleRate == 0 {
Logger.Printf("Dropping transaction: Returned TracesSampler rate is: %f", tracesSamplerSampleRate)
if tracesSamplerSampleRate == 0.0 {
DebugLogger.Printf("Dropping transaction: Returned TracesSampler rate is: %f", tracesSamplerSampleRate)
return SampledFalse
}
if rng.Float64() < tracesSamplerSampleRate {
return SampledTrue
}
Logger.Printf("Dropping transaction: TracesSampler returned rate: %f", tracesSamplerSampleRate)
DebugLogger.Printf("Dropping transaction: TracesSampler returned rate: %f", tracesSamplerSampleRate)
return SampledFalse
}
// #4 inherit parent decision.
if s.parent != nil {
Logger.Printf("Using sampling decision from parent: %v", s.parent.Sampled)
switch s.parent.Sampled {
if s.Sampled != SampledUndefined {
DebugLogger.Printf("Using sampling decision from parent: %v", s.Sampled)
switch s.Sampled {
case SampledTrue:
s.sampleRate = 1.0
case SampledFalse:
s.sampleRate = 0.0
}
return s.parent.Sampled
return s.Sampled
}
// #5 use TracesSampleRate from ClientOptions.
sampleRate := clientOptions.TracesSampleRate
s.sampleRate = sampleRate
// tracesSampleRate can update the sample_rate on frozen DSC
if s.dynamicSamplingContext.HasEntries() {
s.dynamicSamplingContext.Entries["sample_rate"] = strconv.FormatFloat(sampleRate, 'f', -1, 64)
}
if sampleRate < 0.0 || sampleRate > 1.0 {
Logger.Printf("Dropping transaction: TracesSamplerRate out of range [0.0, 1.0]: %f", sampleRate)
DebugLogger.Printf("Dropping transaction: TracesSampleRate out of range [0.0, 1.0]: %f", sampleRate)
return SampledFalse
}
if sampleRate == 0.0 {
Logger.Printf("Dropping transaction: TracesSampleRate rate is: %f", sampleRate)
DebugLogger.Printf("Dropping transaction: TracesSampleRate rate is: %f", sampleRate)
return SampledFalse
}
@ -522,7 +552,7 @@ func (s *Span) toEvent() *Event {
finished := make([]*Span, 0, len(children))
for _, child := range children {
if child.EndTime.IsZero() {
Logger.Printf("Dropped unfinished span: Op=%q TraceID=%s SpanID=%s", child.Op, child.TraceID, child.SpanID)
DebugLogger.Printf("Dropped unfinished span: Op=%q TraceID=%s SpanID=%s", child.Op, child.TraceID, child.SpanID)
continue
}
finished = append(finished, child)
@ -534,7 +564,7 @@ func (s *Span) toEvent() *Event {
s.dynamicSamplingContext = DynamicSamplingContextFromTransaction(s)
}
contexts := map[string]Context{}
contexts := make(map[string]Context, len(s.contexts)+1)
for k, v := range s.contexts {
contexts[k] = cloneContext(v)
}
@ -551,7 +581,6 @@ func (s *Span) toEvent() *Event {
Transaction: s.Name,
Contexts: contexts,
Tags: s.Tags,
Extra: s.Data,
Timestamp: s.EndTime,
StartTime: s.StartTime,
Spans: finished,
@ -570,6 +599,7 @@ func (s *Span) traceContext() *TraceContext {
SpanID: s.SpanID,
ParentSpanID: s.ParentSpanID,
Op: s.Op,
Data: s.Data,
Description: s.Description,
Status: s.Status,
}
@ -706,31 +736,32 @@ const (
maxSpanStatus
)
var spanStatuses = [maxSpanStatus]string{
"",
"ok",
"cancelled", // [sic]
"unknown",
"invalid_argument",
"deadline_exceeded",
"not_found",
"already_exists",
"permission_denied",
"resource_exhausted",
"failed_precondition",
"aborted",
"out_of_range",
"unimplemented",
"internal_error",
"unavailable",
"data_loss",
"unauthenticated",
}
func (ss SpanStatus) String() string {
if ss >= maxSpanStatus {
return ""
}
m := [maxSpanStatus]string{
"",
"ok",
"cancelled", // [sic]
"unknown",
"invalid_argument",
"deadline_exceeded",
"not_found",
"already_exists",
"permission_denied",
"resource_exhausted",
"failed_precondition",
"aborted",
"out_of_range",
"unimplemented",
"internal_error",
"unavailable",
"data_loss",
"unauthenticated",
}
return m[ss]
return spanStatuses[ss]
}
func (ss SpanStatus) MarshalJSON() ([]byte, error) {
@ -744,12 +775,13 @@ func (ss SpanStatus) MarshalJSON() ([]byte, error) {
// A TraceContext carries information about an ongoing trace and is meant to be
// stored in Event.Contexts (as *TraceContext).
type TraceContext struct {
TraceID TraceID `json:"trace_id"`
SpanID SpanID `json:"span_id"`
ParentSpanID SpanID `json:"parent_span_id"`
Op string `json:"op,omitempty"`
Description string `json:"description,omitempty"`
Status SpanStatus `json:"status,omitempty"`
TraceID TraceID `json:"trace_id"`
SpanID SpanID `json:"span_id"`
ParentSpanID SpanID `json:"parent_span_id"`
Op string `json:"op,omitempty"`
Description string `json:"description,omitempty"`
Status SpanStatus `json:"status,omitempty"`
Data map[string]interface{} `json:"data,omitempty"`
}
func (tc *TraceContext) MarshalJSON() ([]byte, error) {
@ -792,6 +824,10 @@ func (tc TraceContext) Map() map[string]interface{} {
m["status"] = tc.Status
}
if len(tc.Data) > 0 {
m["data"] = tc.Data
}
return m
}
@ -866,10 +902,29 @@ func WithTransactionSource(source TransactionSource) SpanOption {
// WithSpanSampled updates the sampling flag for a given span.
func WithSpanSampled(sampled Sampled) SpanOption {
return func(s *Span) {
s.Sampled = sampled
s.explicitSampled = sampled
}
}
// WithSpanOrigin returns a SpanOption that records the given origin on the
// span, indicating what created it (see SpanOrigin).
func WithSpanOrigin(origin SpanOrigin) SpanOption {
	return func(span *Span) {
		span.Origin = origin
	}
}
// ContinueTrace continues a trace based on traceparent and baggage values.
// It always stores the parsed propagation context on the hub's scope; the
// returned SpanOption (from ContinueFromHeaders) additionally links newly
// started spans to the incoming trace.
func ContinueTrace(hub *Hub, traceparent, baggage string) SpanOption {
	// The parse error is deliberately discarded; the resulting context is
	// stored on the scope regardless.
	propagationContext, _ := PropagationContextFromHeaders(traceparent, baggage)
	hub.Scope().SetPropagationContext(propagationContext)
	return ContinueFromHeaders(traceparent, baggage)
}
// ContinueFromRequest returns a span option that updates the span to continue
// an existing trace. If it cannot detect an existing trace in the request, the
// span will be left unchanged.
@ -887,16 +942,17 @@ func ContinueFromHeaders(trace, baggage string) SpanOption {
return func(s *Span) {
if trace != "" {
s.updateFromSentryTrace([]byte(trace))
}
if baggage != "" {
s.updateFromBaggage([]byte(baggage))
}
// In case a sentry-trace header is present but there are no sentry-related
// values in the baggage, create an empty, frozen DynamicSamplingContext.
if trace != "" && !s.dynamicSamplingContext.HasEntries() {
s.dynamicSamplingContext = DynamicSamplingContext{
Frozen: true,
if baggage != "" {
s.updateFromBaggage([]byte(baggage))
}
// In case a sentry-trace header is present but there are no sentry-related
// values in the baggage, create an empty, frozen DynamicSamplingContext.
if !s.dynamicSamplingContext.HasEntries() {
s.dynamicSamplingContext = DynamicSamplingContext{
Frozen: true,
}
}
}
}
@ -939,6 +995,7 @@ func SpanFromContext(ctx context.Context) *Span {
func StartTransaction(ctx context.Context, name string, options ...SpanOption) *Span {
currentTransaction, exists := ctx.Value(spanContextKey{}).(*Span)
if exists {
currentTransaction.ctx = ctx
return currentTransaction
}

View file

@ -16,8 +16,10 @@ import (
"github.com/getsentry/sentry-go/internal/ratelimit"
)
const defaultBufferSize = 30
const defaultTimeout = time.Second * 30
const (
defaultBufferSize = 1000
defaultTimeout = time.Second * 30
)
// maxDrainResponseBytes is the maximum number of bytes that transport
// implementations will read from response bodies when draining them.
@ -33,8 +35,10 @@ const maxDrainResponseBytes = 16 << 10
// Transport is used by the Client to deliver events to remote server.
type Transport interface {
Flush(timeout time.Duration) bool
FlushWithContext(ctx context.Context) bool
Configure(options ClientOptions)
SendEvent(event *Event)
Close()
}
func getProxyConfig(options ClientOptions) func(*http.Request) (*url.URL, error) {
@ -83,67 +87,18 @@ func getRequestBodyFromEvent(event *Event) []byte {
}
body, err = json.Marshal(event)
if err == nil {
Logger.Println(msg)
DebugLogger.Println(msg)
return body
}
// This should _only_ happen when Event.Exception[0].Stacktrace.Frames[0].Vars is unserializable
// Which won't ever happen, as we don't use it now (although it's the part of public interface accepted by Sentry)
// Juuust in case something, somehow goes utterly wrong.
Logger.Println("Event couldn't be marshaled, even with stripped contextual data. Skipping delivery. " +
DebugLogger.Println("Event couldn't be marshaled, even with stripped contextual data. Skipping delivery. " +
"Please notify the SDK owners with possibly broken payload.")
return nil
}
func marshalMetrics(metrics []Metric) []byte {
var b bytes.Buffer
for i, metric := range metrics {
b.WriteString(metric.GetKey())
if unit := metric.GetUnit(); unit != "" {
b.WriteString(fmt.Sprintf("@%s", unit))
}
b.WriteString(fmt.Sprintf("%s|%s", metric.SerializeValue(), metric.GetType()))
if serializedTags := metric.SerializeTags(); serializedTags != "" {
b.WriteString(fmt.Sprintf("|#%s", serializedTags))
}
b.WriteString(fmt.Sprintf("|T%d", metric.GetTimestamp()))
if i < len(metrics)-1 {
b.WriteString("\n")
}
}
return b.Bytes()
}
func encodeMetric(enc *json.Encoder, b io.Writer, metrics []Metric) error {
body := marshalMetrics(metrics)
// Item header
err := enc.Encode(struct {
Type string `json:"type"`
Length int `json:"length"`
}{
Type: metricType,
Length: len(body),
})
if err != nil {
return err
}
// metric payload
if _, err = b.Write(body); err != nil {
return err
}
// "Envelopes should be terminated with a trailing newline."
//
// [1]: https://develop.sentry.dev/sdk/envelopes/#envelopes
if _, err := b.Write([]byte("\n")); err != nil {
return err
}
return err
}
func encodeAttachment(enc *json.Encoder, b io.Writer, attachment *Attachment) error {
// Attachment header
err := enc.Encode(struct {
@ -192,6 +147,23 @@ func encodeEnvelopeItem(enc *json.Encoder, itemType string, body json.RawMessage
return err
}
// encodeEnvelopeLogs writes an envelope item header describing a batch of
// log events (type, item count, content type), followed by the serialized
// log payload itself.
func encodeEnvelopeLogs(enc *json.Encoder, itemsLength int, body json.RawMessage) error {
	header := struct {
		Type        string `json:"type"`
		ItemCount   int    `json:"item_count"`
		ContentType string `json:"content_type"`
	}{
		Type:        logEvent.Type,
		ItemCount:   itemsLength,
		ContentType: logEvent.ContentType,
	}
	if err := enc.Encode(header); err != nil {
		return err
	}
	return enc.Encode(body)
}
func envelopeFromBody(event *Event, dsn *Dsn, sentAt time.Time, body json.RawMessage) (*bytes.Buffer, error) {
var b bytes.Buffer
enc := json.NewEncoder(&b)
@ -228,8 +200,8 @@ func envelopeFromBody(event *Event, dsn *Dsn, sentAt time.Time, body json.RawMes
switch event.Type {
case transactionType, checkInType:
err = encodeEnvelopeItem(enc, event.Type, body)
case metricType:
err = encodeMetric(enc, &b, event.Metrics)
case logEvent.Type:
err = encodeEnvelopeLogs(enc, len(event.Logs), body)
default:
err = encodeEnvelopeItem(enc, eventType, body)
}
@ -245,18 +217,6 @@ func envelopeFromBody(event *Event, dsn *Dsn, sentAt time.Time, body json.RawMes
}
}
// Profile data
if event.sdkMetaData.transactionProfile != nil {
body, err = json.Marshal(event.sdkMetaData.transactionProfile)
if err != nil {
return nil, err
}
err = encodeEnvelopeItem(enc, profileType, body)
if err != nil {
return nil, err
}
}
return &b, nil
}
@ -279,10 +239,12 @@ func getRequestFromEvent(ctx context.Context, event *Event, dsn *Dsn) (r *http.R
r.Header.Set("X-Sentry-Auth", auth)
}
}()
body := getRequestBodyFromEvent(event)
if body == nil {
return nil, errors.New("event could not be marshaled")
}
envelope, err := envelopeFromBody(event, dsn, time.Now(), body)
if err != nil {
return nil, err
@ -341,7 +303,8 @@ type HTTPTransport struct {
// current in-flight items and starts a new batch for subsequent events.
buffer chan batch
start sync.Once
startOnce sync.Once
closeOnce sync.Once
// Size of the transport buffer. Defaults to 30.
BufferSize int
@ -350,6 +313,9 @@ type HTTPTransport struct {
mu sync.RWMutex
limits ratelimit.Map
// receiving signal will terminate worker.
done chan struct{}
}
// NewHTTPTransport returns a new pre-configured instance of HTTPTransport.
@ -357,7 +323,7 @@ func NewHTTPTransport() *HTTPTransport {
transport := HTTPTransport{
BufferSize: defaultBufferSize,
Timeout: defaultTimeout,
limits: make(ratelimit.Map),
done: make(chan struct{}),
}
return &transport
}
@ -366,7 +332,7 @@ func NewHTTPTransport() *HTTPTransport {
func (t *HTTPTransport) Configure(options ClientOptions) {
dsn, err := NewDsn(options.Dsn)
if err != nil {
Logger.Printf("%v\n", err)
DebugLogger.Printf("%v\n", err)
return
}
t.dsn = dsn
@ -399,7 +365,7 @@ func (t *HTTPTransport) Configure(options ClientOptions) {
}
}
t.start.Do(func() {
t.startOnce.Do(func() {
go t.worker()
})
}
@ -450,7 +416,7 @@ func (t *HTTPTransport) SendEventWithContext(ctx context.Context, event *Event)
} else {
eventType = fmt.Sprintf("%s event", event.Level)
}
Logger.Printf(
DebugLogger.Printf(
"Sending %s [%s] to %s project: %s",
eventType,
event.EventID,
@ -458,7 +424,7 @@ func (t *HTTPTransport) SendEventWithContext(ctx context.Context, event *Event)
t.dsn.projectID,
)
default:
Logger.Println("Event dropped due to transport buffer being full.")
DebugLogger.Println("Event dropped due to transport buffer being full.")
}
t.buffer <- b
@ -475,8 +441,19 @@ func (t *HTTPTransport) SendEventWithContext(ctx context.Context, event *Event)
// have the SDK send events over the network synchronously, configure it to use
// the HTTPSyncTransport in the call to Init.
func (t *HTTPTransport) Flush(timeout time.Duration) bool {
toolate := time.After(timeout)
timeoutCh := make(chan struct{})
time.AfterFunc(timeout, func() {
close(timeoutCh)
})
return t.flushInternal(timeoutCh)
}
// FlushWithContext works like Flush, but it accepts a context.Context instead of a timeout.
func (t *HTTPTransport) FlushWithContext(ctx context.Context) bool {
return t.flushInternal(ctx.Done())
}
func (t *HTTPTransport) flushInternal(timeout <-chan struct{}) bool {
// Wait until processing the current batch has started or the timeout.
//
// We must wait until the worker has seen the current batch, because it is
@ -484,6 +461,7 @@ func (t *HTTPTransport) Flush(timeout time.Duration) bool {
// possible execution flow in which b.done is never closed, and the only way
// out of Flush would be waiting for the timeout, which is undesired.
var b batch
for {
select {
case b = <-t.buffer:
@ -493,7 +471,7 @@ func (t *HTTPTransport) Flush(timeout time.Duration) bool {
default:
t.buffer <- b
}
case <-toolate:
case <-timeout:
goto fail
}
}
@ -512,17 +490,28 @@ started:
// Wait until the current batch is done or the timeout.
select {
case <-b.done:
Logger.Println("Buffer flushed successfully.")
DebugLogger.Println("Buffer flushed successfully.")
return true
case <-toolate:
case <-timeout:
goto fail
}
fail:
Logger.Println("Buffer flushing reached the timeout.")
DebugLogger.Println("Buffer flushing was canceled or timed out.")
return false
}
// Close terminates the event-sending worker loop. It is useful for
// preventing goroutine leaks when multiple HTTPTransport instances are
// created during a program's lifetime.
//
// Close should be called after Flush and before terminating the program,
// otherwise some events may be lost.
func (t *HTTPTransport) Close() {
	// closeOnce makes Close idempotent: the done channel is closed at most
	// once, so repeated calls cannot panic on a double close.
	t.closeOnce.Do(func() {
		close(t.done)
	})
}
func (t *HTTPTransport) worker() {
for b := range t.buffer {
// Signal that processing of the current batch has started.
@ -533,30 +522,44 @@ func (t *HTTPTransport) worker() {
t.buffer <- b
// Process all batch items.
for item := range b.items {
if t.disabled(item.category) {
continue
}
response, err := t.client.Do(item.request)
if err != nil {
Logger.Printf("There was an issue with sending an event: %v", err)
continue
}
if response.StatusCode >= 400 && response.StatusCode <= 599 {
b, err := io.ReadAll(response.Body)
if err != nil {
Logger.Printf("Error while reading response code: %v", err)
loop:
for {
select {
case <-t.done:
return
case item, open := <-b.items:
if !open {
break loop
}
Logger.Printf("Sending %s failed with the following error: %s", eventType, string(b))
if t.disabled(item.category) {
continue
}
response, err := t.client.Do(item.request)
if err != nil {
DebugLogger.Printf("There was an issue with sending an event: %v", err)
continue
}
if response.StatusCode >= 400 && response.StatusCode <= 599 {
b, err := io.ReadAll(response.Body)
if err != nil {
DebugLogger.Printf("Error while reading response code: %v", err)
}
DebugLogger.Printf("Sending %s failed with the following error: %s", eventType, string(b))
}
t.mu.Lock()
if t.limits == nil {
t.limits = make(ratelimit.Map)
}
t.limits.Merge(ratelimit.FromResponse(response))
t.mu.Unlock()
// Drain body up to a limit and close it, allowing the
// transport to reuse TCP connections.
_, _ = io.CopyN(io.Discard, response.Body, maxDrainResponseBytes)
response.Body.Close()
}
t.mu.Lock()
t.limits.Merge(ratelimit.FromResponse(response))
t.mu.Unlock()
// Drain body up to a limit and close it, allowing the
// transport to reuse TCP connections.
_, _ = io.CopyN(io.Discard, response.Body, maxDrainResponseBytes)
response.Body.Close()
}
// Signal that processing of the batch is done.
@ -569,7 +572,7 @@ func (t *HTTPTransport) disabled(c ratelimit.Category) bool {
defer t.mu.RUnlock()
disabled := t.limits.IsRateLimited(c)
if disabled {
Logger.Printf("Too many requests for %q, backing off till: %v", c, t.limits.Deadline(c))
DebugLogger.Printf("Too many requests for %q, backing off till: %v", c, t.limits.Deadline(c))
}
return disabled
}
@ -615,7 +618,7 @@ func NewHTTPSyncTransport() *HTTPSyncTransport {
func (t *HTTPSyncTransport) Configure(options ClientOptions) {
dsn, err := NewDsn(options.Dsn)
if err != nil {
Logger.Printf("%v\n", err)
DebugLogger.Printf("%v\n", err)
return
}
t.dsn = dsn
@ -644,6 +647,8 @@ func (t *HTTPSyncTransport) SendEvent(event *Event) {
t.SendEventWithContext(context.Background(), event)
}
// Close is a no-op for HTTPSyncTransport, which has no background worker to
// shut down; it exists to satisfy the Transport interface.
func (t *HTTPSyncTransport) Close() {}
// SendEventWithContext assembles a new packet out of Event and sends it to the remote server.
func (t *HTTPSyncTransport) SendEventWithContext(ctx context.Context, event *Event) {
if t.dsn == nil {
@ -659,18 +664,18 @@ func (t *HTTPSyncTransport) SendEventWithContext(ctx context.Context, event *Eve
return
}
var eventType string
switch {
case event.Type == transactionType:
eventType = "transaction"
case event.Type == metricType:
eventType = metricType
var eventIdentifier string
switch event.Type {
case transactionType:
eventIdentifier = "transaction"
case logEvent.Type:
eventIdentifier = fmt.Sprintf("%v log events", len(event.Logs))
default:
eventType = fmt.Sprintf("%s event", event.Level)
eventIdentifier = fmt.Sprintf("%s event", event.Level)
}
Logger.Printf(
DebugLogger.Printf(
"Sending %s [%s] to %s project: %s",
eventType,
eventIdentifier,
event.EventID,
t.dsn.host,
t.dsn.projectID,
@ -678,18 +683,22 @@ func (t *HTTPSyncTransport) SendEventWithContext(ctx context.Context, event *Eve
response, err := t.client.Do(request)
if err != nil {
Logger.Printf("There was an issue with sending an event: %v", err)
DebugLogger.Printf("There was an issue with sending an event: %v", err)
return
}
if response.StatusCode >= 400 && response.StatusCode <= 599 {
b, err := io.ReadAll(response.Body)
if err != nil {
Logger.Printf("Error while reading response code: %v", err)
DebugLogger.Printf("Error while reading response code: %v", err)
}
Logger.Printf("Sending %s failed with the following error: %s", eventType, string(b))
DebugLogger.Printf("Sending %s failed with the following error: %s", eventIdentifier, string(b))
}
t.mu.Lock()
if t.limits == nil {
t.limits = make(ratelimit.Map)
}
t.limits.Merge(ratelimit.FromResponse(response))
t.mu.Unlock()
@ -704,12 +713,17 @@ func (t *HTTPSyncTransport) Flush(_ time.Duration) bool {
return true
}
// FlushWithContext is a no-op for HTTPSyncTransport: events are sent
// synchronously in SendEvent, so there is nothing buffered to flush.
// It always returns true immediately.
func (t *HTTPSyncTransport) FlushWithContext(_ context.Context) bool {
	return true
}
func (t *HTTPSyncTransport) disabled(c ratelimit.Category) bool {
t.mu.Lock()
defer t.mu.Unlock()
disabled := t.limits.IsRateLimited(c)
if disabled {
Logger.Printf("Too many requests for %q, backing off till: %v", c, t.limits.Deadline(c))
DebugLogger.Printf("Too many requests for %q, backing off till: %v", c, t.limits.Deadline(c))
}
return disabled
}
@ -725,13 +739,19 @@ type noopTransport struct{}
var _ Transport = noopTransport{}
func (noopTransport) Configure(ClientOptions) {
Logger.Println("Sentry client initialized with an empty DSN. Using noopTransport. No events will be delivered.")
DebugLogger.Println("Sentry client initialized with an empty DSN. Using noopTransport. No events will be delivered.")
}
func (noopTransport) SendEvent(*Event) {
Logger.Println("Event dropped due to noopTransport usage.")
DebugLogger.Println("Event dropped due to noopTransport usage.")
}
func (noopTransport) Flush(time.Duration) bool {
return true
}
func (noopTransport) FlushWithContext(context.Context) bool {
return true
}
func (noopTransport) Close() {}

View file

@ -36,7 +36,7 @@ func monotonicTimeSince(start time.Time) (end time.Time) {
return start.Add(time.Since(start))
}
// nolint: deadcode, unused
// nolint: unused
func prettyPrint(data interface{}) {
dbg, _ := json.MarshalIndent(data, "", " ")
fmt.Println(string(dbg))
@ -62,7 +62,7 @@ func defaultRelease() (release string) {
}
for _, e := range envs {
if release = os.Getenv(e); release != "" {
Logger.Printf("Using release from environment variable %s: %s", e, release)
DebugLogger.Printf("Using release from environment variable %s: %s", e, release)
return release
}
}
@ -89,23 +89,23 @@ func defaultRelease() (release string) {
if err, ok := err.(*exec.ExitError); ok && len(err.Stderr) > 0 {
fmt.Fprintf(&s, ": %s", err.Stderr)
}
Logger.Print(s.String())
DebugLogger.Print(s.String())
} else {
release = strings.TrimSpace(string(b))
Logger.Printf("Using release from Git: %s", release)
DebugLogger.Printf("Using release from Git: %s", release)
return release
}
}
Logger.Print("Some Sentry features will not be available. See https://docs.sentry.io/product/releases/.")
Logger.Print("To stop seeing this message, pass a Release to sentry.Init or set the SENTRY_RELEASE environment variable.")
DebugLogger.Print("Some Sentry features will not be available. See https://docs.sentry.io/product/releases/.")
DebugLogger.Print("To stop seeing this message, pass a Release to sentry.Init or set the SENTRY_RELEASE environment variable.")
return ""
}
func revisionFromBuildInfo(info *debug.BuildInfo) string {
for _, setting := range info.Settings {
if setting.Key == "vcs.revision" && setting.Value != "" {
Logger.Printf("Using release from debug info: %s", setting.Value)
DebugLogger.Printf("Using release from debug info: %s", setting.Value)
return setting.Value
}
}

12
vendor/modules.txt vendored
View file

@ -676,15 +676,19 @@ github.com/getkin/kin-openapi/openapi3filter
github.com/getkin/kin-openapi/routers
github.com/getkin/kin-openapi/routers/legacy
github.com/getkin/kin-openapi/routers/legacy/pathpattern
# github.com/getsentry/sentry-go v0.28.1
## explicit; go 1.18
# github.com/getsentry/sentry-go v0.34.1
## explicit; go 1.21
github.com/getsentry/sentry-go
github.com/getsentry/sentry-go/echo
github.com/getsentry/sentry-go/attribute
github.com/getsentry/sentry-go/internal/debug
github.com/getsentry/sentry-go/internal/otel/baggage
github.com/getsentry/sentry-go/internal/otel/baggage/internal/baggage
github.com/getsentry/sentry-go/internal/ratelimit
github.com/getsentry/sentry-go/internal/traceparser
# github.com/getsentry/sentry-go/echo v0.34.1
## explicit; go 1.21
github.com/getsentry/sentry-go/echo
# github.com/getsentry/sentry-go/logrus v0.34.1
## explicit; go 1.21
github.com/getsentry/sentry-go/logrus
# github.com/go-jose/go-jose/v4 v4.0.5
## explicit; go 1.21