Update packages and add vendor directory

Eddy Filip
2022-09-13 16:24:52 +03:00
parent 23b66f73af
commit 1d75f78891
3906 changed files with 1365387 additions and 465 deletions

15
vendor/github.com/uber/jaeger-client-go/.gitignore generated vendored Normal file

@@ -0,0 +1,15 @@
*.out
*.test
*.xml
*.swp
.idea/
.tmp/
*.iml
*.cov
*.html
*.log
gen/thrift/js
gen/thrift/py
vendor/
crossdock-main
crossdock/jaeger-docker-compose.yml

3
vendor/github.com/uber/jaeger-client-go/.gitmodules generated vendored Normal file

@@ -0,0 +1,3 @@
[submodule "idl"]
path = idl
url = https://github.com/uber/jaeger-idl.git

56
vendor/github.com/uber/jaeger-client-go/.travis.yml generated vendored Normal file

@@ -0,0 +1,56 @@
sudo: required
language: go
go_import_path: github.com/uber/jaeger-client-go
dist: trusty
matrix:
include:
# - go: 1.15.x
# env:
# - TESTS=true
# - USE_DEP=true
# - COVERAGE=true
- go: 1.15.x
env:
- USE_DEP=true
- CROSSDOCK=true
# - go: 1.15.x
# env:
# - TESTS=true
# - USE_DEP=false
# - USE_GLIDE=true
# test with previous version of Go
# - go: 1.14.x
# env:
# - TESTS=true
# - USE_DEP=true
# - CI_SKIP_LINT=true
services:
- docker
env:
global:
- DOCKER_COMPOSE_VERSION=1.8.0
- COMMIT=${TRAVIS_COMMIT::8}
# DOCKER_PASS
- secure: "CnjVyxNvMC/dhr/eR7C+FiWucZ4/O5LfAuz9YU0qlnV6XLR7XXRtzZlfFKIImJT6xHp+OptTqAIXqUbvwK2OXDP1ZsLiWRm+2elb9/isGusWXjs3g817lX8njSUcIFILbfi+vAE7UD2BKjHxpmvWmCZidisU1rcaZ9OQNPqMnNIDxVx0FOTwYx+2hfkdjnN5dikzafBDQ6ZZV/mGbcaTG45GGFU6DHyVLzf9qCPXyXnz2+VDhcoPQsYkzE56XHCmHxvEfXxgfqYefJNUlFPhniAQySVsCNVDJ8QcCV6uHaXoIzxJKx9FdUnWKI1/AtpQsTZPgEm4Ujnt+kGJsXopXy2Xx4MZxmcTCBwAMjZxPMF7KoojbtDeOZgEMtf1tGPN6DTNc3NpVmr0BKZ44lhqk+vnd8HAiC1tHDEoSb1Esl7dMUUf1qZAh3MtT+NYi3mTwyx/ilXUS7KPyy7x0ezB3kGuMoLhvR2hrprqRr5NOV2hrd1au+IXmb+4IanFOsBlceBfs8P0JFMO/aw15r+HimSZpQsJx//IT0LReCZYXLe0/WVsF/8+HDwHKlO99gGpk4iXlNKKvdPWabihMp3I3peMrvL+jnlwh47RqHs/0Q71xsKjVWTn+Svq3FpVP0Pgyxhg+oG4WEByBiLnBQcZwSBhWexkJrNI73GzaZiIldk="
# DOCKER_USER
- secure: "bpBSmypHzI4PnteM4cwLiMC2163Sj/4mEl+1dj+6NWl2tr1hREeVXKhsWBpah25n6BDyr2A4yhBZcWLaNKrsCKT3U37csAQTOFVeQ9x5xhPq+ohANd/OsspFsxNZaKwx161LizH/uTDotMxxevZacsyYWGNv/cRFkwcQ8upLkReRR6puJ+jNQC0BFpKWBJY/zpm5J7xFb7FO20LvQVyRgsgzqWmg9oRNVw9uwOfSY3btacftYctDLUbAr8YRNHd2C6dZnMAi8KdDTLXKTqjKmp6WidOmi92Ml7tOjB+bV6TOaVAhrcI5Rdje4rRWG4MucAjPMP0ZBW36KTfcGqFUcDhX7UqISe2WxoI+8ZD6fJ+nNtD3bk4YAUJB4BSs2sQdiYyjpHyGJR6RW50+3uRz2YbXpzVr9wqv2lZSl/xy3wC5Hag55uqzVlSiDw2pK8lctT3dnQveE7PqAI577PjF2NrHlgrBbykOwwUCNbRTmykzqoDnkxclmiZ+rflEeWsSYglePK/d6Gj9+N7wJZM5heprdJJMFTrzMWZ21Ll9ZGY9updCBKmJA8pBYiLHbu0lWOp+9QUGC+621Zq0d1PHhN6L4eXk/f3RNoZTr//cX6WdNmmO7tBbaGpmp/UYiYTY1WO9vP7tCDsT75k285HCfnIrlGRdbCZZbfuYNGPKIQ0="
install:
- make install-ci USE_DEP=$USE_DEP
- if [ "$CROSSDOCK" == true ]; then bash ./travis/install-crossdock-deps.sh ; fi
script:
- if [ "$TESTS" == true ]; then make test-ci ; else echo 'skipping tests'; fi
- if [ "$CROSSDOCK" == true ]; then bash ./travis/build-crossdock.sh ; else echo 'skipping crossdock'; fi
after_success:
- if [ "$COVERAGE" == true ]; then mv cover.out coverage.txt ; else echo 'skipping coverage'; fi
- if [ "$COVERAGE" == true ]; then bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
after_failure:
- if [ "$CROSSDOCK" == true ]; then timeout 5 docker-compose -f crossdock/docker-compose.yml logs; fi

396
vendor/github.com/uber/jaeger-client-go/CHANGELOG.md generated vendored Normal file

@@ -0,0 +1,396 @@
Changes by Version
==================
2.30.0 (2021-12-07)
-------------------
- Add deprecation notice -- Yuri Shkuro
- Use public struct for tracer options to document initialization better (#605) -- Yuri Shkuro
- Remove redundant newline in NewReporter init message (#603) -- wwade
- [zipkin] Encode span IDs as full 16-hex strings #601 -- Nathan
- [docs] Replace godoc.org with pkg.go.dev (#591) -- Aaron Jheng
- Remove outdated reference to Zipkin model. -- Yuri Shkuro
- Move thrift compilation to a script (#590) -- Aaron Jheng
- Document JAEGER_TRACEID_128BIT env var -- Yuri Shkuro
2.29.1 (2021-05-24)
-------------------
- Remove dependency on "testing" in "thrift" (#586) -- @yurishkuro
2.29.0 (2021-05-20)
-------------------
- Update vendored thrift to 0.14.1 (#584) -- @nhatthm
2.28.0 (2021-04-30)
-------------------
- HTTPSamplingStrategyFetcher: Use http client with 10 second timeout (#578) -- Joe Elliott
2.27.0 (2021-04-19)
-------------------
- Don't override HTTP Reporter batch size to 1; default to 100, user can override (#571) -- R. Aidan Campbell
2.26.0 (2021-04-16)
-------------------
- Delete a baggage item when value is blank (#562) -- evan.kim
- Trim baggage key when parsing (#566) -- sicong.huang
- feat: extend configuration to support custom randomNumber func (#555) -- NemoO_o
- Support JAEGER_TRACEID_128BIT env var (#547) -- Yuri Shkuro
- Additional context protections (#544) -- Joe Elliott
- Lock RemotelyControlledSampler.sampler on callbacks (#543) -- Dima
- Upgrade build to Go 1.15 (#539) -- Yuri Shkuro
- Upgrade to jaeger-lib@2.3.0 to fix broken codahale/hdrhistogram dependency (#537) -- Yuri Shkuro
- Prefix TraceID/SpanID.String() with zeroes (#533) -- Lukas Vogel
- Upgrade to OpenTracing Go 1.2 (#525) -- Yuri Shkuro
2.25.0 (2020-07-13)
-------------------
## Breaking changes
- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster
The re-resolving of UDP address is now enabled by default, to make the client more robust in Kubernetes deployments.
The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct,
or via JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable.
## Bug fixes
- Do not add invalid context to references (#521) -- Yuri Shkuro
2.24.0 (2020-06-14)
-------------------
- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher
- Serialize access to RemotelyControlledSampler.sampler (#515) -- Dima
- Override reporter config only when agent host/port is set in env (#513) -- ilylia
- Converge on JAEGER_SAMPLING_ENDPOINT env variable (#511) -- Eundoo Song
2.23.1 (2020-04-28)
-------------------
- Fix regression by handling nil logger correctly ([#507](https://github.com/jaegertracing/jaeger-client-go/pull/507)) -- Prithvi Raj
2.23.0 (2020-04-22)
-------------------
- Add the ability to log all span interactions at a new debug log level([#502](https://github.com/jaegertracing/jaeger-client-go/pull/502), [#503](https://github.com/jaegertracing/jaeger-client-go/pull/503), [#504](https://github.com/jaegertracing/jaeger-client-go/pull/504)) -- Prithvi Raj
- Chore (docs): fix typos ([#496](https://github.com/jaegertracing/jaeger-client-go/pull/496), [#498](https://github.com/jaegertracing/jaeger-client-go/pull/498)) -- Febrian Setianto and Ivan Babrou
- Unset highest bit of traceID in probabilistic sampler ([#490](https://github.com/jaegertracing/jaeger-client-go/pull/490)) -- Sokolov Yura
2.22.1 (2020-01-16)
-------------------
- Increase UDP batch overhead to account for data loss metrics ([#488](https://github.com/jaegertracing/jaeger-client-go/pull/488)) -- Yuri Shkuro
2.22.0 (2020-01-15)
-------------------
- Report data loss stats to Jaeger backend ([#482](https://github.com/jaegertracing/jaeger-client-go/pull/482)) -- Yuri Shkuro
- Add limit on log records per span ([#483](https://github.com/jaegertracing/jaeger-client-go/pull/483)) -- Sokolov Yura
2.21.1 (2019-12-20)
-------------------
- Update version correctly.
2.21.0 (2019-12-20)
-------------------
- Clarify reporting error logs ([#469](https://github.com/jaegertracing/jaeger-client-go/pull/469)) -- Yuri Shkuro
- Do not strip leading zeros from trace IDs ([#472](https://github.com/jaegertracing/jaeger-client-go/pull/472)) -- Yuri Shkuro
- Chore (docs): fixed a couple of typos ([#475](https://github.com/jaegertracing/jaeger-client-go/pull/475)) -- Marc Bramaud
- Support custom HTTP headers when reporting spans over HTTP ([#479](https://github.com/jaegertracing/jaeger-client-go/pull/479)) -- Albert Teoh
2.20.1 (2019-11-08)
-------------------
Minor patch via https://github.com/jaegertracing/jaeger-client-go/pull/468
- Make `AdaptiveSamplerUpdater` usable with default values; Resolves #467
- Create `OperationNameLateBinding` sampler option and config option
- Make `SamplerOptions` var of public type, so that its functions are discoverable via godoc
2.20.0 (2019-11-06)
-------------------
## New Features
- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj
Sampling state is shared between all spans of the trace that are still in memory.
This allows implementation of delayed sampling decisions (see below).
- Support delayed sampling decisions (#449) -- Yuri Shkuro
This is a large structural change to how the samplers work.
It allows some samplers to be executed multiple times on different
span events (like setting a tag) and make a positive sampling decision
later in the span life cycle, or even based on children spans.
See [README](./README.md#delayed-sampling) for more details.
There is a related minor change in behavior of the adaptive (per-operation) sampler,
which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the
operation used to make the sampling decision is always the one provided at span creation.
- Add experimental tag matching sampler (#452) -- Yuri Shkuro
A sampler that can sample a trace based on a certain tag added to the root
span or one of its local (in-process) children. The sampler can be used with
another experimental `PrioritySampler` that allows multiple samplers to try
to make a sampling decision, in a certain priority order.
- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta
- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy
## Minor patches
- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro
- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro
- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi
- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro
2.19.0 (2019-09-23)
-------------------
- Upgrade jaeger-lib to 2.2 and unpin Prom client (#434) -- Yuri Shkuro
2.18.1 (2019-09-16)
-------------------
- Remove go.mod / go.sum that interfere with `go get` (#432)
2.18.0 (2019-09-09)
-------------------
- Add option "noDebugFlagOnForcedSampling" for tracer initialization [resolves #422] (#423) <Jun Guo>
2.17.0 (2019-08-30)
-------------------
- Add a flag for firehose mode (#419) <Prithvi Raj>
- Default sampling server URL to agent (#414) <Bryan Boreham>
- Update default sampling rate when sampling strategy is refreshed (#413) <Bryan Boreham>
- Support "Self" Span Reference (#411) <dm03514>
- Don't complain about blank service name if tracing is Disabled (#410) <Yuri Shkuro>
- Use IP address from tag if exist (#402) <NikoKVCS>
- Expose span data to custom reporters [fixes #394] (#399) <Curtis Allen>
- Fix the span allocation in the pool (#381) <Dmitry Ponomarev>
2.16.0 (2019-03-24)
-------------------
- Add baggage to B3 codec (#319) <Pavol Loffay>
- Add support for 128bit trace ids to zipkin thrift spans. (#378) <Douglas Reid>
- Update zipkin propagation logic to support 128bit traceIDs (#373) <Douglas Reid>
- Accept "true" for the x-b3-sampled header (#356) <Adrian Bogatu>
- Allow setting of PoolSpans from Config object (#322) <Matthew Pound>
- Make propagators public to allow wrapping (#379) <Ivan Babrou>
- Change default metric namespace to use relevant separator for the metric backend (#364) <Gary Brown>
- Change metrics prefix to jaeger_tracer and add descriptions (#346) <Gary Brown>
- Bump OpenTracing to ^1.1.x (#383) <Yuri Shkuro>
- Upgrade jaeger-lib to v2.0.0 (#359) <Gary Brown>
- Avoid defer when generating random number (#358) <Gary Brown>
- Use a pool of rand.Source to reduce lock contention when creating span ids (#357) <Gary Brown>
- Make JAEGER_ENDPOINT take priority over JAEGER_AGENT_XXX (#342) <Eundoo Song>
2.15.0 (2018-10-10)
-------------------
- Fix FollowsFrom spans ignoring baggage/debug header from dummy parent context (#313) <Zvi Cahana>
- Make maximum annotation length configurable in tracer options (#318) <Eric Chang>
- Support more environment variables in configuration (#323) <Daneyon Hansen>
- Print error on Sampler Query failure (#328) <Goutham Veeramachaneni>
- Add an HTTPOption to support custom http.RoundTripper (#333) <Michael Puncel>
- Return an error when an HTTP error code is seen in zipkin HTTP transport (#331) <Michael Puncel>
2.14.0 (2018-04-30)
-------------------
- Support throttling for debug traces (#274) <Isaac Hier>
- Remove dependency on Apache Thrift (#303) <Yuri Shkuro>
- Remove dependency on tchannel (#295) (#294) <Yuri Shkuro>
- Test with Go 1.9 (#298) <Yuri Shkuro>
2.13.0 (2018-04-15)
-------------------
- Use value receiver for config.NewTracer() (#283) <Yuri Shkuro>
- Lock span during jaeger thrift conversion (#273) <Won Jun Jang>
- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) <Scott Kidder> <Yuri Shkuro>
- Added support for client configuration via env vars (#275) <Juraci Paixão Kröhling>
- Allow overriding sampler in the Config (#270) <Mike Kabischev>
2.12.0 (2018-03-14)
-------------------
- Use lock when retrieving span.Context() (#268)
- Add Configuration support for custom Injector and Extractor (#263) <Martin Liu>
2.11.2 (2018-01-12)
-------------------
- Add Gopkg.toml to allow using the lib with `dep`
2.11.1 (2018-01-03)
-------------------
- Do not enqueue spans after Reporter is closed (#235, #245)
- Change default flush interval to 1sec (#243)
2.11.0 (2017-11-27)
-------------------
- Normalize metric names and tags to be compatible with Prometheus (#222)
2.10.0 (2017-11-14)
-------------------
- Support custom tracing headers (#176)
- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182)
- Do not coerce baggage keys to lower case (#196)
- Log span name when span cannot be reported (#198)
- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219)
2.9.0 (2017-07-29)
------------------
- Pin thrift <= 0.10 (#179)
- Introduce a parallel interface ContribObserver (#159)
2.8.0 (2017-07-05)
------------------
- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag
- Add options to set tracer tags
2.7.0 (2017-06-21)
------------------
- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140)
- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147)
- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153)
- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158)
- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161)
2.6.0 (2017-03-28)
------------------
- Add config option to initialize RPC Metrics feature
2.5.0 (2017-03-23)
------------------
- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123)
- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124)
- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125)
2.4.0 (2017-03-21)
------------------
- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121)
- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121)
2.3.0 (2017-03-20)
------------------
- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117)
- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118)
2.2.1 (2017-03-14)
------------------
- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111)
2.2.0 (2017-03-10)
------------------
- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94)
- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103)
2.1.2 (2017-02-27)
-------------------
- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99)
- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100)
- Add tracer initialization godoc examples
2.1.1 (2017-02-21)
-------------------
- Fix inefficient usage of zap.Logger
2.1.0 (2017-02-17)
-------------------
- Add adapter for zap.Logger (https://github.com/uber-go/zap)
- Move logging API to ./log/ package
2.0.0 (2017-02-08)
-------------------
- Support Adaptive Sampling
- Support 128bit Trace IDs
- Change trace/span IDs from uint64 to strong types TraceID and SpanID
- Add Zipkin HTTP B3 Propagation format support #72
- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics
- Change API for tracer, reporter, sampler initialization
1.6.0 (2016-10-14)
-------------------
- Add Zipkin HTTP transport
- Support external baggage via jaeger-baggage header
- Unpin Thrift version, keep to master
1.5.1 (2016-09-27)
-------------------
- Relax dependency on opentracing to ^1
1.5.0 (2016-09-27)
-------------------
- Upgrade to opentracing-go 1.0
- Support KV logging for Spans
1.4.0 (2016-09-14)
-------------------
- Support debug traces via HTTP header "jaeger-debug-id"

2
vendor/github.com/uber/jaeger-client-go/CODEOWNERS generated vendored Normal file

@@ -0,0 +1,2 @@
* @jaegertracing/jaeger-maintainers

170
vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,170 @@
# How to Contribute to Jaeger
We'd love your help!
Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
pull requests. This document outlines some of the conventions on development
workflow, commit message formatting, contact points and other resources to make
it easier to get your contribution accepted.
We gratefully welcome improvements to documentation as well as to code.
# Certificate of Origin
By contributing to this project you agree to the [Developer Certificate of
Origin](https://developercertificate.org/) (DCO). This document was created
by the Linux Kernel community and is a simple statement that you, as a
contributor, have the legal right to make the contribution. See the [DCO](DCO)
file for details.
## Getting Started
This library uses [dep](https://golang.github.io/dep/) to manage dependencies.
To get started, make sure you clone the Git repository into the correct location
`github.com/uber/jaeger-client-go` relative to `$GOPATH`:
```
mkdir -p $GOPATH/src/github.com/uber
cd $GOPATH/src/github.com/uber
git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go
cd jaeger-client-go
git submodule update --init --recursive
```
Then install dependencies and run the tests:
```
make install
make test
```
## Imports grouping
This project follows this pattern for grouping imports in Go files:
* imports from standard library
* imports from other projects
* imports from `jaeger-client-go` project
For example:
```go
import (
	"fmt"

	"github.com/uber/jaeger-lib/metrics"
	"go.uber.org/zap"

	"github.com/uber/jaeger-client-go/config"
)
```
## Making A Change
*Before making any significant changes, please [open an
issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed
changes ahead of time will make the contribution process smooth for everyone.
Once we've discussed your changes and you've got your code ready, make sure
that tests are passing (`make test` or `make cover`) and open your PR. Your
pull request is most likely to be accepted if it:
* Includes tests for new functionality.
* Follows the guidelines in [Effective
Go](https://golang.org/doc/effective_go.html) and the [Go team's common code
review comments](https://github.com/golang/go/wiki/CodeReviewComments).
* Has a [good commit message](https://chris.beams.io/posts/git-commit/):
* Separate subject from body with a blank line
* Limit the subject line to 50 characters
* Capitalize the subject line
* Do not end the subject line with a period
* Use the imperative mood in the subject line
* Wrap the body at 72 characters
* Use the body to explain _what_ and _why_ instead of _how_
* Each commit must be signed by the author ([see below](#sign-your-work)).
## License
By contributing your code, you agree to license your contribution under the terms
of the [Apache License](LICENSE).
If you are adding a new file, it should have a header like the one below. The easiest
way to add such a header is to run `make fmt`.
```
// Copyright (c) 2017 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
```
## Sign your work
The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below (from
[developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe@gmail.com>
using your real name (sorry, no pseudonyms or anonymous contributions).
You can add the sign off when creating the git commit via `git commit -s`.
If you want this to be automatic you can set up some aliases:
```
git config --add alias.amend "commit -s --amend"
git config --add alias.c "commit -s"
```

37
vendor/github.com/uber/jaeger-client-go/DCO generated vendored Normal file

@@ -0,0 +1,37 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.

301
vendor/github.com/uber/jaeger-client-go/Gopkg.lock generated vendored Normal file

@@ -0,0 +1,301 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113"
name = "github.com/HdrHistogram/hdrhistogram-go"
packages = ["."]
pruneopts = "UT"
revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
version = "0.9.0"
[[projects]]
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = "UT"
revision = "37c8de3658fcb183f997c4e13e8337516ab753e6"
version = "v1.0.1"
[[projects]]
branch = "master"
digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277"
name = "github.com/crossdock/crossdock-go"
packages = [
".",
"assert",
"require",
]
pruneopts = "UT"
revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "UT"
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:7ae311278f7ccaa724de8f2cdec0a507ba3ee6dea8c77237e8157bcf64b0f28b"
name = "github.com/golang/mock"
packages = ["gomock"]
pruneopts = "UT"
revision = "f7b1909c82a8958747e5c87c6a5c3b2eaed8a33d"
version = "v1.4.4"
[[projects]]
digest = "1:4a32eb57407190eced21a21abee9ce4d4ab6f0bf113ca61cb1cb2d549a65c985"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
]
pruneopts = "UT"
revision = "d04d7b157bb510b1e0c10132224b616ac0e26b17"
version = "v1.4.2"
[[projects]]
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = "UT"
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
digest = "1:fe5217d44ae8fb84f711968816fe50077cea9dfa8f44425b8e44e7e3de896d01"
name = "github.com/opentracing/opentracing-go"
packages = [
".",
"ext",
"harness",
"log",
]
pruneopts = "UT"
revision = "d34af3eaa63c4d08ab54863a4bdd0daa45212e12"
version = "v1.2.0"
[[projects]]
digest = "1:9e1d37b58d17113ec3cb5608ac0382313c5b59470b94ed97d0976e69c7022314"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = "UT"
revision = "614d223910a179a466c1767a985424175c39b465"
version = "v0.9.1"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:7097829edd12fd7211fca0d29496b44f94ef9e6d72f88fb64f3d7b06315818ad"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/internal",
]
pruneopts = "UT"
revision = "170205fb58decfd011f1550d4cfb737230d7ae4f"
version = "v1.1.0"
[[projects]]
digest = "1:0db23933b8052702d980a3f029149b3f175f7c0eea0cff85b175017d0f2722c0"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "UT"
revision = "7bc5445566f0fe75b15de23e6b93886e982d7bf9"
version = "v0.2.0"
[[projects]]
digest = "1:4407525bde4e6ad9c1f60113d38cbb255d769e0ea506c8cf877db39da7753b3a"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model",
]
pruneopts = "UT"
revision = "317b7b125e8fddda956d0c9574e5f03f438ed5bc"
version = "v0.14.0"
[[projects]]
digest = "1:b2268435af85ee1a0fca0e37de4225f78e2d9d8b0b66acde3a29f127634efa87"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/fs",
"internal/util",
]
pruneopts = "UT"
revision = "9dece15c53cd5e9fbfbd72d5108adcf526a3f486"
version = "v0.2.0"
[[projects]]
digest = "1:86ff4af7b6bb3d27c2e89b5ef8c139678acff1cad74a3c5235fc5af6b94fcc9e"
name = "github.com/stretchr/objx"
packages = ["."]
pruneopts = "UT"
revision = "35313a95ee26395aa17d366c71a2ccf788fa69b6"
version = "v0.3.0"
[[projects]]
digest = "1:5201127841a78d84d0ca68a2e564c08e3882c0fb9321a75997ce87926e0d63ea"
name = "github.com/stretchr/testify"
packages = [
"assert",
"mock",
"require",
"suite",
]
pruneopts = "UT"
revision = "f654a9112bbeac49ca2cd45bfbe11533c4666cf8"
version = "v1.6.1"
[[projects]]
digest = "1:4af46f2faea30e52c96ec9ec32bb654d2729579a80d242b0acfa193ad321eb61"
name = "github.com/uber/jaeger-lib"
packages = [
"metrics",
"metrics/metricstest",
"metrics/prometheus",
]
pruneopts = "UT"
revision = "48cc1df63e6be0d63b95677f0d22beb880bce1e4"
version = "v2.3.0"
[[projects]]
digest = "1:7a3de4371d6b68c6f37a0df2c09905664d9de59026c91cbe275aae55f4fe760f"
name = "go.uber.org/atomic"
packages = ["."]
pruneopts = "UT"
revision = "12f27ba2637fa0e13772a4f05fa46a5d18d53182"
version = "v1.7.0"
[[projects]]
digest = "1:e9eeeabfd025a5e69b9c8e2857d3517ea67e747ae913bcb0a9e1e7bafdb9c298"
name = "go.uber.org/multierr"
packages = ["."]
pruneopts = "UT"
revision = "3114a8b704d2d28dbacda34a872690aaef66aeed"
version = "v1.6.0"
[[projects]]
digest = "1:0398f5f0e2e9233f25fad702f3b323241daf9f876cc869ab259238cf1bced236"
name = "go.uber.org/zap"
packages = [
".",
"buffer",
"internal/bufferpool",
"internal/color",
"internal/exit",
"zapcore",
"zaptest/observer",
]
pruneopts = "UT"
revision = "404189cf44aea95b0cd9bddcb0242dd4cf88c510"
version = "v1.16.0"
[[projects]]
branch = "master"
digest = "1:f8b491a7c25030a895a0e579742d07136e6958e77ef2d46e769db8eec4e58fcd"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
]
pruneopts = "UT"
revision = "328152dc79b1547da63f950cd4cdd9afd50b2774"
[[projects]]
branch = "master"
digest = "1:1e581fa394685ef0d84008ae04cf3414390c1a700c04846853869cb4ac2fec86"
name = "golang.org/x/sys"
packages = [
"internal/unsafeheader",
"unix",
"windows",
]
pruneopts = "UT"
revision = "d9f96fdee20d1e5115ee34ba4016eae6cfb66eb9"
[[projects]]
digest = "1:fd328c5b52e433ea3ffc891bcc4f94469a82bf478558208db2b386aad8a304a1"
name = "google.golang.org/protobuf"
packages = [
"encoding/prototext",
"encoding/protowire",
"internal/descfmt",
"internal/descopts",
"internal/detrand",
"internal/encoding/defval",
"internal/encoding/messageset",
"internal/encoding/tag",
"internal/encoding/text",
"internal/errors",
"internal/fieldsort",
"internal/filedesc",
"internal/filetype",
"internal/flags",
"internal/genid",
"internal/impl",
"internal/mapsort",
"internal/pragma",
"internal/set",
"internal/strs",
"internal/version",
"proto",
"reflect/protoreflect",
"reflect/protoregistry",
"runtime/protoiface",
"runtime/protoimpl",
"types/known/anypb",
"types/known/durationpb",
"types/known/timestamppb",
]
pruneopts = "UT"
revision = "3f7a61f89bb6813f89d981d1870ed68da0b3c3f1"
version = "v1.25.0"
[[projects]]
branch = "v3"
digest = "1:229cb0f6192914f518cc1241ede6d6f1f458b31debfa18bf3a5c9e4f7b01e24b"
name = "gopkg.in/yaml.v3"
packages = ["."]
pruneopts = "UT"
revision = "eeeca48fe7764f320e4870d231902bf9c1be2c08"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/crossdock/crossdock-go",
"github.com/golang/mock/gomock",
"github.com/opentracing/opentracing-go",
"github.com/opentracing/opentracing-go/ext",
"github.com/opentracing/opentracing-go/harness",
"github.com/opentracing/opentracing-go/log",
"github.com/pkg/errors",
"github.com/prometheus/client_golang/prometheus",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/mock",
"github.com/stretchr/testify/require",
"github.com/stretchr/testify/suite",
"github.com/uber/jaeger-lib/metrics",
"github.com/uber/jaeger-lib/metrics/metricstest",
"github.com/uber/jaeger-lib/metrics/prometheus",
"go.uber.org/atomic",
"go.uber.org/zap",
"go.uber.org/zap/zapcore",
"go.uber.org/zap/zaptest/observer",
]
solver-name = "gps-cdcl"
solver-version = 1

31
vendor/github.com/uber/jaeger-client-go/Gopkg.toml generated vendored Normal file

@@ -0,0 +1,31 @@
[[constraint]]
name = "github.com/crossdock/crossdock-go"
branch = "master"
[[constraint]]
name = "github.com/opentracing/opentracing-go"
version = "^1.2"
[[constraint]]
name = "github.com/prometheus/client_golang"
version = "^1"
[[constraint]]
name = "github.com/stretchr/testify"
version = "^1.1.3"
[[constraint]]
name = "go.uber.org/atomic"
version = "^1"
[[constraint]]
name = "github.com/uber/jaeger-lib"
version = "^2.3"
[[constraint]]
name = "go.uber.org/zap"
version = "^1"
[prune]
go-tests = true
unused-packages = true

201
vendor/github.com/uber/jaeger-client-go/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

135
vendor/github.com/uber/jaeger-client-go/Makefile generated vendored Normal file

@@ -0,0 +1,135 @@
PROJECT_ROOT=github.com/uber/jaeger-client-go

export GO111MODULE=off

PACKAGES := . $(shell GO111MODULE=off go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)

# all .go files that don't exist in hidden directories
ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
	-e ".*/\..*" \
	-e ".*/_.*" \
	-e ".*/mocks.*")

USE_DEP := true

-include crossdock/rules.mk

RACE=-race
GOTEST=go test -v $(RACE)
GOLINT=golint
GOVET=go vet
GOFMT=gofmt
FMT_LOG=fmt.log
LINT_LOG=lint.log

THRIFT_VER=0.14
THRIFT_IMG=jaegertracing/thrift:$(THRIFT_VER)
THRIFT=docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) thrift

PASS=$(shell printf "\033[32mPASS\033[0m")
FAIL=$(shell printf "\033[31mFAIL\033[0m")
COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/''

.DEFAULT_GOAL := test-and-lint

.PHONY: test-and-lint
test-and-lint: test fmt lint

.PHONY: test
test:
ifeq ($(USE_DEP),true)
	dep check
endif
	bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)"

.PHONY: fmt
fmt:
	$(GOFMT) -e -s -l -w $(ALL_SRC)
	./scripts/updateLicenses.sh

.PHONY: lint
lint: vet golint lint-fmt lint-thrift-testing

.PHONY: vet
vet:
	$(GOVET) $(PACKAGES)

.PHONY: golint
golint:
	@cat /dev/null > $(LINT_LOG)
	@$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;)
	@[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false)

.PHONY: lint-fmt
lint-fmt:
	@$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG)
	./scripts/updateLicenses.sh >> $(FMT_LOG)
	@[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false)

# make sure thrift/ module does not import "testing"
.PHONY: lint-thrift-testing
lint-thrift-testing:
	@cat /dev/null > $(LINT_LOG)
	@(grep -rn '"testing"' thrift | grep -v README.md > $(LINT_LOG)) || true
	@[ ! -s "$(LINT_LOG)" ] || (echo '"thrift" module must not import "testing", see issue #585' | cat - $(LINT_LOG) && false)

.PHONY: install
install:
	@echo install: USE_DEP=$(USE_DEP) USE_GLIDE=$(USE_GLIDE)
ifeq ($(USE_DEP),true)
	dep version || make install-dep
	dep ensure -vendor-only -v
endif
ifeq ($(USE_GLIDE),true)
	glide --version || go get github.com/Masterminds/glide
	glide install
endif

.PHONY: cover
cover:
	$(GOTEST) -cover -coverprofile cover.out $(PACKAGES)

.PHONY: cover-html
cover-html: cover
	go tool cover -html=cover.out -o cover.html

# This is not part of the regular test target because we don't want to slow it
# down.
.PHONY: test-examples
test-examples:
	make -C examples

.PHONY: thrift
thrift: idl-submodule thrift-compile

# TODO at the moment we're not generating tchan_*.go files
.PHONY: thrift-compile
thrift-compile: thrift-image
	docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) /data/scripts/gen-thrift.sh

.PHONY: idl-submodule
idl-submodule:
	git submodule init
	git submodule update

.PHONY: thrift-image
thrift-image:
	$(THRIFT) -version

.PHONY: install-dep
install-dep:
	- curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep
	- chmod +x $$GOPATH/bin/dep

.PHONY: install-ci
install-ci: install
	go get github.com/wadey/gocovmerge
	go get github.com/mattn/goveralls
	go get golang.org/x/tools/cmd/cover
	go get golang.org/x/lint/golint

.PHONY: test-ci
test-ci: cover
ifeq ($(CI_SKIP_LINT),true)
	echo 'skipping lint'
else
	make lint
endif

339
vendor/github.com/uber/jaeger-client-go/README.md generated vendored Normal file

@@ -0,0 +1,339 @@
[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
# 🛑 This library is being deprecated!
We urge all users to migrate to [OpenTelemetry](https://opentelemetry.io/). Please refer to the [notice in the documentation](https://www.jaegertracing.io/docs/latest/client-libraries/#deprecating-jaeger-clients) for details.
# Jaeger Bindings for Go OpenTracing API
Instrumentation library that implements an
[OpenTracing Go](https://github.com/opentracing/opentracing-go) Tracer for Jaeger (https://jaegertracing.io).
**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`; it will not compile. We might revisit this in the next major release.
* :white_check_mark: `import "github.com/uber/jaeger-client-go"`
* :x: `import "github.com/jaegertracing/jaeger-client-go"`
## How to Contribute
Please see [CONTRIBUTING.md](CONTRIBUTING.md).
## Installation
### Preferred
Add `github.com/uber/jaeger-client-go` to `go.mod`.
### Old way
We recommended using a dependency manager like [dep](https://golang.github.io/dep/)
and [semantic versioning](http://semver.org/) when including this library into an application.
For example, Jaeger backend imports this library like this:
```toml
[[constraint]]
name = "github.com/uber/jaeger-client-go"
version = "2.17"
```
If you instead want to use the latest version in `master`, you can pull it via `go get`.
Note that during `go get` you may see build errors due to incompatible dependencies, which is why
we recommend using semantic versions for dependencies. The error may be fixed by running
`make install` (it will install `dep` if you don't have it):
```shell
go get -u github.com/uber/jaeger-client-go/
cd $GOPATH/src/github.com/uber/jaeger-client-go/
git submodule update --init --recursive
make install
```
## Initialization
See tracer initialization examples in [godoc](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#pkg-examples)
and [config/example_test.go](./config/example_test.go).
There are two ways to create a tracer:
* Using [Configuration](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#Configuration) struct that allows declarative configuration. For example, you can populate that struct from a YAML/JSON config, or ask it to initialize itself using environment variables (see next section).
* Using [NewTracer()](https://pkg.go.dev/github.com/uber/jaeger-client-go#NewTracer) function that allows for full programmatic control of configuring the tracer using TracerOptions.
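For illustration, here is a minimal sketch of the second approach; the service name, the const sampler, and the null reporter are placeholder choices for the sketch, not recommendations:
```go
package main

import (
	"io"

	"github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// newTracer builds a tracer programmatically. A const sampler and a null
// reporter keep the sketch self-contained (no agent or collector required).
func newTracer() (opentracing.Tracer, io.Closer) {
	sampler := jaeger.NewConstSampler(true) // sample every trace
	reporter := jaeger.NewNullReporter()    // discard spans; swap in a RemoteReporter for real use
	return jaeger.NewTracer("my-service", sampler, reporter)
}

func main() {
	tracer, closer := newTracer()
	defer closer.Close()
	opentracing.SetGlobalTracer(tracer)

	span := tracer.StartSpan("example-operation")
	span.Finish()
}
```
In a real service the null reporter would typically be replaced by the default `RemoteReporter` described under Reporters below.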
### Environment variables
The tracer can be initialized with values coming from environment variables, if it is
[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer)
that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv).
None of the env vars are required and all of them can be overridden via direct setting
of the property on the configuration object.
Property | Description
--- | ---
JAEGER_SERVICE_NAME | The service name.
JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP (default `localhost`).
JAEGER_AGENT_PORT | The port for communicating with agent via UDP (default `6831`).
JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. If specified, the agent host/port are ignored.
JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint.
JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint.
JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans, `true` or `false` (default `false`).
JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`).
JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`).
JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When true, disables udp connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change (default `false`).
JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`).
JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/.
JAEGER_SAMPLER_PARAM | The sampler parameter (number).
JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler.
JAEGER_SAMPLING_ENDPOINT | The URL for the sampling configuration server when using sampler type `remote` (default `http://127.0.0.1:5778/sampling`).
JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`).
JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`).
JAEGER_TAGS | A comma separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`.
JAEGER_TRACEID_128BIT | Whether to enable 128bit trace-id generation, `true` or `false`. If not enabled, the SDK defaults to 64bit trace-ids.
JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`).
JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`).
By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and
`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
to the endpoint via `HTTP`, making the `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` unused. If `JAEGER_ENDPOINT` is
secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment
variables.
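As an illustration of the environment-driven path, here is a minimal sketch; the variable values set below are placeholders that would normally come from the deployment environment rather than from code:
```go
package main

import (
	"log"
	"os"

	"github.com/uber/jaeger-client-go/config"
)

func main() {
	// Placeholders for the sketch; a real deployment sets these externally.
	os.Setenv("JAEGER_SERVICE_NAME", "my-service")
	os.Setenv("JAEGER_SAMPLER_TYPE", "const")
	os.Setenv("JAEGER_SAMPLER_PARAM", "1")

	// FromEnv reads the JAEGER_* variables into a Configuration.
	cfg, err := config.FromEnv()
	if err != nil {
		log.Fatalf("could not parse Jaeger env vars: %v", err)
	}

	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("could not initialize tracer: %v", err)
	}
	defer closer.Close()

	_ = tracer // pass to opentracing.SetGlobalTracer(tracer) in real code
}
```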
### Closing the tracer via `io.Closer`
The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance.
It is recommended to structure your `main()` so that it calls the `Close()` function on the closer
before exiting, e.g.
```go
tracer, closer, err := cfg.NewTracer(...)
defer closer.Close()
```
This is especially useful for command-line tools that enable tracing, as well as
for the long-running apps that support graceful shutdown. For example, if your deployment
system sends SIGTERM instead of killing the process and you trap that signal to do a graceful
exit, then having `defer closer.Close()` ensures that all buffered spans are flushed.
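A minimal sketch of that shutdown pattern, assuming the deployment sends SIGTERM; the service name is a placeholder and the configuration is otherwise left at its defaults:
```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/uber/jaeger-client-go/config"
)

func main() {
	cfg := config.Configuration{ServiceName: "my-service"} // placeholder name
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("could not initialize tracer: %v", err)
	}
	// Closing the tracer flushes any buffered spans before the process exits.
	defer closer.Close()
	_ = tracer

	// Trap SIGTERM (and Ctrl-C) so the deferred Close() above actually runs.
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGTERM, os.Interrupt)
	<-quit
}
```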
### Metrics & Monitoring
The tracer emits a number of different metrics, defined in
[metrics.go](metrics.go). The monitoring backend is expected to support
tag-based metric names, e.g. instead of `statsd`-style string names
like `counters.my-service.jaeger.spans.started.sampled`, the metrics
are defined by a short name and a collection of key/value tags, for
example: `name:jaeger.traces, state:started, sampled:y`. See [metrics.go](./metrics.go)
file for the full list and descriptions of emitted metrics.
The monitoring backend is represented by the `metrics.Factory` interface from package
[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation
of that interface can be passed as an option to either the Configuration object or the Tracer
constructor, for example:
```go
import (
	"github.com/uber/jaeger-client-go/config"
	"github.com/uber/jaeger-lib/metrics/prometheus"
)

metricsFactory := prometheus.New()
tracer, closer, err := config.Configuration{
	ServiceName: "your-service-name",
}.NewTracer(
	config.Metrics(metricsFactory),
)
```
By default, a no-op `metrics.NullFactory` is used.
### Logging
The tracer can be configured with an optional logger, which will be
used to log communication errors, or log spans if a logging reporter
option is specified in the configuration. The logging API is abstracted
by the [Logger](logger.go) interface. A logger instance implementing
this interface can be set on the `Config` object before calling the
`New` method.
Besides the [zap](https://github.com/uber-go/zap) implementation
bundled with this package there is also a [go-kit](https://github.com/go-kit/kit)
one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository.
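For example, a minimal sketch that wires the bundled stdout logger into the tracer via the `config.Logger` option; the service name is a placeholder:
```go
package main

import (
	"log"

	"github.com/uber/jaeger-client-go/config"
	jaegerlog "github.com/uber/jaeger-client-go/log"
)

func main() {
	cfg := config.Configuration{ServiceName: "your-service-name"} // placeholder
	// jaegerlog.StdLogger writes to stdout, making transport errors (and,
	// with a logging reporter enabled, finished spans) visible.
	tracer, closer, err := cfg.NewTracer(
		config.Logger(jaegerlog.StdLogger),
	)
	if err != nil {
		log.Fatalf("could not initialize tracer: %v", err)
	}
	defer closer.Close()
	_ = tracer
}
```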
## Instrumentation for Tracing
Since this tracer is fully compliant with OpenTracing API 1.0,
all code instrumentation should only use the API itself, as described
in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation.
## Features
### Reporters
A "reporter" is a component that receives the finished spans and reports
them somewhere. Under normal circumstances, the Tracer
should use the default `RemoteReporter`, which sends the spans out of
process via configurable "transport". For testing purposes, one can
use an `InMemoryReporter` that accumulates spans in a buffer and
allows to retrieve them for later verification. Also available are
`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter`
which logs all finished spans using their `String()` method, and a
`CompositeReporter` that can be used to combine more than one reporter
into one, e.g. to attach a logging reporter to the main remote reporter.
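For instance, a unit test might wire the tracer directly with an `InMemoryReporter` and assert on the captured spans (a minimal sketch; the service and operation names are illustrative):
```go
import (
	"fmt"

	"github.com/uber/jaeger-client-go"
)

reporter := jaeger.NewInMemoryReporter()
tracer, closer := jaeger.NewTracer(
	"test-service",
	jaeger.NewConstSampler(true), // sample everything so spans reach the reporter
	reporter,
)
defer closer.Close()

span := tracer.StartSpan("load-user")
span.Finish()

spans := reporter.GetSpans() // finished spans, available for assertions
fmt.Printf("captured %d span(s)\n", len(spans))
```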
### Span Reporting Transports
The remote reporter uses "transports" to actually send the spans out
of process. Currently the supported transports include:
* [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP,
* [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP.
### Sampling
The tracer does not record all spans, but only those that have the
sampling bit set in the `flags`. When a new trace is started and a new
unique ID is generated, a sampling decision is made whether this trace
should be sampled. The sampling decision is propagated to all downstream
calls via the `flags` field of the trace context. The following samplers
are available:
1. `RemotelyControlledSampler` uses one of the other simpler samplers
and periodically updates it by polling an external server. This
allows dynamic control of the sampling strategies.
1. `ConstSampler` always makes the same sampling decision for all
trace IDs. It can be configured to either sample all traces or
to sample none.
1. `ProbabilisticSampler` uses a fixed sampling rate as a probability
for a given trace to be sampled. The actual decision is made by
comparing the trace ID with a random number multiplied by the
sampling rate.
1. `RateLimitingSampler` can be used to allow only a certain fixed
number of traces to be sampled per second.
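Any of these can be selected through the `Sampler` section of the `Configuration` object; for example, a minimal sketch that samples every trace using the const sampler:
```go
import (
	"github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/config"
)

cfg := config.Configuration{
	ServiceName: "your-service-name",
	Sampler: &config.SamplerConfig{
		Type:  jaeger.SamplerTypeConst, // also "probabilistic", "rateLimiting", or "remote"
		Param: 1,                       // for "const": 1 samples all traces, 0 samples none
	},
}
tracer, closer, err := cfg.NewTracer()
```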
#### Delayed sampling
Version 2.20 introduced the ability to delay sampling decisions in the life cycle
of the root span. It involves several features and architectural changes:
* **Shared sampling state**: the sampling state is shared across all local
(i.e. in-process) spans for a given trace.
* **New `SamplerV2` API** allows the sampler to be called at multiple points
in the life cycle of a span:
* on span creation
* on overwriting span operation name
* on setting span tags
* on finishing the span
* **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler
to indicate if the negative sampling decision is final or not (positive sampling
decisions are always final). If the decision is not final, the sampler will be
called again on further span life cycle events, like setting tags.
These new features are used in the experimental `x.TagMatchingSampler`, which
can sample a trace based on a certain tag added to the root
span or one of its local (in-process) children. The sampler can be used with
another experimental `x.PrioritySampler` that allows multiple samplers to try
to make a sampling decision, in a certain priority order.
### Baggage Injection
The OpenTracing spec allows for [baggage][baggage]: key-value pairs that are added
to the span context and propagated throughout the trace. An external process can inject baggage
by setting the special HTTP header `jaeger-baggage` on a request:
```sh
curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com
```
Baggage can also be set programmatically inside your service:
```go
if span := opentracing.SpanFromContext(ctx); span != nil {
span.SetBaggageItem("key", "value")
}
```
A downstream service can retrieve the baggage in a similar way:
```go
if span := opentracing.SpanFromContext(ctx); span != nil {
val := span.BaggageItem("key")
println(val)
}
```
### Debug Traces (Forced Sampling)
#### Programmatically
The OpenTracing API defines a `sampling.priority` standard tag that
can be used to affect the sampling of a span and its children:
```go
import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
)
span := opentracing.SpanFromContext(ctx)
ext.SamplingPriority.Set(span, 1)
```
#### Via HTTP Headers
Jaeger Tracer also understands a special HTTP header `jaeger-debug-id`,
which can be set in the incoming request, e.g.
```sh
curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com
```
When Jaeger sees this header in a request that otherwise has no
tracing context, it ensures that the new trace started for this
request will be sampled in "debug" mode (meaning it should survive
all downsampling that might happen in the collection pipeline), and the
root span will have a tag as if this statement had been executed:
```go
span.SetTag("jaeger-debug-id", "some-correlation-id")
```
This allows using Jaeger UI to find the trace by this tag.
### Zipkin HTTP B3 compatible header propagation
Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used
by many Zipkin tracers. This means that you can use Jaeger in conjunction with, for example, [these OpenZipkin tracers](https://github.com/openzipkin).
However, it is not the default propagation format; see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) for how to set it up.
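A rough sketch of wiring the B3 propagator through configuration options is shown below; see the linked Zipkin README for the authoritative setup:
```go
import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go/config"
	"github.com/uber/jaeger-client-go/zipkin"
)

// Register the B3 propagator for both injection and extraction of HTTP headers.
zipkinPropagator := zipkin.NewZipkinB3HTTPHeaderPropagator()
tracer, closer, err := config.Configuration{
	ServiceName: "your-service-name",
}.NewTracer(
	config.Injector(opentracing.HTTPHeaders, zipkinPropagator),
	config.Extractor(opentracing.HTTPHeaders, zipkinPropagator),
)
```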
## SelfRef
Jaeger Tracer supports an additional [span reference][] type called `Self`, which was proposed
to the OpenTracing Specification (https://github.com/opentracing/specification/issues/81)
but has not yet been accepted. This allows the caller to provide an already created `SpanContext`
when starting a new span. The `Self` reference bypasses trace and span id generation,
as well as sampling decisions (i.e. the sampling bit in the `SpanContext.flags` must be
set appropriately by the caller).
The `Self` reference supports the following use cases:
* the ability to provide externally generated trace and span IDs
* appending data to the same span from different processes, such as loading and continuing spans/traces from offline (i.e. log-based) storage
Usage requires passing in a `SpanContext` and the `jaeger.Self` reference type:
```go
span := tracer.StartSpan(
"continued_span",
jaeger.SelfRef(yourSpanContext),
)
...
defer span.Finish()
```
## License
[Apache 2.0 License](LICENSE).
[doc-img]: https://pkg.go.dev/badge/github.com/uber/jaeger-client-go.svg
[doc]: https://pkg.go.dev/github.com/uber/jaeger-client-go
[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go
[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg
[ot-url]: http://opentracing.io
[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item
[timeunits]: https://golang.org/pkg/time/#ParseDuration
[span reference]: https://github.com/opentracing/specification/blob/1.1/specification.md#references-between-spans

12
vendor/github.com/uber/jaeger-client-go/RELEASE.md generated vendored Normal file
View File

@@ -0,0 +1,12 @@
# Release Process
1. Create a PR "Preparing for release X.Y.Z" against master branch
* Alter CHANGELOG.md from `<placeholder_version> (unreleased)` to `<X.Y.Z> (YYYY-MM-DD)`
* Use `git log --pretty=format:'- %s -- %an'` as the basis for changelog entries
* Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
2. Create a release "Release X.Y.Z" on GitHub
* Create Tag `vX.Y.Z`
* Copy CHANGELOG.md into the release notes
3. Create a PR "Back to development" against master branch
* Add `<next_version> (unreleased)` to CHANGELOG.md
* Update `JaegerClientVersion` in constants.go to `Go-<next_version>dev`

View File

@@ -0,0 +1,77 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"github.com/opentracing/opentracing-go/log"
"github.com/uber/jaeger-client-go/internal/baggage"
)
// baggageSetter is an actor that can set a baggage value on a Span given certain
// restrictions (eg. maxValueLength).
type baggageSetter struct {
restrictionManager baggage.RestrictionManager
metrics *Metrics
}
func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter {
return &baggageSetter{
restrictionManager: restrictionManager,
metrics: metrics,
}
}
// (NB) span should hold the lock before making this call
func (s *baggageSetter) setBaggage(span *Span, key, value string) {
var truncated bool
var prevItem string
restriction := s.restrictionManager.GetRestriction(span.serviceName(), key)
if !restriction.KeyAllowed() {
s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
s.metrics.BaggageUpdateFailure.Inc(1)
return
}
if len(value) > restriction.MaxValueLength() {
truncated = true
value = value[:restriction.MaxValueLength()]
s.metrics.BaggageTruncate.Inc(1)
}
prevItem = span.context.baggage[key]
s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
span.context = span.context.WithBaggageItem(key, value)
s.metrics.BaggageUpdateSuccess.Inc(1)
}
func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) {
if !span.context.IsSampled() {
return
}
fields := []log.Field{
log.String("event", "baggage"),
log.String("key", key),
log.String("value", value),
}
if prevItem != "" {
fields = append(fields, log.String("override", "true"))
}
if truncated {
fields = append(fields, log.String("truncated", "true"))
}
if !valid {
fields = append(fields, log.String("invalid", "true"))
}
span.logFieldsNoLocking(fields...)
}

View File

@@ -0,0 +1,447 @@
// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"io"
"strings"
"time"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go/utils"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/internal/baggage/remote"
throttler "github.com/uber/jaeger-client-go/internal/throttler/remote"
"github.com/uber/jaeger-client-go/rpcmetrics"
"github.com/uber/jaeger-client-go/transport"
"github.com/uber/jaeger-lib/metrics"
)
const defaultSamplingProbability = 0.001
// Configuration configures and creates Jaeger Tracer
type Configuration struct {
// ServiceName specifies the service name to use on the tracer.
// Can be provided by FromEnv() via the environment variable named JAEGER_SERVICE_NAME
ServiceName string `yaml:"serviceName"`
// Disabled makes the config return opentracing.NoopTracer.
// Value can be provided by FromEnv() via the environment variable named JAEGER_DISABLED.
Disabled bool `yaml:"disabled"`
// RPCMetrics enables generation of RPC metrics (requires metrics factory to be provided).
// Value can be provided by FromEnv() via the environment variable named JAEGER_RPC_METRICS
RPCMetrics bool `yaml:"rpc_metrics"`
// Gen128Bit instructs the tracer to generate 128-bit wide trace IDs, compatible with W3C Trace Context.
// Value can be provided by FromEnv() via the environment variable named JAEGER_TRACEID_128BIT.
Gen128Bit bool `yaml:"traceid_128bit"`
// Tags can be provided by FromEnv() via the environment variable named JAEGER_TAGS
Tags []opentracing.Tag `yaml:"tags"`
Sampler *SamplerConfig `yaml:"sampler"`
Reporter *ReporterConfig `yaml:"reporter"`
Headers *jaeger.HeadersConfig `yaml:"headers"`
BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"`
Throttler *ThrottlerConfig `yaml:"throttler"`
}
// SamplerConfig allows initializing a non-default sampler. All fields are optional.
type SamplerConfig struct {
// Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote.
// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_TYPE
Type string `yaml:"type"`
// Param is a value passed to the sampler.
// Valid values for Param field are:
// - for "const" sampler, 0 or 1 for always false/true respectively
// - for "probabilistic" sampler, a probability between 0 and 1
// - for "rateLimiting" sampler, the number of spans per second
// - for "remote" sampler, param is the same as for "probabilistic"
// and indicates the initial sampling rate before the actual one
// is received from the mothership.
// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_PARAM
Param float64 `yaml:"param"`
// SamplingServerURL is the URL of sampling manager that can provide
// sampling strategy to this service.
// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLING_ENDPOINT
SamplingServerURL string `yaml:"samplingServerURL"`
// SamplingRefreshInterval controls how often the remotely controlled sampler will poll
// sampling manager for the appropriate sampling strategy.
// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
// MaxOperations is the maximum number of operations that the PerOperationSampler
// will keep track of. If an operation is not tracked, a default probabilistic
// sampler will be used rather than the per operation specific sampler.
// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_MAX_OPERATIONS.
MaxOperations int `yaml:"maxOperations"`
// Opt-in feature for applications that require late binding of span name via explicit
// call to SetOperationName when using PerOperationSampler. When this feature is enabled,
// the sampler will return retryable=true from OnCreateSpan(), thus leaving the sampling
// decision as non-final (and the span as writeable). This may lead to degraded performance
// in applications that always provide the correct span name on trace creation.
//
// For backwards compatibility this option is off by default.
OperationNameLateBinding bool `yaml:"operationNameLateBinding"`
// Options can be used to programmatically pass additional options to the Remote sampler.
Options []jaeger.SamplerOption
}
// ReporterConfig configures the reporter. All fields are optional.
type ReporterConfig struct {
// QueueSize controls how many spans the reporter can keep in memory before it starts dropping
// new spans. The queue is continuously drained by a background go-routine, as fast as spans
// can be sent out of process.
// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
QueueSize int `yaml:"queueSize"`
// BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
// It is generally not useful, as it only matters for very low traffic services.
// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
BufferFlushInterval time.Duration
// LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
// and logs all submitted spans. Main Configuration.Logger must be initialized in the code
// for this option to have any effect.
// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_LOG_SPANS
LogSpans bool `yaml:"logSpans"`
// LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address.
// Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
LocalAgentHostPort string `yaml:"localAgentHostPort"`
// DisableAttemptReconnecting when true, disables udp connection helper that periodically re-resolves
// the agent's hostname and reconnects if there was a change. This option only
// applies if LocalAgentHostPort is specified.
// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED
DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"`
// AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname
// in order to detect address changes. This option only applies if DisableAttemptReconnecting is false.
// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL
AttemptReconnectInterval time.Duration
// CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL.
// Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT
CollectorEndpoint string `yaml:"collectorEndpoint"`
// User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector.
// Can be provided by FromEnv() via the environment variable named JAEGER_USER
User string `yaml:"user"`
// Password instructs reporter to include a password for basic http authentication when sending spans to
// jaeger-collector.
// Can be provided by FromEnv() via the environment variable named JAEGER_PASSWORD
Password string `yaml:"password"`
// HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans.
// This field takes effect only when using HTTPTransport by setting the CollectorEndpoint.
HTTPHeaders map[string]string `yaml:"http_headers"`
}
// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
// certain baggage keys. All fields are optional.
type BaggageRestrictionsConfig struct {
// DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction
// manager. If true, the manager will not allow any baggage to be written until baggage restrictions have
// been retrieved from jaeger-agent. If false, the manager will allow any baggage to be written until baggage
// restrictions have been retrieved from jaeger-agent.
DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"`
// HostPort is the hostPort of jaeger-agent's baggage restrictions server
HostPort string `yaml:"hostPort"`
// RefreshInterval controls how often the baggage restriction manager will poll
// jaeger-agent for the most recent baggage restrictions.
RefreshInterval time.Duration `yaml:"refreshInterval"`
}
// ThrottlerConfig configures the throttler which can be used to throttle the
// rate at which the client may send debug requests.
type ThrottlerConfig struct {
// HostPort of jaeger-agent's credit server.
HostPort string `yaml:"hostPort"`
// RefreshInterval controls how often the throttler will poll jaeger-agent
// for more throttling credits.
RefreshInterval time.Duration `yaml:"refreshInterval"`
// SynchronousInitialization determines whether or not the throttler should
// synchronously fetch credits from the agent when an operation is seen for
// the first time. This should be set to true if the client will be used by
// a short lived service that needs to ensure that credits are fetched
// upfront such that sampling or throttling occurs.
SynchronousInitialization bool `yaml:"synchronousInitialization"`
}
type nullCloser struct{}
func (*nullCloser) Close() error { return nil }
// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers
// before shutdown.
//
// Deprecated: use NewTracer() function
func (c Configuration) New(
serviceName string,
options ...Option,
) (opentracing.Tracer, io.Closer, error) {
if serviceName != "" {
c.ServiceName = serviceName
}
return c.NewTracer(options...)
}
// NewTracer returns a new tracer based on the current configuration, using the given options,
// and a closer func that can be used to flush buffers before shutdown.
func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
if c.Disabled {
return &opentracing.NoopTracer{}, &nullCloser{}, nil
}
if c.ServiceName == "" {
return nil, nil, errors.New("no service name provided")
}
opts := applyOptions(options...)
tracerMetrics := jaeger.NewMetrics(opts.metrics, nil)
if c.RPCMetrics {
Observer(
rpcmetrics.NewObserver(
opts.metrics.Namespace(metrics.NSOptions{Name: "jaeger-rpc", Tags: map[string]string{"component": "jaeger"}}),
rpcmetrics.DefaultNameNormalizer,
),
)(&opts) // adds to c.observers
}
if c.Sampler == nil {
c.Sampler = &SamplerConfig{
Type: jaeger.SamplerTypeRemote,
Param: defaultSamplingProbability,
}
}
if c.Reporter == nil {
c.Reporter = &ReporterConfig{}
}
sampler := opts.sampler
if sampler == nil {
s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics)
if err != nil {
return nil, nil, err
}
sampler = s
}
reporter := opts.reporter
if reporter == nil {
r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger)
if err != nil {
return nil, nil, err
}
reporter = r
}
tracerOptions := []jaeger.TracerOption{
jaeger.TracerOptions.Metrics(tracerMetrics),
jaeger.TracerOptions.Logger(opts.logger),
jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
jaeger.TracerOptions.PoolSpans(opts.poolSpans),
jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
jaeger.TracerOptions.NoDebugFlagOnForcedSampling(opts.noDebugFlagOnForcedSampling),
}
if c.Gen128Bit || opts.gen128Bit {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Gen128Bit(true))
}
if opts.randomNumber != nil {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.RandomNumber(opts.randomNumber))
}
for _, tag := range opts.tags {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
}
for _, tag := range c.Tags {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
}
for _, obs := range opts.observers {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs))
}
for _, cobs := range opts.contribObservers {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs))
}
for format, injector := range opts.injectors {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector))
}
for format, extractor := range opts.extractors {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor))
}
if c.BaggageRestrictions != nil {
mgr := remote.NewRestrictionManager(
c.ServiceName,
remote.Options.Metrics(tracerMetrics),
remote.Options.Logger(opts.logger),
remote.Options.HostPort(c.BaggageRestrictions.HostPort),
remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval),
remote.Options.DenyBaggageOnInitializationFailure(
c.BaggageRestrictions.DenyBaggageOnInitializationFailure,
),
)
tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr))
}
if c.Throttler != nil {
debugThrottler := throttler.NewThrottler(
c.ServiceName,
throttler.Options.Metrics(tracerMetrics),
throttler.Options.Logger(opts.logger),
throttler.Options.HostPort(c.Throttler.HostPort),
throttler.Options.RefreshInterval(c.Throttler.RefreshInterval),
throttler.Options.SynchronousInitialization(
c.Throttler.SynchronousInitialization,
),
)
tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler))
}
tracer, closer := jaeger.NewTracer(
c.ServiceName,
sampler,
reporter,
tracerOptions...,
)
return tracer, closer, nil
}
// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer.
// It returns a closer func that can be used to flush buffers before shutdown.
func (c Configuration) InitGlobalTracer(
serviceName string,
options ...Option,
) (io.Closer, error) {
if c.Disabled {
return &nullCloser{}, nil
}
tracer, closer, err := c.New(serviceName, options...)
if err != nil {
return nil, err
}
opentracing.SetGlobalTracer(tracer)
return closer, nil
}
// NewSampler creates a new sampler based on the configuration
func (sc *SamplerConfig) NewSampler(
serviceName string,
metrics *jaeger.Metrics,
) (jaeger.Sampler, error) {
samplerType := strings.ToLower(sc.Type)
if samplerType == jaeger.SamplerTypeConst {
return jaeger.NewConstSampler(sc.Param != 0), nil
}
if samplerType == jaeger.SamplerTypeProbabilistic {
if sc.Param >= 0 && sc.Param <= 1.0 {
return jaeger.NewProbabilisticSampler(sc.Param)
}
return nil, fmt.Errorf(
"invalid Param for probabilistic sampler; expecting value between 0 and 1, received %v",
sc.Param,
)
}
if samplerType == jaeger.SamplerTypeRateLimiting {
return jaeger.NewRateLimitingSampler(sc.Param), nil
}
if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" {
sc2 := *sc
sc2.Type = jaeger.SamplerTypeProbabilistic
initSampler, err := sc2.NewSampler(serviceName, nil)
if err != nil {
return nil, err
}
options := []jaeger.SamplerOption{
jaeger.SamplerOptions.Metrics(metrics),
jaeger.SamplerOptions.InitialSampler(initSampler),
jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL),
jaeger.SamplerOptions.MaxOperations(sc.MaxOperations),
jaeger.SamplerOptions.OperationNameLateBinding(sc.OperationNameLateBinding),
jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval),
}
options = append(options, sc.Options...)
return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
}
return nil, fmt.Errorf("unknown sampler type (%s)", sc.Type)
}
// NewReporter instantiates a new reporter that submits spans to the collector
func (rc *ReporterConfig) NewReporter(
serviceName string,
metrics *jaeger.Metrics,
logger jaeger.Logger,
) (jaeger.Reporter, error) {
sender, err := rc.newTransport(logger)
if err != nil {
return nil, err
}
reporter := jaeger.NewRemoteReporter(
sender,
jaeger.ReporterOptions.QueueSize(rc.QueueSize),
jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval),
jaeger.ReporterOptions.Logger(logger),
jaeger.ReporterOptions.Metrics(metrics))
if rc.LogSpans && logger != nil {
logger.Infof("Initializing logging reporter")
reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
}
return reporter, err
}
func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) {
switch {
case rc.CollectorEndpoint != "":
httpOptions := []transport.HTTPOption{transport.HTTPHeaders(rc.HTTPHeaders)}
if rc.User != "" && rc.Password != "" {
httpOptions = append(httpOptions, transport.HTTPBasicAuth(rc.User, rc.Password))
}
return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil
default:
return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{
AgentClientUDPParams: utils.AgentClientUDPParams{
HostPort: rc.LocalAgentHostPort,
Logger: logger,
DisableAttemptReconnecting: rc.DisableAttemptReconnecting,
AttemptReconnectInterval: rc.AttemptReconnectInterval,
},
})
}
}

View File

@@ -0,0 +1,268 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/uber/jaeger-client-go"
)
const (
// environment variable names
envServiceName = "JAEGER_SERVICE_NAME"
envDisabled = "JAEGER_DISABLED"
envRPCMetrics = "JAEGER_RPC_METRICS"
envTags = "JAEGER_TAGS"
envSamplerType = "JAEGER_SAMPLER_TYPE"
envSamplerParam = "JAEGER_SAMPLER_PARAM"
envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint
envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT"
envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL"
envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS"
envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED"
envReporterAttemptReconnectInterval = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL"
envEndpoint = "JAEGER_ENDPOINT"
envUser = "JAEGER_USER"
envPassword = "JAEGER_PASSWORD"
envAgentHost = "JAEGER_AGENT_HOST"
envAgentPort = "JAEGER_AGENT_PORT"
env128bit = "JAEGER_TRACEID_128BIT"
)
// FromEnv uses environment variables to set the tracer's Configuration
func FromEnv() (*Configuration, error) {
c := &Configuration{}
return c.FromEnv()
}
// FromEnv uses environment variables and overrides existing tracer's Configuration
func (c *Configuration) FromEnv() (*Configuration, error) {
if e := os.Getenv(envServiceName); e != "" {
c.ServiceName = e
}
if e := os.Getenv(envRPCMetrics); e != "" {
if value, err := strconv.ParseBool(e); err == nil {
c.RPCMetrics = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e)
}
}
if e := os.Getenv(envDisabled); e != "" {
if value, err := strconv.ParseBool(e); err == nil {
c.Disabled = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e)
}
}
if e := os.Getenv(envTags); e != "" {
c.Tags = parseTags(e)
}
if e := os.Getenv(env128bit); e != "" {
if value, err := strconv.ParseBool(e); err == nil {
c.Gen128Bit = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", env128bit, e)
}
}
if c.Sampler == nil {
c.Sampler = &SamplerConfig{}
}
if s, err := c.Sampler.samplerConfigFromEnv(); err == nil {
c.Sampler = s
} else {
return nil, errors.Wrap(err, "cannot obtain sampler config from env")
}
if c.Reporter == nil {
c.Reporter = &ReporterConfig{}
}
if r, err := c.Reporter.reporterConfigFromEnv(); err == nil {
c.Reporter = r
} else {
return nil, errors.Wrap(err, "cannot obtain reporter config from env")
}
return c, nil
}
// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) {
if e := os.Getenv(envSamplerType); e != "" {
sc.Type = e
}
if e := os.Getenv(envSamplerParam); e != "" {
if value, err := strconv.ParseFloat(e, 64); err == nil {
sc.Param = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e)
}
}
if e := os.Getenv(envSamplingEndpoint); e != "" {
sc.SamplingServerURL = e
} else if e := os.Getenv(envSamplerManagerHostPort); e != "" {
sc.SamplingServerURL = e
} else if e := os.Getenv(envAgentHost); e != "" {
// Fallback if we know the agent host - try the sampling endpoint there
sc.SamplingServerURL = fmt.Sprintf("http://%s:%d/sampling", e, jaeger.DefaultSamplingServerPort)
}
if e := os.Getenv(envSamplerMaxOperations); e != "" {
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
sc.MaxOperations = int(value)
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e)
}
}
if e := os.Getenv(envSamplerRefreshInterval); e != "" {
if value, err := time.ParseDuration(e); err == nil {
sc.SamplingRefreshInterval = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e)
}
}
return sc, nil
}
// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) {
if e := os.Getenv(envReporterMaxQueueSize); e != "" {
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
rc.QueueSize = int(value)
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e)
}
}
if e := os.Getenv(envReporterFlushInterval); e != "" {
if value, err := time.ParseDuration(e); err == nil {
rc.BufferFlushInterval = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e)
}
}
if e := os.Getenv(envReporterLogSpans); e != "" {
if value, err := strconv.ParseBool(e); err == nil {
rc.LogSpans = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e)
}
}
if e := os.Getenv(envEndpoint); e != "" {
u, err := url.ParseRequestURI(e)
if err != nil {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envEndpoint, e)
}
rc.CollectorEndpoint = u.String()
user := os.Getenv(envUser)
pswd := os.Getenv(envPassword)
if user != "" && pswd == "" || user == "" && pswd != "" {
return nil, errors.Errorf("you must set %s and %s env vars together", envUser, envPassword)
}
rc.User = user
rc.Password = pswd
} else {
useEnv := false
host := jaeger.DefaultUDPSpanServerHost
if e := os.Getenv(envAgentHost); e != "" {
host = e
useEnv = true
}
port := jaeger.DefaultUDPSpanServerPort
if e := os.Getenv(envAgentPort); e != "" {
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
port = int(value)
useEnv = true
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
}
}
if useEnv || rc.LocalAgentHostPort == "" {
rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
}
if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" {
if value, err := strconv.ParseBool(e); err == nil {
rc.DisableAttemptReconnecting = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e)
}
}
if !rc.DisableAttemptReconnecting {
if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" {
if value, err := time.ParseDuration(e); err == nil {
rc.AttemptReconnectInterval = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e)
}
}
}
}
return rc, nil
}
// parseTags parses the given string into a collection of Tags.
// Spec for this value:
// - comma separated list of key=value
// - value can be specified using the notation ${envVar:defaultValue}, where `envVar`
// is an environment variable and `defaultValue` is the value to use in case the env var is not set
func parseTags(sTags string) []opentracing.Tag {
pairs := strings.Split(sTags, ",")
tags := make([]opentracing.Tag, 0)
for _, p := range pairs {
kv := strings.SplitN(p, "=", 2)
k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1])
if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") {
ed := strings.SplitN(v[2:len(v)-1], ":", 2)
e, d := ed[0], ed[1]
v = os.Getenv(e)
if v == "" && d != "" {
v = d
}
}
tag := opentracing.Tag{Key: k, Value: v}
tags = append(tags, tag)
}
return tags
}

View File

@@ -0,0 +1,173 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
opentracing "github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-lib/metrics"
"github.com/uber/jaeger-client-go"
)
// Option is a function that sets some option on the client.
type Option func(c *Options)
// Options control behavior of the client.
type Options struct {
metrics metrics.Factory
logger jaeger.Logger
reporter jaeger.Reporter
sampler jaeger.Sampler
contribObservers []jaeger.ContribObserver
observers []jaeger.Observer
gen128Bit bool
poolSpans bool
zipkinSharedRPCSpan bool
maxTagValueLength int
noDebugFlagOnForcedSampling bool
tags []opentracing.Tag
injectors map[interface{}]jaeger.Injector
extractors map[interface{}]jaeger.Extractor
randomNumber func() uint64
}
// Metrics creates an Option that initializes Metrics in the tracer,
// which is used to emit statistics about spans.
func Metrics(factory metrics.Factory) Option {
return func(c *Options) {
c.metrics = factory
}
}
// Logger can be provided to log Reporter errors, as well as to log spans
// if Reporter.LogSpans is set to true.
func Logger(logger jaeger.Logger) Option {
return func(c *Options) {
c.logger = logger
}
}
// Reporter can be provided explicitly to override the configuration.
// Useful for testing, e.g. by passing InMemoryReporter.
func Reporter(reporter jaeger.Reporter) Option {
return func(c *Options) {
c.reporter = reporter
}
}
// Sampler can be provided explicitly to override the configuration.
func Sampler(sampler jaeger.Sampler) Option {
return func(c *Options) {
c.sampler = sampler
}
}
// Observer can be registered with the Tracer to receive notifications about new Spans.
func Observer(observer jaeger.Observer) Option {
return func(c *Options) {
c.observers = append(c.observers, observer)
}
}
// ContribObserver can be registered with the Tracer to receive notifications
// about new spans.
func ContribObserver(observer jaeger.ContribObserver) Option {
return func(c *Options) {
c.contribObservers = append(c.contribObservers, observer)
}
}
// Gen128Bit specifies whether to generate 128bit trace IDs.
func Gen128Bit(gen128Bit bool) Option {
return func(c *Options) {
c.gen128Bit = gen128Bit
}
}
// PoolSpans specifies whether to pool spans
func PoolSpans(poolSpans bool) Option {
return func(c *Options) {
c.poolSpans = poolSpans
}
}
// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
// and server spans a la zipkin. If false, client and server spans will be assigned
// different IDs.
func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option {
return func(c *Options) {
c.zipkinSharedRPCSpan = zipkinSharedRPCSpan
}
}
// MaxTagValueLength can be provided to override the default max tag value length.
func MaxTagValueLength(maxTagValueLength int) Option {
return func(c *Options) {
c.maxTagValueLength = maxTagValueLength
}
}
// NoDebugFlagOnForcedSampling can be used to decide whether debug flag will be set or not
// when calling span.setSamplingPriority to force sample a span.
func NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) Option {
return func(c *Options) {
c.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
}
}
// Tag creates an option that adds a tracer-level tag.
func Tag(key string, value interface{}) Option {
return func(c *Options) {
c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value})
}
}
// Injector registers an Injector with the given format.
func Injector(format interface{}, injector jaeger.Injector) Option {
return func(c *Options) {
c.injectors[format] = injector
}
}
// Extractor registers an Extractor with the given format.
func Extractor(format interface{}, extractor jaeger.Extractor) Option {
return func(c *Options) {
c.extractors[format] = extractor
}
}
// WithRandomNumber supplies a random number generator function to the Tracer used to generate trace and span IDs.
func WithRandomNumber(f func() uint64) Option {
return func(c *Options) {
c.randomNumber = f
}
}
func applyOptions(options ...Option) Options {
opts := Options{
injectors: make(map[interface{}]jaeger.Injector),
extractors: make(map[interface{}]jaeger.Extractor),
}
for _, option := range options {
option(&opts)
}
if opts.metrics == nil {
opts.metrics = metrics.NullFactory
}
if opts.logger == nil {
opts.logger = jaeger.NullLogger
}
return opts
}

106
vendor/github.com/uber/jaeger-client-go/constants.go generated vendored Normal file
View File

@@ -0,0 +1,106 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"fmt"
"github.com/opentracing/opentracing-go"
)
const (
// JaegerClientVersion is the version of the client library reported as Span tag.
JaegerClientVersion = "Go-2.30.0"
// JaegerClientVersionTagKey is the name of the tag used to report client version.
JaegerClientVersionTagKey = "jaeger.version"
// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
// if found in the carrier, forces the trace to be sampled as "debug" trace.
// The value of the header is recorded as the tag on the root span, so that the
// trace can be found in the UI using this value as a correlation ID.
JaegerDebugHeader = "jaeger-debug-id"
// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
// a root span does not exist.
JaegerBaggageHeader = "jaeger-baggage"
// TracerHostnameTagKey used to report host name of the process.
TracerHostnameTagKey = "hostname"
// TracerIPTagKey used to report ip of the process.
TracerIPTagKey = "ip"
// TracerUUIDTagKey used to report UUID of the client process.
TracerUUIDTagKey = "client-uuid"
// SamplerTypeTagKey reports which sampler was used on the root span.
SamplerTypeTagKey = "sampler.type"
// SamplerParamTagKey reports the parameter of the sampler, like sampling probability.
SamplerParamTagKey = "sampler.param"
// TraceContextHeaderName is the http header name used to propagate tracing context.
// This must be in lower-case to avoid mismatches when decoding incoming headers.
TraceContextHeaderName = "uber-trace-id"
// TracerStateHeaderName is deprecated.
// Deprecated: use TraceContextHeaderName
TracerStateHeaderName = TraceContextHeaderName
// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
// This must be in lower-case to avoid mismatches when decoding incoming headers.
TraceBaggageHeaderPrefix = "uberctx-"
// SamplerTypeConst is the type of sampler that always makes the same decision.
SamplerTypeConst = "const"
// SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
SamplerTypeRemote = "remote"
// SamplerTypeProbabilistic is the type of sampler that samples traces
// with a certain fixed probability.
SamplerTypeProbabilistic = "probabilistic"
// SamplerTypeRateLimiting is the type of sampler that samples
// only up to a fixed number of traces per second.
SamplerTypeRateLimiting = "ratelimiting"
// SamplerTypeLowerBound is the type of sampler that samples
// at least a fixed number of traces per second.
SamplerTypeLowerBound = "lowerbound"
// DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
DefaultUDPSpanServerHost = "localhost"
// DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
DefaultUDPSpanServerPort = 6831
// DefaultSamplingServerPort is the default port to fetch sampling config from, via http
DefaultSamplingServerPort = 5778
// DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
DefaultMaxTagValueLength = 256
// SelfRefType is a jaeger specific reference type that supports creating a span
// with an already defined context.
selfRefType opentracing.SpanReferenceType = 99
)
var (
// DefaultSamplingServerURL is the default url to fetch sampling config from, via http
DefaultSamplingServerURL = fmt.Sprintf("http://127.0.0.1:%d/sampling", DefaultSamplingServerPort)
)

View File

@@ -0,0 +1,56 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
opentracing "github.com/opentracing/opentracing-go"
)
// ContribObserver can be registered with the Tracer to receive notifications
// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
type ContribObserver interface {
// Create and return a span observer. Called when a span starts.
// If the Observer is not interested in the given span, it must return (nil, false).
// E.g :
// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
// var sp opentracing.Span
// sso := opentracing.StartSpanOptions{}
// if spanObserver, ok := Observer.OnStartSpan(span, opName, sso); ok {
// // we have a valid SpanObserver
// }
// ...
// }
OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
}
// ContribSpanObserver is created by the Observer and receives notifications
// about other Span events. This interface is meant to match
// github.com/opentracing-contrib/go-observer, via duck typing, without
// directly importing the go-observer package.
type ContribSpanObserver interface {
OnSetOperationName(operationName string)
OnSetTag(key string, value interface{})
OnFinish(options opentracing.FinishOptions)
}
// wrapper observer for the old observers (see observer.go)
type oldObserver struct {
obs Observer
}
func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
spanObserver := o.obs.OnStartSpan(operationName, options)
return spanObserver, spanObserver != nil
}

22
vendor/github.com/uber/jaeger-client-go/doc.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
For integration instructions please refer to the README:
https://github.com/uber/jaeger-client-go/blob/master/README.md
*/
package jaeger

105
vendor/github.com/uber/jaeger-client-go/glide.lock generated vendored Normal file
View File

@@ -0,0 +1,105 @@
hash: 63bec420a22b7e5abac8c602c5cc9b66a33d6a1bfec8918eecc77fd344b759ed
updated: 2020-07-31T13:30:37.242608-04:00
imports:
- name: github.com/beorn7/perks
version: 3a771d992973f24aa725d07868b467d1ddfceafb
subpackages:
- quantile
- name: github.com/HdrHistogram/hdrhistogram-go
version: 3a0bb77429bd3a61596f5e8a3172445844342120
- name: github.com/crossdock/crossdock-go
version: 049aabb0122b03bc9bd30cab8f3f91fb60166361
subpackages:
- assert
- require
- name: github.com/davecgh/go-spew
version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73
subpackages:
- spew
- name: github.com/golang/mock
version: 51421b967af1f557f93a59e0057aaf15ca02e29c
subpackages:
- gomock
- name: github.com/golang/protobuf
version: b5d812f8a3706043e23a9cd5babf2e5423744d30
subpackages:
- proto
- name: github.com/matttproud/golang_protobuf_extensions
version: c182affec369e30f25d3eb8cd8a478dee585ae7d
subpackages:
- pbutil
- name: github.com/opentracing/opentracing-go
version: d34af3eaa63c4d08ab54863a4bdd0daa45212e12
subpackages:
- ext
- harness
- log
- name: github.com/pkg/errors
version: ba968bfe8b2f7e042a574c888954fccecfa385b4
- name: github.com/pmezard/go-difflib
version: 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
subpackages:
- difflib
- name: github.com/prometheus/client_golang
version: 170205fb58decfd011f1550d4cfb737230d7ae4f
subpackages:
- prometheus
- prometheus/internal
- name: github.com/prometheus/client_model
version: fd36f4220a901265f90734c3183c5f0c91daa0b8
subpackages:
- go
- name: github.com/prometheus/common
version: 1ab4d74fc89940cfbc3c2b3a89821336cdefa119
subpackages:
- expfmt
- internal/bitbucket.org/ww/goautoneg
- model
- name: github.com/prometheus/procfs
version: 8a055596020d692cf491851e47ba3e302d9f90ce
subpackages:
- internal/fs
- internal/util
- name: github.com/stretchr/testify
version: f654a9112bbeac49ca2cd45bfbe11533c4666cf8
subpackages:
- assert
- mock
- require
- suite
- name: github.com/uber-go/atomic
version: 845920076a298bdb984fb0f1b86052e4ca0a281c
- name: github.com/uber/jaeger-lib
version: 48cc1df63e6be0d63b95677f0d22beb880bce1e4
subpackages:
- metrics
- metrics/metricstest
- metrics/prometheus
- name: go.uber.org/atomic
version: 845920076a298bdb984fb0f1b86052e4ca0a281c
- name: go.uber.org/multierr
version: b587143a48b62b01d337824eab43700af6ffe222
- name: go.uber.org/zap
version: feeb9a050b31b40eec6f2470e7599eeeadfe5bdd
subpackages:
- buffer
- internal/bufferpool
- internal/color
- internal/exit
- zapcore
- zaptest/observer
- name: golang.org/x/net
version: addf6b3196f61cd44ce5a76657913698c73479d0
subpackages:
- context
- context/ctxhttp
- name: golang.org/x/sys
version: 3e129f6d46b10b0e1da36b3deffcb55e09631b64
subpackages:
- internal/unsafeheader
- windows
- name: gopkg.in/yaml.v3
version: eeeca48fe7764f320e4870d231902bf9c1be2c08
testImports:
- name: github.com/stretchr/objx
version: 35313a95ee26395aa17d366c71a2ccf788fa69b6

30
vendor/github.com/uber/jaeger-client-go/glide.yaml generated vendored Normal file
View File

@@ -0,0 +1,30 @@
package: github.com/uber/jaeger-client-go
import:
- package: github.com/opentracing/opentracing-go
version: ^1.2
subpackages:
- ext
- log
- package: github.com/crossdock/crossdock-go
- package: github.com/uber/jaeger-lib
version: ^2.3.0
subpackages:
- metrics
- package: github.com/pkg/errors
version: ~0.8.0
- package: go.uber.org/zap
source: https://github.com/uber-go/zap.git
version: ^1
- package: github.com/uber-go/atomic
version: ^1
- package: github.com/prometheus/client_golang
version: 1.1
- package: github.com/prometheus/procfs
version: 0.0.6
testImport:
- package: github.com/stretchr/testify
subpackages:
- assert
- require
- suite
- package: github.com/golang/mock

65
vendor/github.com/uber/jaeger-client-go/header.go generated vendored Normal file
View File

@@ -0,0 +1,65 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
// HeadersConfig contains the values for the header keys that Jaeger will use.
// These values may be either custom or default depending on whether custom
// values were provided via a configuration.
type HeadersConfig struct {
// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
// if found in the carrier, forces the trace to be sampled as "debug" trace.
// The value of the header is recorded as the tag on the root span, so that the
// trace can be found in the UI using this value as a correlation ID.
JaegerDebugHeader string `yaml:"jaegerDebugHeader"`
// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
// a root span does not exist.
JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"`
// TraceContextHeaderName is the http header name used to propagate tracing context.
// This must be in lower-case to avoid mismatches when decoding incoming headers.
TraceContextHeaderName string `yaml:"TraceContextHeaderName"`
// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
// This must be in lower-case to avoid mismatches when decoding incoming headers.
TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
}
// ApplyDefaults sets missing configuration keys to default values
func (c *HeadersConfig) ApplyDefaults() *HeadersConfig {
if c.JaegerBaggageHeader == "" {
c.JaegerBaggageHeader = JaegerBaggageHeader
}
if c.JaegerDebugHeader == "" {
c.JaegerDebugHeader = JaegerDebugHeader
}
if c.TraceBaggageHeaderPrefix == "" {
c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix
}
if c.TraceContextHeaderName == "" {
c.TraceContextHeaderName = TraceContextHeaderName
}
return c
}
func getDefaultHeadersConfig() *HeadersConfig {
return &HeadersConfig{
JaegerDebugHeader: JaegerDebugHeader,
JaegerBaggageHeader: JaegerBaggageHeader,
TraceContextHeaderName: TraceContextHeaderName,
TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix,
}
}

View File

@@ -0,0 +1,101 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"time"
"github.com/uber/jaeger-client-go"
)
const (
defaultMaxValueLength = 2048
defaultRefreshInterval = time.Minute
defaultHostPort = "localhost:5778"
)
// Option is a function that sets some option on the RestrictionManager
type Option func(options *options)
// Options is a factory for all available options
var Options options
type options struct {
denyBaggageOnInitializationFailure bool
metrics *jaeger.Metrics
logger jaeger.Logger
hostPort string
refreshInterval time.Duration
}
// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager.
// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage
// restrictions have been retrieved from agent.
// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage
// restrictions have been retrieved from agent.
func (options) DenyBaggageOnInitializationFailure(b bool) Option {
return func(o *options) {
o.denyBaggageOnInitializationFailure = b
}
}
// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics.
func (options) Metrics(m *jaeger.Metrics) Option {
return func(o *options) {
o.metrics = m
}
}
// Logger creates an Option that sets the logger used by the RestrictionManager.
func (options) Logger(logger jaeger.Logger) Option {
return func(o *options) {
o.logger = logger
}
}
// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions.
func (options) HostPort(hostPort string) Option {
return func(o *options) {
o.hostPort = hostPort
}
}
// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for
// the baggage restrictions.
func (options) RefreshInterval(refreshInterval time.Duration) Option {
return func(o *options) {
o.refreshInterval = refreshInterval
}
}
func applyOptions(o ...Option) options {
opts := options{}
for _, option := range o {
option(&opts)
}
if opts.metrics == nil {
opts.metrics = jaeger.NewNullMetrics()
}
if opts.logger == nil {
opts.logger = jaeger.NullLogger
}
if opts.hostPort == "" {
opts.hostPort = defaultHostPort
}
if opts.refreshInterval == 0 {
opts.refreshInterval = defaultRefreshInterval
}
return opts
}
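A minimal in-package sketch of the options defaulting above, written as if it sat in a _test.go file next to this (internal) package: only the refresh interval is overridden, and applyOptions supplies the null metrics, null logger, and default host:port.

package remote

import (
	"testing"
	"time"
)

func TestApplyOptionsDefaultsSketch(t *testing.T) {
	opts := applyOptions(Options.RefreshInterval(30 * time.Second))
	if opts.hostPort != defaultHostPort {
		t.Errorf("want default host:port %q, got %q", defaultHostPort, opts.hostPort)
	}
	if opts.refreshInterval != 30*time.Second {
		t.Errorf("unexpected refresh interval: %v", opts.refreshInterval)
	}
	if opts.logger == nil || opts.metrics == nil {
		t.Error("logger and metrics should have been defaulted")
	}
}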

View File

@@ -0,0 +1,158 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"context"
"fmt"
"net/url"
"sync"
"time"
"github.com/uber/jaeger-client-go/internal/baggage"
thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage"
"github.com/uber/jaeger-client-go/utils"
)
type httpBaggageRestrictionManagerProxy struct {
url string
}
func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy {
v := url.Values{}
v.Set("service", serviceName)
return &httpBaggageRestrictionManagerProxy{
url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()),
}
}
func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(context.Context, string) ([]*thrift.BaggageRestriction, error) {
var out []*thrift.BaggageRestriction
if err := utils.GetJSON(s.url, &out); err != nil {
return nil, err
}
return out, nil
}
// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent
type RestrictionManager struct {
options
mux sync.RWMutex
serviceName string
restrictions map[string]*baggage.Restriction
thriftProxy thrift.BaggageRestrictionManager
pollStopped sync.WaitGroup
stopPoll chan struct{}
invalidRestriction *baggage.Restriction
validRestriction *baggage.Restriction
// Determines if the manager has successfully retrieved baggage restrictions from agent
initialized bool
}
// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest
// baggage restrictions.
func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager {
// TODO there is a developing use case where a single tracer can generate traces on behalf of many services.
// restrictionsMap will need to exist per service
opts := applyOptions(options...)
m := &RestrictionManager{
serviceName: serviceName,
options: opts,
restrictions: make(map[string]*baggage.Restriction),
thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName),
stopPoll: make(chan struct{}),
invalidRestriction: baggage.NewRestriction(false, 0),
validRestriction: baggage.NewRestriction(true, defaultMaxValueLength),
}
m.pollStopped.Add(1)
go m.pollManager()
return m
}
// isReady returns true if the manager has retrieved baggage restrictions from the remote source.
func (m *RestrictionManager) isReady() bool {
m.mux.RLock()
defer m.mux.RUnlock()
return m.initialized
}
// GetRestriction implements RestrictionManager#GetRestriction.
func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction {
m.mux.RLock()
defer m.mux.RUnlock()
if !m.initialized {
if m.denyBaggageOnInitializationFailure {
return m.invalidRestriction
}
return m.validRestriction
}
if restriction, ok := m.restrictions[key]; ok {
return restriction
}
return m.invalidRestriction
}
// Close stops remote polling and closes the RemoteRestrictionManager.
func (m *RestrictionManager) Close() error {
close(m.stopPoll)
m.pollStopped.Wait()
return nil
}
func (m *RestrictionManager) pollManager() {
defer m.pollStopped.Done()
// attempt to initialize baggage restrictions
if err := m.updateRestrictions(); err != nil {
m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error()))
}
ticker := time.NewTicker(m.refreshInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := m.updateRestrictions(); err != nil {
m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error()))
}
case <-m.stopPoll:
return
}
}
}
func (m *RestrictionManager) updateRestrictions() error {
restrictions, err := m.thriftProxy.GetBaggageRestrictions(context.Background(), m.serviceName)
if err != nil {
m.metrics.BaggageRestrictionsUpdateFailure.Inc(1)
return err
}
newRestrictions := m.parseRestrictions(restrictions)
m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1)
m.mux.Lock()
defer m.mux.Unlock()
m.initialized = true
m.restrictions = newRestrictions
return nil
}
func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction {
setters := make(map[string]*baggage.Restriction, len(restrictions))
for _, restriction := range restrictions {
setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength))
}
return setters
}
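A sketch of the polling lifecycle above. The package is internal, so outside code cannot import it; the import path and the wiring shown here only illustrate how the client itself might construct and tear down the manager.

package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
	remote "github.com/uber/jaeger-client-go/internal/baggage/remote" // internal: illustrative only
)

func main() {
	mgr := remote.NewRestrictionManager(
		"my-service",
		remote.Options.HostPort("localhost:5778"),
		remote.Options.RefreshInterval(time.Minute),
		remote.Options.DenyBaggageOnInitializationFailure(false),
		remote.Options.Logger(jaeger.StdLogger),
	)
	defer mgr.Close()

	// Before the first successful poll, GetRestriction falls back to the
	// valid/invalid restriction selected by DenyBaggageOnInitializationFailure.
	r := mgr.GetRestriction("my-service", "session-id")
	_ = r.KeyAllowed()
	_ = r.MaxValueLength()
}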

View File

@@ -0,0 +1,71 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage
const (
defaultMaxValueLength = 2048
)
// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value.
type Restriction struct {
keyAllowed bool
maxValueLength int
}
// NewRestriction returns a new Restriction.
func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction {
return &Restriction{
keyAllowed: keyAllowed,
maxValueLength: maxValueLength,
}
}
// KeyAllowed returns whether the baggage key for this restriction is allowed.
func (r *Restriction) KeyAllowed() bool {
return r.keyAllowed
}
// MaxValueLength returns the max length for the baggage value.
func (r *Restriction) MaxValueLength() int {
return r.maxValueLength
}
// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager
// will return a Restriction for a specific baggage key which will determine whether the baggage
// key is allowed for the current service and any other applicable restrictions on the baggage
// value.
type RestrictionManager interface {
GetRestriction(service, key string) *Restriction
}
// DefaultRestrictionManager allows any baggage key.
type DefaultRestrictionManager struct {
defaultRestriction *Restriction
}
// NewDefaultRestrictionManager returns a DefaultRestrictionManager.
func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {
if maxValueLength == 0 {
maxValueLength = defaultMaxValueLength
}
return &DefaultRestrictionManager{
defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},
}
}
// GetRestriction implements RestrictionManager#GetRestriction.
func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction {
return m.defaultRestriction
}
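A short in-package sketch of the default manager above, written as if it lived in a _test.go file: every key is allowed, and passing 0 falls back to the default value-length cap.

package baggage

import "testing"

func TestDefaultRestrictionManagerSketch(t *testing.T) {
	mgr := NewDefaultRestrictionManager(0) // 0 falls back to defaultMaxValueLength
	r := mgr.GetRestriction("any-service", "any-key")
	if !r.KeyAllowed() || r.MaxValueLength() != defaultMaxValueLength {
		t.Errorf("unexpected restriction: allowed=%v maxLen=%d", r.KeyAllowed(), r.MaxValueLength())
	}
}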

View File

@@ -0,0 +1,25 @@
// Copyright (c) 2020 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package reporterstats
// ReporterStats exposes some metrics from the RemoteReporter.
type ReporterStats interface {
SpansDroppedFromQueue() int64
}
// Receiver can be implemented by a Transport to be given ReporterStats.
type Receiver interface {
SetReporterStats(ReporterStats)
}
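A sketch of how a sender might satisfy the Receiver hook above; the type name is made up for illustration.

package reporterstats

// statsAwareSender is a hypothetical transport that remembers the ReporterStats
// handed to it and can report queue drops when it flushes.
type statsAwareSender struct {
	stats ReporterStats
}

func (s *statsAwareSender) SetReporterStats(rs ReporterStats) { s.stats = rs }

func (s *statsAwareSender) droppedSoFar() int64 {
	if s.stats == nil {
		return 0
	}
	return s.stats.SpansDroppedFromQueue()
}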

View File

@@ -0,0 +1,81 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spanlog
import (
"encoding/json"
"fmt"
"github.com/opentracing/opentracing-go/log"
)
type fieldsAsMap map[string]string
// MaterializeWithJSON converts log Fields into JSON string
// TODO refactor into pluggable materializer
func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
fields := fieldsAsMap(make(map[string]string, len(logFields)))
for _, field := range logFields {
field.Marshal(fields)
}
if event, ok := fields["event"]; ok && len(fields) == 1 {
return []byte(event), nil
}
return json.Marshal(fields)
}
func (ml fieldsAsMap) EmitString(key, value string) {
ml[key] = value
}
func (ml fieldsAsMap) EmitBool(key string, value bool) {
ml[key] = fmt.Sprintf("%t", value)
}
func (ml fieldsAsMap) EmitInt(key string, value int) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitInt32(key string, value int32) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitInt64(key string, value int64) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
ml[key] = fmt.Sprintf("%f", value)
}
func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
ml[key] = fmt.Sprintf("%f", value)
}
func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
ml[key] = fmt.Sprintf("%+v", value)
}
func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
value(ml)
}
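A sketch of the materializer above, written as if in a _test.go file for this (internal) package: a lone "event" field collapses to its bare value, while anything else becomes a JSON object of stringified fields.

package spanlog

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

func ExampleMaterializeWithJSON() {
	b, _ := MaterializeWithJSON([]log.Field{log.String("event", "retrying")})
	fmt.Println(string(b)) // retrying

	b, _ = MaterializeWithJSON([]log.Field{
		log.String("event", "http request"),
		log.Int("status", 503),
	})
	fmt.Println(string(b)) // {"event":"http request","status":"503"}
}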

View File

@@ -0,0 +1,99 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"time"
"github.com/uber/jaeger-client-go"
)
const (
defaultHostPort = "localhost:5778"
defaultRefreshInterval = time.Second * 5
)
// Option is a function that sets some option on the Throttler
type Option func(options *options)
// Options is a factory for all available options
var Options options
type options struct {
metrics *jaeger.Metrics
logger jaeger.Logger
hostPort string
refreshInterval time.Duration
synchronousInitialization bool
}
// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics.
func (options) Metrics(m *jaeger.Metrics) Option {
return func(o *options) {
o.metrics = m
}
}
// Logger creates an Option that sets the logger used by the Throttler.
func (options) Logger(logger jaeger.Logger) Option {
return func(o *options) {
o.logger = logger
}
}
// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits.
func (options) HostPort(hostPort string) Option {
return func(o *options) {
o.hostPort = hostPort
}
}
// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for
// credits.
func (options) RefreshInterval(refreshInterval time.Duration) Option {
return func(o *options) {
o.refreshInterval = refreshInterval
}
}
// SynchronousInitialization creates an Option that determines whether the throttler should synchronously
// fetch credits from the agent when an operation is seen for the first time. This should be set to true
// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront
// such that sampling or throttling occurs.
func (options) SynchronousInitialization(b bool) Option {
return func(o *options) {
o.synchronousInitialization = b
}
}
func applyOptions(o ...Option) options {
opts := options{}
for _, option := range o {
option(&opts)
}
if opts.metrics == nil {
opts.metrics = jaeger.NewNullMetrics()
}
if opts.logger == nil {
opts.logger = jaeger.NullLogger
}
if opts.hostPort == "" {
opts.hostPort = defaultHostPort
}
if opts.refreshInterval == 0 {
opts.refreshInterval = defaultRefreshInterval
}
return opts
}

View File

@@ -0,0 +1,216 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"fmt"
"net/url"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/utils"
)
const (
// minimumCredits is the minimum amount of credits necessary to not be throttled.
// i.e. if currentCredits > minimumCredits, then the operation will not be throttled.
minimumCredits = 1.0
)
var (
errorUUIDNotSet = errors.New("Throttler UUID must be set")
)
type operationBalance struct {
Operation string `json:"operation"`
Balance float64 `json:"balance"`
}
type creditResponse struct {
Balances []operationBalance `json:"balances"`
}
type httpCreditManagerProxy struct {
hostPort string
}
func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy {
return &httpCreditManagerProxy{
hostPort: hostPort,
}
}
// N.B. Operations list must not be empty.
func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) {
params := url.Values{}
params.Set("service", serviceName)
params.Set("uuid", uuid)
for _, op := range operations {
params.Add("operations", op)
}
var resp creditResponse
if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil {
return nil, errors.Wrap(err, "Failed to receive credits from agent")
}
return &resp, nil
}
// Throttler retrieves credits from agent and uses it to throttle operations.
type Throttler struct {
options
mux sync.RWMutex
service string
uuid atomic.Value
creditManager *httpCreditManagerProxy
credits map[string]float64 // map of operation->credits
close chan struct{}
stopped sync.WaitGroup
}
// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle
// the service.
func NewThrottler(service string, options ...Option) *Throttler {
opts := applyOptions(options...)
creditManager := newHTTPCreditManagerProxy(opts.hostPort)
t := &Throttler{
options: opts,
creditManager: creditManager,
service: service,
credits: make(map[string]float64),
close: make(chan struct{}),
}
t.stopped.Add(1)
go t.pollManager()
return t
}
// IsAllowed implements Throttler#IsAllowed.
func (t *Throttler) IsAllowed(operation string) bool {
t.mux.Lock()
defer t.mux.Unlock()
value, ok := t.credits[operation]
if !ok || value == 0 {
if !ok {
// NOTE: This appears to be a no-op at first glance, but it stores
// the operation key in the map. Necessary for functionality of
// Throttler#operations method.
t.credits[operation] = 0
}
if !t.synchronousInitialization {
t.metrics.ThrottledDebugSpans.Inc(1)
return false
}
// If it is the first time this operation is being checked, synchronously fetch
// the credits.
credits, err := t.fetchCredits([]string{operation})
if err != nil {
// Failed to receive credits from agent, try again next time
t.logger.Error("Failed to fetch credits: " + err.Error())
return false
}
if len(credits.Balances) == 0 {
// This shouldn't happen but just in case
return false
}
for _, opBalance := range credits.Balances {
t.credits[opBalance.Operation] += opBalance.Balance
}
}
return t.isAllowed(operation)
}
// Close stops the throttler from fetching credits from remote.
func (t *Throttler) Close() error {
close(t.close)
t.stopped.Wait()
return nil
}
// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote
// requests are made.
func (t *Throttler) SetProcess(process jaeger.Process) {
if process.UUID != "" {
t.uuid.Store(process.UUID)
}
}
// N.B. This function must be called with the Write Lock
func (t *Throttler) isAllowed(operation string) bool {
credits := t.credits[operation]
if credits < minimumCredits {
t.metrics.ThrottledDebugSpans.Inc(1)
return false
}
t.credits[operation] = credits - minimumCredits
return true
}
func (t *Throttler) pollManager() {
defer t.stopped.Done()
ticker := time.NewTicker(t.refreshInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
t.refreshCredits()
case <-t.close:
return
}
}
}
func (t *Throttler) operations() []string {
t.mux.RLock()
defer t.mux.RUnlock()
operations := make([]string, 0, len(t.credits))
for op := range t.credits {
operations = append(operations, op)
}
return operations
}
func (t *Throttler) refreshCredits() {
operations := t.operations()
if len(operations) == 0 {
return
}
newCredits, err := t.fetchCredits(operations)
if err != nil {
t.metrics.ThrottlerUpdateFailure.Inc(1)
t.logger.Error("Failed to fetch credits: " + err.Error())
return
}
t.metrics.ThrottlerUpdateSuccess.Inc(1)
t.mux.Lock()
defer t.mux.Unlock()
for _, opBalance := range newCredits.Balances {
t.credits[opBalance.Operation] += opBalance.Balance
}
}
func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) {
uuid := t.uuid.Load()
uuidStr, _ := uuid.(string)
if uuid == nil || uuidStr == "" {
return nil, errorUUIDNotSet
}
return t.creditManager.FetchCredits(uuidStr, t.service, operations)
}
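A sketch of the throttler's call sequence. The package is internal, so the import is illustrative only; the UUID is set before the first credit fetch because fetchCredits refuses to run without it.

package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
	remote "github.com/uber/jaeger-client-go/internal/throttler/remote" // internal: illustrative only
)

func main() {
	t := remote.NewThrottler(
		"my-service",
		remote.Options.RefreshInterval(5*time.Second),
		remote.Options.SynchronousInitialization(true),
	)
	defer t.Close()

	// fetchCredits returns errorUUIDNotSet until a process UUID is provided.
	t.SetProcess(jaeger.Process{Service: "my-service", UUID: "01234567-89ab-cdef-0123-456789abcdef"})

	if t.IsAllowed("GET /users") {
		// enough credits: start the debug span
	}
}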

View File

@@ -0,0 +1,32 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package throttler
// Throttler is used to rate limit operations. For example, given how debug spans
// are always sampled, a throttler can be enabled per client to rate limit the amount
// of debug spans a client can start.
type Throttler interface {
// IsAllowed determines whether the operation should be allowed and not be
// throttled.
IsAllowed(operation string) bool
}
// DefaultThrottler doesn't throttle at all.
type DefaultThrottler struct{}
// IsAllowed implements Throttler#IsAllowed.
func (t DefaultThrottler) IsAllowed(operation string) bool {
return true
}

55
vendor/github.com/uber/jaeger-client-go/interop.go generated vendored Normal file
View File

@@ -0,0 +1,55 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"github.com/opentracing/opentracing-go"
)
// TODO this file should not be needed after TChannel PR.
type formatKey int
// SpanContextFormat is a constant used as OpenTracing Format.
// Requires *SpanContext as carrier.
// This format is intended for interop with TChannel or other Zipkin-like tracers.
const SpanContextFormat formatKey = iota
type jaegerTraceContextPropagator struct {
tracer *Tracer
}
func (p *jaegerTraceContextPropagator) Inject(
ctx SpanContext,
abstractCarrier interface{},
) error {
carrier, ok := abstractCarrier.(*SpanContext)
if !ok {
return opentracing.ErrInvalidCarrier
}
carrier.CopyFrom(&ctx)
return nil
}
func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
carrier, ok := abstractCarrier.(*SpanContext)
if !ok {
return emptyContext, opentracing.ErrInvalidCarrier
}
ctx := new(SpanContext)
ctx.CopyFrom(carrier)
return *ctx, nil
}
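A sketch of the interop format in use, assuming a tracer built with jaeger.NewTracer and jaeger.NewConstSampler (neither appears in this hunk) registers this propagator for SpanContextFormat.

package main

import (
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	tracer, closer := jaeger.NewTracer("my-service", jaeger.NewConstSampler(true), jaeger.NewNullReporter())
	defer closer.Close()

	span := tracer.StartSpan("parent")
	defer span.Finish()

	// Copy the span's context into a *SpanContext carrier, TChannel-style.
	var carrier jaeger.SpanContext
	if err := tracer.Inject(span.Context(), jaeger.SpanContextFormat, &carrier); err != nil {
		panic(err)
	}
}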

84
vendor/github.com/uber/jaeger-client-go/jaeger_tag.go generated vendored Normal file
View File

@@ -0,0 +1,84 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"fmt"
"github.com/opentracing/opentracing-go/log"
j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)
type tags []*j.Tag
// ConvertLogsToJaegerTags converts log Fields into jaeger tags.
func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag {
fields := tags(make([]*j.Tag, 0, len(logFields)))
for _, field := range logFields {
field.Marshal(&fields)
}
return fields
}
func (t *tags) EmitString(key, value string) {
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value})
}
func (t *tags) EmitBool(key string, value bool) {
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value})
}
func (t *tags) EmitInt(key string, value int) {
vLong := int64(value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}
func (t *tags) EmitInt32(key string, value int32) {
vLong := int64(value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}
func (t *tags) EmitInt64(key string, value int64) {
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value})
}
func (t *tags) EmitUint32(key string, value uint32) {
vLong := int64(value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}
func (t *tags) EmitUint64(key string, value uint64) {
vLong := int64(value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}
func (t *tags) EmitFloat32(key string, value float32) {
vDouble := float64(value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble})
}
func (t *tags) EmitFloat64(key string, value float64) {
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value})
}
func (t *tags) EmitObject(key string, value interface{}) {
vStr := fmt.Sprintf("%+v", value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr})
}
func (t *tags) EmitLazyLogger(value log.LazyLogger) {
value(t)
}
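A small sketch of the converter above, fed a few representative OpenTracing log fields.

package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	tags := jaeger.ConvertLogsToJaegerTags([]log.Field{
		log.String("event", "cache miss"),
		log.Int64("key.id", 42),
		log.Bool("fallback", true),
	})
	for _, tag := range tags {
		fmt.Println(tag.Key, tag.VType) // string, long, and bool tags respectively
	}
}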

View File

@@ -0,0 +1,181 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"time"
"github.com/opentracing/opentracing-go"
j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/utils"
)
// BuildJaegerThrift builds jaeger span based on internal span.
// TODO: (breaking change) move to internal package.
func BuildJaegerThrift(span *Span) *j.Span {
span.Lock()
defer span.Unlock()
startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
duration := span.duration.Nanoseconds() / int64(time.Microsecond)
jaegerSpan := &j.Span{
TraceIdLow: int64(span.context.traceID.Low),
TraceIdHigh: int64(span.context.traceID.High),
SpanId: int64(span.context.spanID),
ParentSpanId: int64(span.context.parentID),
OperationName: span.operationName,
Flags: int32(span.context.samplingState.flags()),
StartTime: startTime,
Duration: duration,
Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength),
Logs: buildLogs(span.logs),
References: buildReferences(span.references),
}
return jaegerSpan
}
// BuildJaegerProcessThrift creates a thrift Process type.
// TODO: (breaking change) move to internal package.
func BuildJaegerProcessThrift(span *Span) *j.Process {
span.Lock()
defer span.Unlock()
return buildJaegerProcessThrift(span.tracer)
}
func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
process := &j.Process{
ServiceName: tracer.serviceName,
Tags: buildTags(tracer.tags, tracer.options.maxTagValueLength),
}
if tracer.process.UUID != "" {
process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
}
return process
}
func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag {
jTags := make([]*j.Tag, 0, len(tags))
for _, tag := range tags {
jTag := buildTag(&tag, maxTagValueLength)
jTags = append(jTags, jTag)
}
return jTags
}
func buildLogs(logs []opentracing.LogRecord) []*j.Log {
jLogs := make([]*j.Log, 0, len(logs))
for _, log := range logs {
jLog := &j.Log{
Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
Fields: ConvertLogsToJaegerTags(log.Fields),
}
jLogs = append(jLogs, jLog)
}
return jLogs
}
func buildTag(tag *Tag, maxTagValueLength int) *j.Tag {
jTag := &j.Tag{Key: tag.key}
switch value := tag.value.(type) {
case string:
vStr := truncateString(value, maxTagValueLength)
jTag.VStr = &vStr
jTag.VType = j.TagType_STRING
case []byte:
if len(value) > maxTagValueLength {
value = value[:maxTagValueLength]
}
jTag.VBinary = value
jTag.VType = j.TagType_BINARY
case int:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case uint:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case int8:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case uint8:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case int16:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case uint16:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case int32:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case uint32:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case int64:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case uint64:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case float32:
vDouble := float64(value)
jTag.VDouble = &vDouble
jTag.VType = j.TagType_DOUBLE
case float64:
vDouble := float64(value)
jTag.VDouble = &vDouble
jTag.VType = j.TagType_DOUBLE
case bool:
vBool := value
jTag.VBool = &vBool
jTag.VType = j.TagType_BOOL
default:
vStr := truncateString(stringify(value), maxTagValueLength)
jTag.VStr = &vStr
jTag.VType = j.TagType_STRING
}
return jTag
}
func buildReferences(references []Reference) []*j.SpanRef {
retMe := make([]*j.SpanRef, 0, len(references))
for _, ref := range references {
if ref.Type == opentracing.ChildOfRef {
retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF))
} else if ref.Type == opentracing.FollowsFromRef {
retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM))
}
}
return retMe
}
func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef {
return &j.SpanRef{
RefType: refType,
TraceIdLow: int64(ctx.traceID.Low),
TraceIdHigh: int64(ctx.traceID.High),
SpanId: int64(ctx.spanID),
}
}
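A sketch of span-to-thrift conversion, assuming jaeger.NewTracer and jaeger.NewConstSampler from the rest of the library; only the Build* helpers themselves appear in this hunk.

package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	tracer, closer := jaeger.NewTracer("my-service", jaeger.NewConstSampler(true), jaeger.NewNullReporter())
	defer closer.Close()

	span := tracer.StartSpan("compute").(*jaeger.Span)
	span.SetTag("rows", 128)
	span.Finish()

	thriftSpan := jaeger.BuildJaegerThrift(span)     // IDs, tags, logs, references
	process := jaeger.BuildJaegerProcessThrift(span) // service name + tracer tags
	fmt.Println(thriftSpan.OperationName, process.ServiceName)
}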

141
vendor/github.com/uber/jaeger-client-go/log/logger.go generated vendored Normal file
View File

@@ -0,0 +1,141 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"bytes"
"fmt"
"log"
"sync"
)
// Logger provides an abstract interface for logging from Reporters.
// Applications can provide their own implementation of this interface to adapt
// reporters logging to whatever logging library they prefer (stdlib log,
// logrus, go-logging, etc).
type Logger interface {
// Error logs a message at error priority
Error(msg string)
// Infof logs a message at info priority
Infof(msg string, args ...interface{})
}
// StdLogger is an implementation of the Logger interface that delegates to the standard `log` package
var StdLogger = &stdLogger{}
type stdLogger struct{}
func (l *stdLogger) Error(msg string) {
log.Printf("ERROR: %s", msg)
}
// Infof logs a message at info priority
func (l *stdLogger) Infof(msg string, args ...interface{}) {
log.Printf(msg, args...)
}
// Debugf logs a message at debug priority
func (l *stdLogger) Debugf(msg string, args ...interface{}) {
log.Printf(fmt.Sprintf("DEBUG: %s", msg), args...)
}
// NullLogger is an implementation of the Logger interface that is a no-op
var NullLogger = &nullLogger{}
type nullLogger struct{}
func (l *nullLogger) Error(msg string) {}
func (l *nullLogger) Infof(msg string, args ...interface{}) {}
func (l *nullLogger) Debugf(msg string, args ...interface{}) {}
// BytesBufferLogger implements Logger backed by a bytes.Buffer.
type BytesBufferLogger struct {
mux sync.Mutex
buf bytes.Buffer
}
// Error implements Logger.
func (l *BytesBufferLogger) Error(msg string) {
l.mux.Lock()
l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg))
l.mux.Unlock()
}
// Infof implements Logger.
func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) {
l.mux.Lock()
l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n")
l.mux.Unlock()
}
// Debugf implements Logger.
func (l *BytesBufferLogger) Debugf(msg string, args ...interface{}) {
l.mux.Lock()
l.buf.WriteString("DEBUG: " + fmt.Sprintf(msg, args...) + "\n")
l.mux.Unlock()
}
// String returns string representation of the underlying buffer.
func (l *BytesBufferLogger) String() string {
l.mux.Lock()
defer l.mux.Unlock()
return l.buf.String()
}
// Flush empties the underlying buffer.
func (l *BytesBufferLogger) Flush() {
l.mux.Lock()
defer l.mux.Unlock()
l.buf.Reset()
}
// DebugLogger is an interface which adds a debug logging level
type DebugLogger interface {
Logger
// Debugf logs a message at debug priority
Debugf(msg string, args ...interface{})
}
// DebugLogAdapter is a log adapter that converts a Logger into a DebugLogger
// If the provided Logger doesn't satisfy the interface, a logger with debug
// disabled is returned
func DebugLogAdapter(logger Logger) DebugLogger {
if logger == nil {
return nil
}
if debugLogger, ok := logger.(DebugLogger); ok {
return debugLogger
}
logger.Infof("debug logging disabled")
return debugDisabledLogAdapter{logger: logger}
}
type debugDisabledLogAdapter struct {
logger Logger
}
func (d debugDisabledLogAdapter) Error(msg string) {
d.logger.Error(msg)
}
func (d debugDisabledLogAdapter) Infof(msg string, args ...interface{}) {
d.logger.Infof(msg, args...)
}
// Debugf is a nop
func (d debugDisabledLogAdapter) Debugf(msg string, args ...interface{}) {
}
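A sketch of the log helpers above: BytesBufferLogger captures output for tests, and DebugLogAdapter upgrades a plain Logger (StdLogger already implements DebugLogger, so Debugf prints; for loggers that don't, Debugf becomes a no-op).

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/log"
)

func main() {
	var buf log.BytesBufferLogger
	buf.Error("connection refused")
	buf.Infof("retrying in %ds", 5)
	fmt.Print(buf.String()) // "ERROR: connection refused" then "INFO: retrying in 5s"
	buf.Flush()

	dl := log.DebugLogAdapter(log.StdLogger)
	dl.Debugf("polling agent at %s", "localhost:5778")
}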

53
vendor/github.com/uber/jaeger-client-go/logger.go generated vendored Normal file
View File

@@ -0,0 +1,53 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import "log"
// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead.
// Logger provides an abstract interface for logging from Reporters.
// Applications can provide their own implementation of this interface to adapt
// reporters logging to whatever logging library they prefer (stdlib log,
// logrus, go-logging, etc).
type Logger interface {
// Error logs a message at error priority
Error(msg string)
// Infof logs a message at info priority
Infof(msg string, args ...interface{})
}
// StdLogger is an implementation of the Logger interface that delegates to the standard `log` package
var StdLogger = &stdLogger{}
type stdLogger struct{}
func (l *stdLogger) Error(msg string) {
log.Printf("ERROR: %s", msg)
}
// Infof logs a message at info priority
func (l *stdLogger) Infof(msg string, args ...interface{}) {
log.Printf(msg, args...)
}
// NullLogger is an implementation of the Logger interface that is a no-op
var NullLogger = &nullLogger{}
type nullLogger struct{}
func (l *nullLogger) Error(msg string) {}
func (l *nullLogger) Infof(msg string, args ...interface{}) {}

119
vendor/github.com/uber/jaeger-client-go/metrics.go generated vendored Normal file
View File

@@ -0,0 +1,119 @@
// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"github.com/uber/jaeger-lib/metrics"
)
// Metrics is a container of all stats emitted by Jaeger tracer.
type Metrics struct {
// Number of traces started by this tracer as sampled
TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"`
// Number of traces started by this tracer as not sampled
TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
// Number of traces started by this tracer with delayed sampling
TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer with delayed sampling"`
// Number of externally started sampled traces this tracer joined
TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
// Number of externally started not-sampled traces this tracer joined
TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
// Number of sampled spans started by this tracer
SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"`
// Number of not sampled spans started by this tracer
SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"`
// Number of spans with delayed sampling started by this tracer
SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"`
// Number of spans finished by this tracer
SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"`
// Number of spans finished by this tracer
SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"`
// Number of spans finished by this tracer
SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"`
// Number of errors decoding tracing context
DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
// Number of spans successfully reported
ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"`
// Number of spans not reported due to a Sender failure
ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"`
// Number of spans dropped due to internal queue overflow
ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"`
// Current number of spans in the reporter queue
ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"`
// Number of times the Sampler succeeded to retrieve sampling strategy
SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve sampling strategy"`
// Number of times the Sampler failed to retrieve sampling strategy
SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"`
// Number of times the Sampler succeeded to retrieve and update sampling strategy
SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve and update sampling strategy"`
// Number of times the Sampler failed to update sampling strategy
SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"`
// Number of times baggage was successfully written or updated on spans.
BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"`
// Number of times baggage failed to write or update on spans.
BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"`
// Number of times baggage was truncated as per baggage restrictions.
BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"`
// Number of times baggage restrictions were successfully updated.
BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"`
// Number of times baggage restrictions failed to update.
BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"`
// Number of times debug spans were throttled.
ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"`
// Number of times throttler successfully updated.
ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"`
// Number of times throttler failed to update.
ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"`
}
// NewMetrics creates a new Metrics struct and initializes it.
func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics {
m := &Metrics{}
// TODO the namespace "jaeger" should be configurable
metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags)
return m
}
// NewNullMetrics creates a new Metrics struct that won't report any metrics.
func NewNullMetrics() *Metrics {
return NewMetrics(metrics.NullFactory, nil)
}
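A sketch of wiring these metrics from a jaeger-lib factory. NullFactory is the only factory referenced in this hunk, so it stands in here; a real setup would pass a Prometheus- or expvar-backed factory instead.

package main

import (
	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"lib": "jaeger"})
	m.TracesStartedSampled.Inc(1) // safe to call; NullFactory simply discards it
	_ = jaeger.NewNullMetrics()   // shorthand for the same thing without tags
}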

88
vendor/github.com/uber/jaeger-client-go/observer.go generated vendored Normal file
View File

@@ -0,0 +1,88 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import opentracing "github.com/opentracing/opentracing-go"
// Observer can be registered with the Tracer to receive notifications about
// new Spans.
//
// Deprecated: use jaeger.ContribObserver instead.
type Observer interface {
OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver
}
// SpanObserver is created by the Observer and receives notifications about
// other Span events.
//
// Deprecated: use jaeger.ContribSpanObserver instead.
type SpanObserver interface {
OnSetOperationName(operationName string)
OnSetTag(key string, value interface{})
OnFinish(options opentracing.FinishOptions)
}
// compositeObserver is a dispatcher to other observers
type compositeObserver struct {
observers []ContribObserver
}
// compositeSpanObserver is a dispatcher to other span observers
type compositeSpanObserver struct {
observers []ContribSpanObserver
}
// noopSpanObserver is used when there are no observers registered
// on the Tracer or none of them returns span observers from OnStartSpan.
var noopSpanObserver = &compositeSpanObserver{}
func (o *compositeObserver) append(contribObserver ContribObserver) {
o.observers = append(o.observers, contribObserver)
}
func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver {
var spanObservers []ContribSpanObserver
for _, obs := range o.observers {
spanObs, ok := obs.OnStartSpan(sp, operationName, options)
if ok {
if spanObservers == nil {
spanObservers = make([]ContribSpanObserver, 0, len(o.observers))
}
spanObservers = append(spanObservers, spanObs)
}
}
if len(spanObservers) == 0 {
return noopSpanObserver
}
return &compositeSpanObserver{observers: spanObservers}
}
func (o *compositeSpanObserver) OnSetOperationName(operationName string) {
for _, obs := range o.observers {
obs.OnSetOperationName(operationName)
}
}
func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) {
for _, obs := range o.observers {
obs.OnSetTag(key, value)
}
}
func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) {
for _, obs := range o.observers {
obs.OnFinish(options)
}
}

29
vendor/github.com/uber/jaeger-client-go/process.go generated vendored Normal file
View File

@@ -0,0 +1,29 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
// Process holds process specific metadata that's relevant to this client.
type Process struct {
Service string
UUID string
Tags []Tag
}
// ProcessSetter sets a process. This can be used by any class that requires
// the process to be set as part of initialization.
// See internal/throttler/remote/throttler.go for an example.
type ProcessSetter interface {
SetProcess(process Process)
}

325
vendor/github.com/uber/jaeger-client-go/propagation.go generated vendored Normal file
View File

@@ -0,0 +1,325 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"log"
"net/url"
"strings"
"sync"
opentracing "github.com/opentracing/opentracing-go"
)
// Injector is responsible for injecting SpanContext instances in a manner suitable
// for propagation via a format-specific "carrier" object. Typically the
// injection will take place across an RPC boundary, but message queues and
// other IPC mechanisms are also reasonable places to use an Injector.
type Injector interface {
// Inject takes `SpanContext` and injects it into `carrier`. The actual type
// of `carrier` depends on the `format` passed to `Tracer.Inject()`.
//
// Implementations may return opentracing.ErrInvalidCarrier or any other
// implementation-specific error if injection fails.
Inject(ctx SpanContext, carrier interface{}) error
}
// Extractor is responsible for extracting SpanContext instances from a
// format-specific "carrier" object. Typically the extraction will take place
// on the server side of an RPC boundary, but message queues and other IPC
// mechanisms are also reasonable places to use an Extractor.
type Extractor interface {
// Extract decodes a SpanContext instance from the given `carrier`,
// or (nil, opentracing.ErrSpanContextNotFound) if no context could
// be found in the `carrier`.
Extract(carrier interface{}) (SpanContext, error)
}
// TextMapPropagator is a combined Injector and Extractor for TextMap format
type TextMapPropagator struct {
headerKeys *HeadersConfig
metrics Metrics
encodeValue func(string) string
decodeValue func(string) string
}
// NewTextMapPropagator creates a combined Injector and Extractor for TextMap format
func NewTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
return &TextMapPropagator{
headerKeys: headerKeys,
metrics: metrics,
encodeValue: func(val string) string {
return val
},
decodeValue: func(val string) string {
return val
},
}
}
// NewHTTPHeaderPropagator creates a combined Injector and Extractor for HTTPHeaders format
func NewHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
return &TextMapPropagator{
headerKeys: headerKeys,
metrics: metrics,
encodeValue: func(val string) string {
return url.QueryEscape(val)
},
decodeValue: func(val string) string {
// ignore decoding errors, cannot do anything about them
if v, err := url.QueryUnescape(val); err == nil {
return v
}
return val
},
}
}
// BinaryPropagator is a combined Injector and Extractor for Binary format
type BinaryPropagator struct {
tracer *Tracer
buffers sync.Pool
}
// NewBinaryPropagator creates a combined Injector and Extractor for Binary format
func NewBinaryPropagator(tracer *Tracer) *BinaryPropagator {
return &BinaryPropagator{
tracer: tracer,
buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
}
}
// Inject implements Injector of TextMapPropagator
func (p *TextMapPropagator) Inject(
sc SpanContext,
abstractCarrier interface{},
) error {
textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter)
if !ok {
return opentracing.ErrInvalidCarrier
}
// Do not encode the string with trace context to avoid accidental double-encoding
// if people are using opentracing < 0.10.0. Our colon-separated representation
// of the trace context is already safe for HTTP headers.
textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String())
for k, v := range sc.baggage {
safeKey := p.addBaggageKeyPrefix(k)
safeVal := p.encodeValue(v)
textMapWriter.Set(safeKey, safeVal)
}
return nil
}
// Extract implements Extractor of TextMapPropagator
func (p *TextMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
if !ok {
return emptyContext, opentracing.ErrInvalidCarrier
}
var ctx SpanContext
var baggage map[string]string
err := textMapReader.ForeachKey(func(rawKey, value string) error {
key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap
if key == p.headerKeys.TraceContextHeaderName {
var err error
safeVal := p.decodeValue(value)
if ctx, err = ContextFromString(safeVal); err != nil {
return err
}
} else if key == p.headerKeys.JaegerDebugHeader {
ctx.debugID = p.decodeValue(value)
} else if key == p.headerKeys.JaegerBaggageHeader {
if baggage == nil {
baggage = make(map[string]string)
}
for k, v := range p.parseCommaSeparatedMap(value) {
baggage[k] = v
}
} else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) {
if baggage == nil {
baggage = make(map[string]string)
}
safeKey := p.removeBaggageKeyPrefix(key)
safeVal := p.decodeValue(value)
baggage[safeKey] = safeVal
}
return nil
})
if err != nil {
p.metrics.DecodingErrors.Inc(1)
return emptyContext, err
}
if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 {
return emptyContext, opentracing.ErrSpanContextNotFound
}
ctx.baggage = baggage
return ctx, nil
}
// Inject implements Injector of BinaryPropagator
func (p *BinaryPropagator) Inject(
sc SpanContext,
abstractCarrier interface{},
) error {
carrier, ok := abstractCarrier.(io.Writer)
if !ok {
return opentracing.ErrInvalidCarrier
}
// Handle the tracer context
if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil {
return err
}
if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil {
return err
}
if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
return err
}
if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil {
return err
}
// Handle the baggage items
if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil {
return err
}
for k, v := range sc.baggage {
if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil {
return err
}
io.WriteString(carrier, k)
if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil {
return err
}
io.WriteString(carrier, v)
}
return nil
}
// W3C limits https://github.com/w3c/baggage/blob/master/baggage/HTTP_HEADER_FORMAT.md#limits
const (
maxBinaryBaggage = 180
maxBinaryNameValueLen = 4096
)
// Extract implements Extractor of BinaryPropagator
func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
carrier, ok := abstractCarrier.(io.Reader)
if !ok {
return emptyContext, opentracing.ErrInvalidCarrier
}
var ctx SpanContext
ctx.samplingState = &samplingState{}
if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
var flags byte
if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
ctx.samplingState.setFlags(flags)
// Handle the baggage items
var numBaggage int32
if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
if numBaggage > maxBinaryBaggage {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
ctx.baggage = make(map[string]string, iNumBaggage)
buf := p.buffers.Get().(*bytes.Buffer)
defer p.buffers.Put(buf)
var keyLen, valLen int32
for i := 0; i < iNumBaggage; i++ {
if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
buf.Reset()
buf.Grow(int(keyLen))
if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
key := buf.String()
if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
if keyLen+valLen > maxBinaryNameValueLen {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
buf.Reset()
buf.Grow(int(valLen))
if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
ctx.baggage[key] = buf.String()
}
}
return ctx, nil
}
// Converts a comma separated key value pair list into a map
// e.g. key1=value1, key2=value2, key3 = value3
// is converted to map[string]string { "key1" : "value1",
// "key2" : "value2",
// "key3" : "value3" }
func (p *TextMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
baggage := make(map[string]string)
value, err := url.QueryUnescape(value)
if err != nil {
log.Printf("Unable to unescape %s, %v", value, err)
return baggage
}
for _, kvpair := range strings.Split(value, ",") {
kv := strings.Split(strings.TrimSpace(kvpair), "=")
if len(kv) == 2 {
baggage[strings.TrimSpace(kv[0])] = kv[1]
} else {
log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
}
}
return baggage
}
// Converts a baggage item key into an http header format,
// by prepending TraceBaggageHeaderPrefix and encoding the key string
func (p *TextMapPropagator) addBaggageKeyPrefix(key string) string {
// TODO encodeBaggageKeyAsHeader add caching and escaping
return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
}
func (p *TextMapPropagator) removeBaggageKeyPrefix(key string) string {
// TODO decodeBaggageHeaderKey add caching and escaping
return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
}
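A sketch of header propagation end to end, assuming jaeger.NewTracer and jaeger.NewConstSampler from the rest of the library; the HTTP header propagator above is what backs opentracing.HTTPHeaders here.

package main

import (
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	tracer, closer := jaeger.NewTracer("my-service", jaeger.NewConstSampler(true), jaeger.NewNullReporter())
	defer closer.Close()

	// Client side: inject the span context (and baggage) into outgoing headers.
	span := tracer.StartSpan("call downstream")
	span.SetBaggageItem("tenant", "acme")
	req, _ := http.NewRequest("GET", "http://downstream/api", nil)
	_ = tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header))
	span.Finish()

	// Server side: extract the same context and continue the trace.
	if sc, err := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header)); err == nil {
		child := tracer.StartSpan("handle request", opentracing.ChildOf(sc))
		child.Finish()
	}
}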

23
vendor/github.com/uber/jaeger-client-go/reference.go generated vendored Normal file
View File

@@ -0,0 +1,23 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import "github.com/opentracing/opentracing-go"
// Reference represents a causal reference to other Spans (via their SpanContext).
type Reference struct {
Type opentracing.SpanReferenceType
Context SpanContext
}

322
vendor/github.com/uber/jaeger-client-go/reporter.go generated vendored Normal file
View File

@@ -0,0 +1,322 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go/internal/reporterstats"
"github.com/uber/jaeger-client-go/log"
)
// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
type Reporter interface {
// Report submits a new span to collectors, possibly asynchronously and/or with buffering.
// If the reporter is processing Spans asynchronously then it needs to Retain() the span,
// and then Release() it when no longer needed, to avoid span data corruption.
Report(span *Span)
// Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
Close()
}
// ------------------------------
type nullReporter struct{}
// NewNullReporter creates a no-op reporter that ignores all reported spans.
func NewNullReporter() Reporter {
return &nullReporter{}
}
// Report implements Report() method of Reporter by doing nothing.
func (r *nullReporter) Report(span *Span) {
// no-op
}
// Close implements Close() method of Reporter by doing nothing.
func (r *nullReporter) Close() {
// no-op
}
// ------------------------------
type loggingReporter struct {
logger Logger
}
// NewLoggingReporter creates a reporter that logs all reported spans to provided logger.
func NewLoggingReporter(logger Logger) Reporter {
return &loggingReporter{logger}
}
// Report implements Report() method of Reporter by logging the span to the logger.
func (r *loggingReporter) Report(span *Span) {
r.logger.Infof("Reporting span %+v", span)
}
// Close implements Close() method of Reporter by doing nothing.
func (r *loggingReporter) Close() {
// no-op
}
// ------------------------------
// InMemoryReporter is used for testing, and simply collects spans in memory.
type InMemoryReporter struct {
spans []opentracing.Span
lock sync.Mutex
}
// NewInMemoryReporter creates a reporter that stores spans in memory.
// NOTE: the Tracer should be created with options.PoolSpans = false.
func NewInMemoryReporter() *InMemoryReporter {
return &InMemoryReporter{
spans: make([]opentracing.Span, 0, 10),
}
}
// Report implements Report() method of Reporter by storing the span in the buffer.
func (r *InMemoryReporter) Report(span *Span) {
r.lock.Lock()
// Need to retain the span otherwise it will be released
r.spans = append(r.spans, span.Retain())
r.lock.Unlock()
}
// Close implements Close() method of Reporter
func (r *InMemoryReporter) Close() {
r.Reset()
}
// SpansSubmitted returns the number of spans accumulated in the buffer.
func (r *InMemoryReporter) SpansSubmitted() int {
r.lock.Lock()
defer r.lock.Unlock()
return len(r.spans)
}
// GetSpans returns accumulated spans as a copy of the buffer.
func (r *InMemoryReporter) GetSpans() []opentracing.Span {
r.lock.Lock()
defer r.lock.Unlock()
copied := make([]opentracing.Span, len(r.spans))
copy(copied, r.spans)
return copied
}
// Reset clears all accumulated spans.
func (r *InMemoryReporter) Reset() {
r.lock.Lock()
defer r.lock.Unlock()
// Before resetting the collection we need to release the Span memory
for _, span := range r.spans {
span.(*Span).Release()
}
r.spans = r.spans[:0]
}
// ------------------------------
type compositeReporter struct {
reporters []Reporter
}
// NewCompositeReporter creates a reporter that delegates all reported spans to a list of underlying reporters.
func NewCompositeReporter(reporters ...Reporter) Reporter {
return &compositeReporter{reporters: reporters}
}
// Report implements Report() method of Reporter by delegating to each underlying reporter.
func (r *compositeReporter) Report(span *Span) {
for _, reporter := range r.reporters {
reporter.Report(span)
}
}
// Close implements Close() method of Reporter by closing each underlying reporter.
func (r *compositeReporter) Close() {
for _, reporter := range r.reporters {
reporter.Close()
}
}
// ------------- REMOTE REPORTER -----------------
type reporterQueueItemType int
const (
defaultQueueSize = 100
defaultBufferFlushInterval = 1 * time.Second
reporterQueueItemSpan reporterQueueItemType = iota
reporterQueueItemClose
)
type reporterQueueItem struct {
itemType reporterQueueItemType
span *Span
close *sync.WaitGroup
}
// reporterStats implements reporterstats.ReporterStats.
type reporterStats struct {
droppedCount int64 // provided to Transports to report data loss to the backend
}
// SpansDroppedFromQueue implements reporterstats.ReporterStats.
func (r *reporterStats) SpansDroppedFromQueue() int64 {
return atomic.LoadInt64(&r.droppedCount)
}
func (r *reporterStats) incDroppedCount() {
atomic.AddInt64(&r.droppedCount, 1)
}
type remoteReporter struct {
// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
queueLength int64 // used to update metrics.Gauge
closed int64 // 0 - not closed, 1 - closed
reporterOptions
sender Transport
queue chan reporterQueueItem
reporterStats *reporterStats
}
// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender.
// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped).
// Periodically the transport buffer is flushed even if it hasn't reached max packet size.
// Calls to Close() block until all spans reported prior to the call to Close are flushed.
func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
options := reporterOptions{}
for _, option := range opts {
option(&options)
}
if options.bufferFlushInterval <= 0 {
options.bufferFlushInterval = defaultBufferFlushInterval
}
if options.logger == nil {
options.logger = log.NullLogger
}
if options.metrics == nil {
options.metrics = NewNullMetrics()
}
if options.queueSize <= 0 {
options.queueSize = defaultQueueSize
}
reporter := &remoteReporter{
reporterOptions: options,
sender: sender,
queue: make(chan reporterQueueItem, options.queueSize),
reporterStats: new(reporterStats),
}
if receiver, ok := sender.(reporterstats.Receiver); ok {
receiver.SetReporterStats(reporter.reporterStats)
}
go reporter.processQueue()
return reporter
}
// Report implements Report() method of Reporter.
// It passes the span to a background go-routine for submission to Jaeger backend.
// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented.
// If Report() is called after the reporter has been Close()-ed, the additional spans will not be
// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly,
// because some of them may still be successfully added to the queue.
func (r *remoteReporter) Report(span *Span) {
select {
// Need to retain the span otherwise it will be released
case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span.Retain()}:
atomic.AddInt64(&r.queueLength, 1)
default:
r.metrics.ReporterDropped.Inc(1)
r.reporterStats.incDroppedCount()
}
}
// Close implements Close() method of Reporter by waiting for the queue to be drained.
func (r *remoteReporter) Close() {
r.logger.Debugf("closing reporter")
if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped {
r.logger.Error("Repeated attempt to close the reporter is ignored")
return
}
r.sendCloseEvent()
_ = r.sender.Close()
}
func (r *remoteReporter) sendCloseEvent() {
wg := &sync.WaitGroup{}
wg.Add(1)
item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg}
r.queue <- item // if the queue is full we will block until there is space
atomic.AddInt64(&r.queueLength, 1)
wg.Wait()
}
// processQueue reads spans from the queue and passes them to the sender, which converts them to Thrift
// and stores them in an internal buffer. When the buffer is full, it is flushed by submitting the
// accumulated spans to Jaeger. The buffer is also flushed automatically every bufferFlushInterval,
// in case the tracer stops reporting new spans.
func (r *remoteReporter) processQueue() {
// flush causes the Sender to flush its accumulated spans and clear the buffer
flush := func() {
if flushed, err := r.sender.Flush(); err != nil {
r.metrics.ReporterFailure.Inc(int64(flushed))
r.logger.Error(fmt.Sprintf("failed to flush Jaeger spans to server: %s", err.Error()))
} else if flushed > 0 {
r.metrics.ReporterSuccess.Inc(int64(flushed))
}
}
timer := time.NewTicker(r.bufferFlushInterval)
for {
select {
case <-timer.C:
flush()
case item := <-r.queue:
atomic.AddInt64(&r.queueLength, -1)
switch item.itemType {
case reporterQueueItemSpan:
span := item.span
if flushed, err := r.sender.Append(span); err != nil {
r.metrics.ReporterFailure.Inc(int64(flushed))
r.logger.Error(fmt.Sprintf("error reporting Jaeger span %q: %s", span.OperationName(), err.Error()))
} else if flushed > 0 {
r.metrics.ReporterSuccess.Inc(int64(flushed))
// to reduce the number of gauge stats, we only emit queue length on flush
r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
r.logger.Debugf("flushed %d spans", flushed)
}
span.Release()
case reporterQueueItemClose:
timer.Stop()
flush()
item.close.Done()
return
}
}
}
}
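Since InMemoryReporter is meant for tests, a short hedged sketch (not part of the vendored file; it assumes the package's NewTracer constructor) of the typical flow may help: build a tracer with the reporter, finish a span, then inspect the buffer.

package main

import (
    "fmt"

    "github.com/uber/jaeger-client-go"
)

// inMemoryExample collects finished spans in memory; the service and
// operation names are placeholders.
func inMemoryExample() {
    reporter := jaeger.NewInMemoryReporter()
    tracer, closer := jaeger.NewTracer(
        "example-service",
        jaeger.NewConstSampler(true), // sample everything so the span is reported
        reporter,
    )
    defer closer.Close()

    tracer.StartSpan("load-user").Finish()

    fmt.Println(reporter.SpansSubmitted()) // 1
    for _, s := range reporter.GetSpans() {
        fmt.Println(s.(*jaeger.Span).OperationName())
    }
}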

View File

@@ -0,0 +1,71 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"time"
"github.com/uber/jaeger-client-go/log"
)
// ReporterOption is a function that sets some option on the reporter.
type ReporterOption func(c *reporterOptions)
// ReporterOptions is a factory for all available ReporterOption's
var ReporterOptions reporterOptions
// reporterOptions control behavior of the reporter.
type reporterOptions struct {
// queueSize is the size of internal queue where reported spans are stored before they are processed in the background
queueSize int
// bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
bufferFlushInterval time.Duration
// logger is used to log errors of span submissions
logger log.DebugLogger
// metrics is used to record runtime stats
metrics *Metrics
}
// QueueSize creates a ReporterOption that sets the size of the internal queue where
// spans are stored before they are processed.
func (reporterOptions) QueueSize(queueSize int) ReporterOption {
return func(r *reporterOptions) {
r.queueSize = queueSize
}
}
// Metrics creates a ReporterOption that initializes Metrics in the reporter,
// which is used to record runtime statistics.
func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
return func(r *reporterOptions) {
r.metrics = metrics
}
}
// BufferFlushInterval creates a ReporterOption that sets how often the queue
// is force-flushed.
func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
return func(r *reporterOptions) {
r.bufferFlushInterval = bufferFlushInterval
}
}
// Logger creates a ReporterOption that initializes the logger used to log
// errors of span submissions.
func (reporterOptions) Logger(logger Logger) ReporterOption {
return func(r *reporterOptions) {
r.logger = log.DebugLogAdapter(logger)
}
}
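Putting NewRemoteReporter and the ReporterOptions together, a hedged sketch follows (not part of the vendored file); it assumes a jaeger-agent listening on localhost:6831 and the package's UDP transport constructor NewUDPTransport, and the endpoint, queue size, and flush interval are placeholders.

package main

import (
    "time"

    "github.com/uber/jaeger-client-go"
    jaegerlog "github.com/uber/jaeger-client-go/log"
)

// newDebugReporter builds a remote reporter that buffers spans and flushes
// them to a local agent, wrapped in a composite reporter that also logs
// every span while debugging.
func newDebugReporter() (jaeger.Reporter, error) {
    sender, err := jaeger.NewUDPTransport("localhost:6831", 0) // 0 = default max packet size
    if err != nil {
        return nil, err
    }
    remote := jaeger.NewRemoteReporter(
        sender,
        jaeger.ReporterOptions.QueueSize(500),
        jaeger.ReporterOptions.BufferFlushInterval(time.Second),
        jaeger.ReporterOptions.Logger(jaegerlog.StdLogger),
    )
    return jaeger.NewCompositeReporter(
        jaeger.NewLoggingReporter(jaegerlog.StdLogger),
        remote,
    ), nil
}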

View File

@@ -0,0 +1,5 @@
An Observer that can be used to emit RPC metrics
================================================
It can be attached to the tracer during tracer construction.
See `ExampleObserver` function in [observer_test.go](./observer_test.go).
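The vendored copy omits observer_test.go, so a hedged sketch of the attachment described above is given here (not part of the vendored file); it assumes the parent package's `TracerOptions.Observer` option and uses the no-op metrics factory from jaeger-lib as a stand-in for a real metrics backend.

package main

import (
    "io"

    "github.com/opentracing/opentracing-go"
    "github.com/uber/jaeger-client-go"
    "github.com/uber/jaeger-client-go/rpcmetrics"
    "github.com/uber/jaeger-lib/metrics"
)

// newTracerWithRPCMetrics attaches the rpcmetrics Observer at tracer
// construction time; metrics.NullFactory is a placeholder for a real
// metrics backend (Prometheus, tally, expvar, ...).
func newTracerWithRPCMetrics() (opentracing.Tracer, io.Closer) {
    observer := rpcmetrics.NewObserver(metrics.NullFactory, rpcmetrics.DefaultNameNormalizer)
    return jaeger.NewTracer(
        "example-service", // placeholder service name
        jaeger.NewConstSampler(true),
        jaeger.NewNullReporter(),
        jaeger.TracerOptions.Observer(observer),
    )
}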

View File

@@ -0,0 +1,16 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package rpcmetrics implements an Observer that can be used to emit RPC metrics.
package rpcmetrics

View File

@@ -0,0 +1,63 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcmetrics
import "sync"
// normalizedEndpoints is a cache for endpointName -> safeName mappings.
type normalizedEndpoints struct {
names map[string]string
maxSize int
defaultName string
normalizer NameNormalizer
mux sync.RWMutex
}
func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints {
return &normalizedEndpoints{
maxSize: maxSize,
normalizer: normalizer,
names: make(map[string]string, maxSize),
}
}
// normalize looks up the name in the cache; if not found, it uses the normalizer
// to convert the name to a safe name. If called with more than maxSize unique
// names, it returns "" for all names beyond those already cached.
func (n *normalizedEndpoints) normalize(name string) string {
n.mux.RLock()
norm, ok := n.names[name]
l := len(n.names)
n.mux.RUnlock()
if ok {
return norm
}
if l >= n.maxSize {
return ""
}
return n.normalizeWithLock(name)
}
func (n *normalizedEndpoints) normalizeWithLock(name string) string {
norm := n.normalizer.Normalize(name)
n.mux.Lock()
defer n.mux.Unlock()
// cache may have grown while we were not holding the lock
if len(n.names) >= n.maxSize {
return ""
}
n.names[name] = norm
return norm
}

View File

@@ -0,0 +1,124 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcmetrics
import (
"sync"
"github.com/uber/jaeger-lib/metrics"
)
const (
otherEndpointsPlaceholder = "other"
endpointNameMetricTag = "endpoint"
)
// Metrics is a collection of metrics for an endpoint describing
// throughput, success, errors, and performance.
type Metrics struct {
// RequestCountSuccess is a counter of the total number of successes.
RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"`
// RequestCountFailures is a counter of the number of times any failure has been observed.
RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"`
// RequestLatencySuccess is a latency histogram of successful requests.
RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"`
// RequestLatencyFailures is a latency histogram of failed requests.
RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"`
// HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299
HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"`
// HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399
HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"`
// HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499
HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"`
// HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599
HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"`
}
func (m *Metrics) recordHTTPStatusCode(statusCode uint16) {
if statusCode >= 200 && statusCode < 300 {
m.HTTPStatusCode2xx.Inc(1)
} else if statusCode >= 300 && statusCode < 400 {
m.HTTPStatusCode3xx.Inc(1)
} else if statusCode >= 400 && statusCode < 500 {
m.HTTPStatusCode4xx.Inc(1)
} else if statusCode >= 500 && statusCode < 600 {
m.HTTPStatusCode5xx.Inc(1)
}
}
// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name.
// Only maxNumberOfEndpoints Metrics are stored, all other endpoint names are mapped
// to a generic endpoint name "other".
type MetricsByEndpoint struct {
metricsFactory metrics.Factory
endpoints *normalizedEndpoints
metricsByEndpoint map[string]*Metrics
mux sync.RWMutex
}
func newMetricsByEndpoint(
metricsFactory metrics.Factory,
normalizer NameNormalizer,
maxNumberOfEndpoints int,
) *MetricsByEndpoint {
return &MetricsByEndpoint{
metricsFactory: metricsFactory,
endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer),
metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other"
}
}
func (m *MetricsByEndpoint) get(endpoint string) *Metrics {
safeName := m.endpoints.normalize(endpoint)
if safeName == "" {
safeName = otherEndpointsPlaceholder
}
m.mux.RLock()
met := m.metricsByEndpoint[safeName]
m.mux.RUnlock()
if met != nil {
return met
}
return m.getWithWriteLock(safeName)
}
// split out to make it easier to test
func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics {
m.mux.Lock()
defer m.mux.Unlock()
// it is possible that the name has been already registered after we released
// the read lock and before we grabbed the write lock, so check for that.
if met, ok := m.metricsByEndpoint[safeName]; ok {
return met
}
// it would be nice to create the struct before locking, since Init() is somewhat
// expensive, however some metrics backends (e.g. expvar) may not like duplicate metrics.
met := &Metrics{}
tags := map[string]string{endpointNameMetricTag: safeName}
metrics.Init(met, m.metricsFactory, tags)
m.metricsByEndpoint[safeName] = met
return met
}

View File

@@ -0,0 +1,101 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcmetrics
// NameNormalizer is used to convert the endpoint names to strings
// that can be safely used as tags in the metrics.
type NameNormalizer interface {
Normalize(name string) string
}
// DefaultNameNormalizer converts endpoint names so that they contain only characters
// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'.
var DefaultNameNormalizer = &SimpleNameNormalizer{
SafeSets: []SafeCharacterSet{
&Range{From: 'a', To: 'z'},
&Range{From: 'A', To: 'Z'},
&Range{From: '0', To: '9'},
&Char{'-'},
&Char{'_'},
&Char{'/'},
&Char{'.'},
},
Replacement: '-',
}
// SimpleNameNormalizer uses a set of safe character sets.
type SimpleNameNormalizer struct {
SafeSets []SafeCharacterSet
Replacement byte
}
// SafeCharacterSet determines if the given character is "safe"
type SafeCharacterSet interface {
IsSafe(c byte) bool
}
// Range implements SafeCharacterSet
type Range struct {
From, To byte
}
// IsSafe implements SafeCharacterSet
func (r *Range) IsSafe(c byte) bool {
return c >= r.From && c <= r.To
}
// Char implements SafeCharacterSet
type Char struct {
Val byte
}
// IsSafe implements SafeCharacterSet
func (ch *Char) IsSafe(c byte) bool {
return c == ch.Val
}
// Normalize checks each character in the string against SafeSets,
// and if it's not safe substitutes it with Replacement.
func (n *SimpleNameNormalizer) Normalize(name string) string {
var retMe []byte
nameBytes := []byte(name)
for i, b := range nameBytes {
if n.safeByte(b) {
if retMe != nil {
retMe[i] = b
}
} else {
if retMe == nil {
retMe = make([]byte, len(nameBytes))
copy(retMe[0:i], nameBytes[0:i])
}
retMe[i] = n.Replacement
}
}
if retMe == nil {
return name
}
return string(retMe)
}
// safeByte checks b against all safe charsets.
func (n *SimpleNameNormalizer) safeByte(b byte) bool {
for i := range n.SafeSets {
if n.SafeSets[i].IsSafe(b) {
return true
}
}
return false
}
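As a worked example of the normalizer (a hedged sketch, not part of the vendored file): any byte outside the safe sets is replaced with the '-' replacement byte, so an HTTP route template loses its space and braces.

package main

import (
    "fmt"

    "github.com/uber/jaeger-client-go/rpcmetrics"
)

// The space, '{' and '}' are outside the safe sets and get replaced with '-'
// so the endpoint name is usable as a metrics tag value.
func main() {
    fmt.Println(rpcmetrics.DefaultNameNormalizer.Normalize("GET /user/{id}"))
    // Output: GET-/user/-id-
}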

View File

@@ -0,0 +1,171 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcmetrics
import (
"strconv"
"sync"
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/uber/jaeger-lib/metrics"
jaeger "github.com/uber/jaeger-client-go"
)
const defaultMaxNumberOfEndpoints = 200
// Observer is an observer that can emit RPC metrics.
type Observer struct {
metricsByEndpoint *MetricsByEndpoint
}
// NewObserver creates a new observer that can emit RPC metrics.
func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer {
return &Observer{
metricsByEndpoint: newMetricsByEndpoint(
metricsFactory,
normalizer,
defaultMaxNumberOfEndpoints,
),
}
}
// OnStartSpan creates a new Observer for the span.
func (o *Observer) OnStartSpan(
operationName string,
options opentracing.StartSpanOptions,
) jaeger.SpanObserver {
return NewSpanObserver(o.metricsByEndpoint, operationName, options)
}
// SpanKind identifies the span as inbound, outbound, or local (internal)
type SpanKind int
const (
// Local span kind
Local SpanKind = iota
// Inbound span kind
Inbound
// Outbound span kind
Outbound
)
// SpanObserver collects RPC metrics
type SpanObserver struct {
metricsByEndpoint *MetricsByEndpoint
operationName string
startTime time.Time
mux sync.Mutex
kind SpanKind
httpStatusCode uint16
err bool
}
// NewSpanObserver creates a new SpanObserver that can emit RPC metrics.
func NewSpanObserver(
metricsByEndpoint *MetricsByEndpoint,
operationName string,
options opentracing.StartSpanOptions,
) *SpanObserver {
so := &SpanObserver{
metricsByEndpoint: metricsByEndpoint,
operationName: operationName,
startTime: options.StartTime,
}
for k, v := range options.Tags {
so.handleTagInLock(k, v)
}
return so
}
// handleTagInLock watches for special tags:
// - SpanKind
// - HttpStatusCode
// - Error
func (so *SpanObserver) handleTagInLock(key string, value interface{}) {
if key == string(ext.SpanKind) {
if v, ok := value.(ext.SpanKindEnum); ok {
value = string(v)
}
if v, ok := value.(string); ok {
if v == string(ext.SpanKindRPCClientEnum) {
so.kind = Outbound
} else if v == string(ext.SpanKindRPCServerEnum) {
so.kind = Inbound
}
}
return
}
if key == string(ext.HTTPStatusCode) {
if v, ok := value.(uint16); ok {
so.httpStatusCode = v
} else if v, ok := value.(int); ok {
so.httpStatusCode = uint16(v)
} else if v, ok := value.(string); ok {
if vv, err := strconv.Atoi(v); err == nil {
so.httpStatusCode = uint16(vv)
}
}
return
}
if key == string(ext.Error) {
if v, ok := value.(bool); ok {
so.err = v
} else if v, ok := value.(string); ok {
if vv, err := strconv.ParseBool(v); err == nil {
so.err = vv
}
}
return
}
}
// OnFinish emits the RPC metrics. It only has an effect when operation name
// is not blank, and the span kind is an RPC server.
func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) {
so.mux.Lock()
defer so.mux.Unlock()
if so.operationName == "" || so.kind != Inbound {
return
}
mets := so.metricsByEndpoint.get(so.operationName)
latency := options.FinishTime.Sub(so.startTime)
if so.err {
mets.RequestCountFailures.Inc(1)
mets.RequestLatencyFailures.Record(latency)
} else {
mets.RequestCountSuccess.Inc(1)
mets.RequestLatencySuccess.Record(latency)
}
mets.recordHTTPStatusCode(so.httpStatusCode)
}
// OnSetOperationName records new operation name.
func (so *SpanObserver) OnSetOperationName(operationName string) {
so.mux.Lock()
so.operationName = operationName
so.mux.Unlock()
}
// OnSetTag implements SpanObserver
func (so *SpanObserver) OnSetTag(key string, value interface{}) {
so.mux.Lock()
so.handleTagInLock(key, value)
so.mux.Unlock()
}

516
vendor/github.com/uber/jaeger-client-go/sampler.go generated vendored Normal file
View File

@@ -0,0 +1,516 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"fmt"
"math"
"strings"
"sync"
"github.com/uber/jaeger-client-go/thrift-gen/sampling"
"github.com/uber/jaeger-client-go/utils"
)
const (
defaultMaxOperations = 2000
)
// Sampler decides whether a new trace should be sampled or not.
type Sampler interface {
// IsSampled decides whether a trace with given `id` and `operation`
// should be sampled. This function will also return the tags that
// can be used to identify the type of sampling that was applied to
// the root span. Most simple samplers would return two tags,
// sampler.type and sampler.param, similar to those used in the Configuration.
IsSampled(id TraceID, operation string) (sampled bool, tags []Tag)
// Close does a clean shutdown of the sampler, stopping any background
// go-routines it may have started.
Close()
// Equal checks if the `other` sampler is functionally equivalent
// to this sampler.
// TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation.
Equal(other Sampler) bool
}
// -----------------------
// ConstSampler is a sampler that always makes the same decision.
type ConstSampler struct {
legacySamplerV1Base
Decision bool
tags []Tag
}
// NewConstSampler creates a ConstSampler.
func NewConstSampler(sample bool) *ConstSampler {
tags := []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeConst},
{key: SamplerParamTagKey, value: sample},
}
s := &ConstSampler{
Decision: sample,
tags: tags,
}
s.delegate = s.IsSampled
return s
}
// IsSampled implements IsSampled() of Sampler.
func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
return s.Decision, s.tags
}
// Close implements Close() of Sampler.
func (s *ConstSampler) Close() {
// nothing to do
}
// Equal implements Equal() of Sampler.
func (s *ConstSampler) Equal(other Sampler) bool {
if o, ok := other.(*ConstSampler); ok {
return s.Decision == o.Decision
}
return false
}
// String is used to log sampler details.
func (s *ConstSampler) String() string {
return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision)
}
// -----------------------
// ProbabilisticSampler is a sampler that randomly samples a certain percentage
// of traces.
type ProbabilisticSampler struct {
legacySamplerV1Base
samplingRate float64
samplingBoundary uint64
tags []Tag
}
const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff
// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the
// samplingRate, in the range between 0.0 and 1.0.
//
// It relies on the fact that new trace IDs are 63bit random numbers themselves, thus making the sampling decision
// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63).
// TODO remove the error from this function for next major release
func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) {
if samplingRate < 0.0 || samplingRate > 1.0 {
return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
}
return newProbabilisticSampler(samplingRate), nil
}
func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
s := new(ProbabilisticSampler)
s.delegate = s.IsSampled
return s.init(samplingRate)
}
func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler {
s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate)
s.tags = []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
{key: SamplerParamTagKey, value: s.samplingRate},
}
return s
}
// SamplingRate returns the sampling probability this sampler was constructed with.
func (s *ProbabilisticSampler) SamplingRate() float64 {
return s.samplingRate
}
// IsSampled implements IsSampled() of Sampler.
func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
return s.samplingBoundary >= id.Low&maxRandomNumber, s.tags
}
// Close implements Close() of Sampler.
func (s *ProbabilisticSampler) Close() {
// nothing to do
}
// Equal implements Equal() of Sampler.
func (s *ProbabilisticSampler) Equal(other Sampler) bool {
if o, ok := other.(*ProbabilisticSampler); ok {
return s.samplingBoundary == o.samplingBoundary
}
return false
}
// Update modifies in-place the sampling rate. Locking must be done externally.
func (s *ProbabilisticSampler) Update(samplingRate float64) error {
if samplingRate < 0.0 || samplingRate > 1.0 {
return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
}
s.init(samplingRate)
return nil
}
// String is used to log sampler details.
func (s *ProbabilisticSampler) String() string {
return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate)
}
// -----------------------
// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows
// burstiness of the service, i.e. a service with uniformly distributed requests will have those
// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a
// number of sequential requests can be sampled each second.
type RateLimitingSampler struct {
legacySamplerV1Base
maxTracesPerSecond float64
rateLimiter *utils.ReconfigurableRateLimiter
tags []Tag
}
// NewRateLimitingSampler creates new RateLimitingSampler.
func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler {
s := new(RateLimitingSampler)
s.delegate = s.IsSampled
return s.init(maxTracesPerSecond)
}
func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler {
if s.rateLimiter == nil {
s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
} else {
s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
}
s.maxTracesPerSecond = maxTracesPerSecond
s.tags = []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
{key: SamplerParamTagKey, value: maxTracesPerSecond},
}
return s
}
// IsSampled implements IsSampled() of Sampler.
func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
return s.rateLimiter.CheckCredit(1.0), s.tags
}
// Update reconfigures the rate limiter, while preserving its accumulated balance.
// Locking must be done externally.
func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) {
if s.maxTracesPerSecond != maxTracesPerSecond {
s.init(maxTracesPerSecond)
}
}
// Close does nothing.
func (s *RateLimitingSampler) Close() {
// nothing to do
}
// Equal compares with another sampler.
func (s *RateLimitingSampler) Equal(other Sampler) bool {
if o, ok := other.(*RateLimitingSampler); ok {
return s.maxTracesPerSecond == o.maxTracesPerSecond
}
return false
}
// String is used to log sampler details.
func (s *RateLimitingSampler) String() string {
return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond)
}
// -----------------------
// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and
// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that
// every operation is sampled at least once in a time interval defined by the lowerBound, i.e. a lowerBound
// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
//
// The ProbabilisticSampler is given higher priority when tags are emitted, i.e. if IsSampled() returns
// true for both samplers, the tags from the ProbabilisticSampler will be used.
type GuaranteedThroughputProbabilisticSampler struct {
probabilisticSampler *ProbabilisticSampler
lowerBoundSampler *RateLimitingSampler
tags []Tag
samplingRate float64
lowerBound float64
}
// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
// ProbabilisticSampler and RateLimitingSampler.
func NewGuaranteedThroughputProbabilisticSampler(
lowerBound, samplingRate float64,
) (*GuaranteedThroughputProbabilisticSampler, error) {
return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil
}
func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler {
s := &GuaranteedThroughputProbabilisticSampler{
lowerBoundSampler: NewRateLimitingSampler(lowerBound),
lowerBound: lowerBound,
}
s.setProbabilisticSampler(samplingRate)
return s
}
func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
if s.probabilisticSampler == nil {
s.probabilisticSampler = newProbabilisticSampler(samplingRate)
} else if s.samplingRate != samplingRate {
s.probabilisticSampler.init(samplingRate)
}
// since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval
samplingRate = s.probabilisticSampler.SamplingRate()
if s.samplingRate != samplingRate || s.tags == nil {
s.samplingRate = s.probabilisticSampler.SamplingRate()
s.tags = []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
{key: SamplerParamTagKey, value: s.samplingRate},
}
}
}
// IsSampled implements IsSampled() of Sampler.
func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled {
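// still invoke the lower-bound sampler so that its rate-limiting credit is
// consumed for this trace as well (the boolean result is intentionally ignored)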
s.lowerBoundSampler.IsSampled(id, operation)
return true, tags
}
sampled, _ := s.lowerBoundSampler.IsSampled(id, operation)
return sampled, s.tags
}
// Close implements Close() of Sampler.
func (s *GuaranteedThroughputProbabilisticSampler) Close() {
s.probabilisticSampler.Close()
s.lowerBoundSampler.Close()
}
// Equal implements Equal() of Sampler.
func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
// NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
// more information.
return false
}
// this function should only be called while holding a Write lock
func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
s.setProbabilisticSampler(samplingRate)
if s.lowerBound != lowerBound {
s.lowerBoundSampler.Update(lowerBound)
s.lowerBound = lowerBound
}
}
func (s GuaranteedThroughputProbabilisticSampler) String() string {
return fmt.Sprintf("GuaranteedThroughputProbabilisticSampler(lowerBound=%f, samplingRate=%f)", s.lowerBound, s.samplingRate)
}
// -----------------------
// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler
// on a per-operation basis.
type PerOperationSampler struct {
sync.RWMutex
samplers map[string]*GuaranteedThroughputProbabilisticSampler
defaultSampler *ProbabilisticSampler
lowerBound float64
maxOperations int
// see description in PerOperationSamplerParams
operationNameLateBinding bool
}
// NewAdaptiveSampler returns a new PerOperationSampler.
// Deprecated: please use NewPerOperationSampler.
func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) {
return NewPerOperationSampler(PerOperationSamplerParams{
MaxOperations: maxOperations,
Strategies: strategies,
}), nil
}
// PerOperationSamplerParams defines parameters when creating PerOperationSampler.
type PerOperationSamplerParams struct {
// MaxOperations is the maximum number of operations that will be tracked. Other operations will be given the default strategy.
MaxOperations int
// Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName.
// When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving
// the sampling decision as non-final (and the span as writeable). This may lead to degraded performance
// in applications that always provide the correct span name on trace creation.
//
// For backwards compatibility this option is off by default.
OperationNameLateBinding bool
// Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler).
Strategies *sampling.PerOperationSamplingStrategies
}
// NewPerOperationSampler returns a new PerOperationSampler.
func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler {
if params.MaxOperations <= 0 {
params.MaxOperations = defaultMaxOperations
}
samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
for _, strategy := range params.Strategies.PerOperationStrategies {
sampler := newGuaranteedThroughputProbabilisticSampler(
params.Strategies.DefaultLowerBoundTracesPerSecond,
strategy.ProbabilisticSampling.SamplingRate,
)
samplers[strategy.Operation] = sampler
}
return &PerOperationSampler{
samplers: samplers,
defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability),
lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond,
maxOperations: params.MaxOperations,
operationNameLateBinding: params.OperationNameLateBinding,
}
}
// IsSampled is not used and only exists to match Sampler V1 API.
// TODO (breaking change) remove when upgrading everything to SamplerV2
func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
return false, nil
}
func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) {
samplerV1 := s.getSamplerForOperation(operationName)
var sampled bool
var tags []Tag
if span.context.samplingState.isLocalRootSpan(span.context.spanID) {
sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName)
}
return sampled, tags
}
// OnCreateSpan implements OnCreateSpan of SamplerV2.
func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision {
sampled, tags := s.trySampling(span, span.OperationName())
return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags}
}
// OnSetOperationName implements OnSetOperationName of SamplerV2.
func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
sampled, tags := s.trySampling(span, operationName)
return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags}
}
// OnSetTag implements OnSetTag of SamplerV2.
func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
return SamplingDecision{Sample: false, Retryable: true}
}
// OnFinishSpan implements OnFinishSpan of SamplerV2.
func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision {
return SamplingDecision{Sample: false, Retryable: true}
}
func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler {
s.RLock()
sampler, ok := s.samplers[operation]
if ok {
defer s.RUnlock()
return sampler
}
s.RUnlock()
s.Lock()
defer s.Unlock()
// Check if sampler has already been created
sampler, ok = s.samplers[operation]
if ok {
return sampler
}
// Store only up to maxOperations of unique ops.
if len(s.samplers) >= s.maxOperations {
return s.defaultSampler
}
newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
s.samplers[operation] = newSampler
return newSampler
}
// Close invokes Close on all underlying samplers.
func (s *PerOperationSampler) Close() {
s.Lock()
defer s.Unlock()
for _, sampler := range s.samplers {
sampler.Close()
}
s.defaultSampler.Close()
}
func (s *PerOperationSampler) String() string {
var sb strings.Builder
fmt.Fprintf(&sb, "PerOperationSampler(defaultSampler=%v, ", s.defaultSampler)
fmt.Fprintf(&sb, "lowerBound=%f, ", s.lowerBound)
fmt.Fprintf(&sb, "maxOperations=%d, ", s.maxOperations)
fmt.Fprintf(&sb, "operationNameLateBinding=%t, ", s.operationNameLateBinding)
fmt.Fprintf(&sb, "numOperations=%d,\n", len(s.samplers))
fmt.Fprintf(&sb, "samplers=[")
for operationName, sampler := range s.samplers {
fmt.Fprintf(&sb, "\n(operationName=%s, sampler=%v)", operationName, sampler)
}
fmt.Fprintf(&sb, "])")
return sb.String()
}
// Equal is not used.
// TODO (breaking change) remove this in the future
func (s *PerOperationSampler) Equal(other Sampler) bool {
// NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple
// samplers which all need to be initialized before this function can be called for a comparison.
// Therefore, PerOperationSampler uses the update() function to only alter the samplers that need
// changing. Hence this function always returns false so that the update function can be called.
// Once the Equal() function is removed from the Sampler API, this will no longer be needed.
return false
}
func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
s.Lock()
defer s.Unlock()
newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{}
for _, strategy := range strategies.PerOperationStrategies {
operation := strategy.Operation
samplingRate := strategy.ProbabilisticSampling.SamplingRate
lowerBound := strategies.DefaultLowerBoundTracesPerSecond
if sampler, ok := s.samplers[operation]; ok {
sampler.update(lowerBound, samplingRate)
newSamplers[operation] = sampler
} else {
sampler := newGuaranteedThroughputProbabilisticSampler(
lowerBound,
samplingRate,
)
newSamplers[operation] = sampler
}
}
s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
}
s.samplers = newSamplers
}
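To tie the samplers above together, here is a hedged sketch (not part of the vendored file; it assumes the package's NewTracer constructor) of constructing one and plugging it into a tracer; the 0.1% rate and names are placeholders.

package main

import (
    "github.com/uber/jaeger-client-go"
)

// newSampledTracer wires a probabilistic sampler into a tracer.
// Alternatives defined in this file:
//   jaeger.NewConstSampler(true)        sample everything (dev/test)
//   jaeger.NewRateLimitingSampler(10.0) at most ~10 traces per second
func newSampledTracer() error {
    sampler, err := jaeger.NewProbabilisticSampler(0.001) // ~1 trace in 1000
    if err != nil {
        return err
    }
    tracer, closer := jaeger.NewTracer("example-service", sampler, jaeger.NewNullReporter())
    defer closer.Close()

    tracer.StartSpan("demo").Finish()
    return nil
}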

View File

@@ -0,0 +1,358 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"sync"
"sync/atomic"
"time"
"github.com/uber/jaeger-client-go/log"
"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)
const (
defaultRemoteSamplingTimeout = 10 * time.Second
defaultSamplingRefreshInterval = time.Minute
)
// SamplingStrategyFetcher is used to fetch sampling strategy updates from remote server.
type SamplingStrategyFetcher interface {
Fetch(service string) ([]byte, error)
}
// SamplingStrategyParser is used to parse sampling strategy updates. The output object
// should be of the type that is recognized by the SamplerUpdaters.
type SamplingStrategyParser interface {
Parse(response []byte) (interface{}, error)
}
// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies,
// retrieved from remote config server, to the current sampler. The updater can modify
// the sampler in-place if sampler supports it, or create a new one.
//
// If the strategy does not contain configuration for the sampler in question,
// updater must return modifiedSampler=nil to give other updaters a chance to inspect
// the sampling strategy response.
//
// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler.
type SamplerUpdater interface {
Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error)
}
// RemotelyControlledSampler is a delegating sampler that polls a remote server
// for the appropriate sampling strategy, constructs a corresponding sampler and
// delegates to it for sampling decisions.
type RemotelyControlledSampler struct {
// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
closed int64 // 0 - not closed, 1 - closed
sync.RWMutex // used to serialize access to samplerOptions.sampler
samplerOptions
serviceName string
doneChan chan *sync.WaitGroup
}
// NewRemotelyControlledSampler creates a sampler that periodically pulls
// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
func NewRemotelyControlledSampler(
serviceName string,
opts ...SamplerOption,
) *RemotelyControlledSampler {
options := new(samplerOptions).applyOptionsAndDefaults(opts...)
sampler := &RemotelyControlledSampler{
samplerOptions: *options,
serviceName: serviceName,
doneChan: make(chan *sync.WaitGroup),
}
go sampler.pollController()
return sampler
}
// IsSampled implements IsSampled() of Sampler.
// TODO (breaking change) remove when Sampler V1 is removed
func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
return false, nil
}
// OnCreateSpan implements OnCreateSpan of SamplerV2.
func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
s.RLock()
defer s.RUnlock()
return s.sampler.OnCreateSpan(span)
}
// OnSetOperationName implements OnSetOperationName of SamplerV2.
func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
s.RLock()
defer s.RUnlock()
return s.sampler.OnSetOperationName(span, operationName)
}
// OnSetTag implements OnSetTag of SamplerV2.
func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
s.RLock()
defer s.RUnlock()
return s.sampler.OnSetTag(span, key, value)
}
// OnFinishSpan implements OnFinishSpan of SamplerV2.
func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
s.RLock()
defer s.RUnlock()
return s.sampler.OnFinishSpan(span)
}
// Close implements Close() of Sampler.
func (s *RemotelyControlledSampler) Close() {
if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
s.logger.Error("Repeated attempt to close the sampler is ignored")
return
}
var wg sync.WaitGroup
wg.Add(1)
s.doneChan <- &wg
wg.Wait()
}
// Equal implements Equal() of Sampler.
func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
// NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
// more information.
return false
}
func (s *RemotelyControlledSampler) pollController() {
ticker := time.NewTicker(s.samplingRefreshInterval)
defer ticker.Stop()
s.pollControllerWithTicker(ticker)
}
func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
for {
select {
case <-ticker.C:
s.UpdateSampler()
case wg := <-s.doneChan:
wg.Done()
return
}
}
}
// Sampler returns the currently active sampler.
func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
s.RLock()
defer s.RUnlock()
return s.sampler
}
func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
s.Lock()
defer s.Unlock()
s.sampler = sampler
}
// UpdateSampler forces the sampler to fetch sampling strategy from backend server.
// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests.
func (s *RemotelyControlledSampler) UpdateSampler() {
res, err := s.samplingFetcher.Fetch(s.serviceName)
if err != nil {
s.metrics.SamplerQueryFailure.Inc(1)
s.logger.Infof("failed to fetch sampling strategy: %v", err)
return
}
strategy, err := s.samplingParser.Parse(res)
if err != nil {
s.metrics.SamplerUpdateFailure.Inc(1)
s.logger.Infof("failed to parse sampling strategy response: %v", err)
return
}
s.Lock()
defer s.Unlock()
s.metrics.SamplerRetrieved.Inc(1)
if err := s.updateSamplerViaUpdaters(strategy); err != nil {
s.metrics.SamplerUpdateFailure.Inc(1)
s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err)
return
}
s.metrics.SamplerUpdated.Inc(1)
}
// NB: this function should only be called while holding a Write lock
func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error {
for _, updater := range s.updaters {
sampler, err := updater.Update(s.sampler, strategy)
if err != nil {
return err
}
if sampler != nil {
s.logger.Debugf("sampler updated: %+v", sampler)
s.sampler = sampler
return nil
}
}
return fmt.Errorf("unsupported sampling strategy %+v", strategy)
}
// -----------------------
// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
type ProbabilisticSamplerUpdater struct{}
// Update implements Update of SamplerUpdater.
func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
type response interface {
GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy
}
var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
if resp, ok := strategy.(response); ok {
if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil {
if ps, ok := sampler.(*ProbabilisticSampler); ok {
if err := ps.Update(probabilistic.SamplingRate); err != nil {
return nil, err
}
return sampler, nil
}
return newProbabilisticSampler(probabilistic.SamplingRate), nil
}
}
return nil, nil
}
// -----------------------
// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
type RateLimitingSamplerUpdater struct{}
// Update implements Update of SamplerUpdater.
func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
type response interface {
GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy
}
var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
if resp, ok := strategy.(response); ok {
if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil {
rateLimit := float64(rateLimiting.MaxTracesPerSecond)
if rl, ok := sampler.(*RateLimitingSampler); ok {
rl.Update(rateLimit)
return rl, nil
}
return NewRateLimitingSampler(rateLimit), nil
}
}
return nil, nil
}
// -----------------------
// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
// Fields have the same meaning as in PerOperationSamplerParams.
type AdaptiveSamplerUpdater struct {
MaxOperations int
OperationNameLateBinding bool
}
// Update implements Update of SamplerUpdater.
func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
type response interface {
GetOperationSampling() *sampling.PerOperationSamplingStrategies
}
var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
if p, ok := strategy.(response); ok {
if operations := p.GetOperationSampling(); operations != nil {
if as, ok := sampler.(*PerOperationSampler); ok {
as.update(operations)
return as, nil
}
return NewPerOperationSampler(PerOperationSamplerParams{
MaxOperations: u.MaxOperations,
OperationNameLateBinding: u.OperationNameLateBinding,
Strategies: operations,
}), nil
}
}
return nil, nil
}
// -----------------------
type httpSamplingStrategyFetcher struct {
serverURL string
logger log.DebugLogger
httpClient http.Client
}
func newHTTPSamplingStrategyFetcher(serverURL string, logger log.DebugLogger) *httpSamplingStrategyFetcher {
customTransport := http.DefaultTransport.(*http.Transport).Clone()
customTransport.ResponseHeaderTimeout = defaultRemoteSamplingTimeout
return &httpSamplingStrategyFetcher{
serverURL: serverURL,
logger: logger,
httpClient: http.Client{
Transport: customTransport,
},
}
}
func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
v := url.Values{}
v.Set("service", serviceName)
uri := f.serverURL + "?" + v.Encode()
resp, err := f.httpClient.Get(uri)
if err != nil {
return nil, err
}
defer func() {
if err := resp.Body.Close(); err != nil {
f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err))
}
}()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
}
return body, nil
}
// -----------------------
type samplingStrategyParser struct{}
func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) {
strategy := new(sampling.SamplingStrategyResponse)
if err := json.Unmarshal(response, strategy); err != nil {
return nil, err
}
return strategy, nil
}

View File

@@ -0,0 +1,159 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"time"
"github.com/uber/jaeger-client-go/log"
)
// SamplerOption is a function that sets some option on the sampler
type SamplerOption func(options *samplerOptions)
// SamplerOptions is a factory for all available SamplerOption's.
var SamplerOptions SamplerOptionsFactory
// SamplerOptionsFactory is a factory for all available SamplerOption's.
// The type acts as a namespace for factory functions. It is public to
// make the functions discoverable via godoc. It is recommended to be used
// via the global SamplerOptions variable.
type SamplerOptionsFactory struct{}
type samplerOptions struct {
metrics *Metrics
sampler SamplerV2
logger log.DebugLogger
samplingServerURL string
samplingRefreshInterval time.Duration
samplingFetcher SamplingStrategyFetcher
samplingParser SamplingStrategyParser
updaters []SamplerUpdater
posParams PerOperationSamplerParams
}
// Metrics creates a SamplerOption that initializes Metrics on the sampler,
// which is used to emit statistics.
func (SamplerOptionsFactory) Metrics(m *Metrics) SamplerOption {
return func(o *samplerOptions) {
o.metrics = m
}
}
// MaxOperations creates a SamplerOption that sets the maximum number of
// operations the sampler will keep track of.
func (SamplerOptionsFactory) MaxOperations(maxOperations int) SamplerOption {
return func(o *samplerOptions) {
o.posParams.MaxOperations = maxOperations
}
}
// OperationNameLateBinding creates a SamplerOption that sets the respective
// field in the PerOperationSamplerParams.
func (SamplerOptionsFactory) OperationNameLateBinding(enable bool) SamplerOption {
return func(o *samplerOptions) {
o.posParams.OperationNameLateBinding = enable
}
}
// InitialSampler creates a SamplerOption that sets the initial sampler
// to use before a remote sampler is created and used.
func (SamplerOptionsFactory) InitialSampler(sampler Sampler) SamplerOption {
return func(o *samplerOptions) {
o.sampler = samplerV1toV2(sampler)
}
}
// Logger creates a SamplerOption that sets the logger used by the sampler.
func (SamplerOptionsFactory) Logger(logger Logger) SamplerOption {
return func(o *samplerOptions) {
o.logger = log.DebugLogAdapter(logger)
}
}
// SamplingServerURL creates a SamplerOption that sets the sampling server url
// of the local agent that contains the sampling strategies.
func (SamplerOptionsFactory) SamplingServerURL(samplingServerURL string) SamplerOption {
return func(o *samplerOptions) {
o.samplingServerURL = samplingServerURL
}
}
// SamplingRefreshInterval creates a SamplerOption that sets how often the
// sampler will poll the local agent for the appropriate sampling strategy.
func (SamplerOptionsFactory) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
return func(o *samplerOptions) {
o.samplingRefreshInterval = samplingRefreshInterval
}
}
// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher.
func (SamplerOptionsFactory) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption {
return func(o *samplerOptions) {
o.samplingFetcher = fetcher
}
}
// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser.
func (SamplerOptionsFactory) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption {
return func(o *samplerOptions) {
o.samplingParser = parser
}
}
// Updaters creates a SamplerOption that initializes sampler updaters.
func (SamplerOptionsFactory) Updaters(updaters ...SamplerUpdater) SamplerOption {
return func(o *samplerOptions) {
o.updaters = updaters
}
}
func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions {
for _, option := range opts {
option(o)
}
if o.sampler == nil {
o.sampler = newProbabilisticSampler(0.001)
}
if o.logger == nil {
o.logger = log.NullLogger
}
if o.samplingServerURL == "" {
o.samplingServerURL = DefaultSamplingServerURL
}
if o.metrics == nil {
o.metrics = NewNullMetrics()
}
if o.samplingRefreshInterval <= 0 {
o.samplingRefreshInterval = defaultSamplingRefreshInterval
}
if o.samplingFetcher == nil {
o.samplingFetcher = newHTTPSamplingStrategyFetcher(o.samplingServerURL, o.logger)
}
if o.samplingParser == nil {
o.samplingParser = new(samplingStrategyParser)
}
if o.updaters == nil {
o.updaters = []SamplerUpdater{
&AdaptiveSamplerUpdater{
MaxOperations: o.posParams.MaxOperations,
OperationNameLateBinding: o.posParams.OperationNameLateBinding,
},
new(ProbabilisticSamplerUpdater),
new(RateLimitingSamplerUpdater),
}
}
return o
}
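For context, these options are consumed by NewRemotelyControlledSampler (defined in sampler_remote.go earlier in this commit), with applyOptionsAndDefaults filling in the probabilistic 0.001 initial sampler, the null logger and metrics, and the default fetcher, parser and updaters. A hedged usage sketch follows; the service name is hypothetical, and any option left out falls back to the defaults above.

package main

import (
	"time"

	"github.com/uber/jaeger-client-go"
)

func main() {
	// Override a few defaults; everything else is supplied by
	// applyOptionsAndDefaults.
	sampler := jaeger.NewRemotelyControlledSampler(
		"my-service", // hypothetical service name
		jaeger.SamplerOptions.SamplingServerURL("http://127.0.0.1:5778/sampling"),
		jaeger.SamplerOptions.SamplingRefreshInterval(30*time.Second),
		jaeger.SamplerOptions.InitialSampler(jaeger.NewConstSampler(false)),
	)
	defer sampler.Close()
}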

93
vendor/github.com/uber/jaeger-client-go/sampler_v2.go generated vendored Normal file
View File

@@ -0,0 +1,93 @@
// Copyright (c) 2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
// SamplingDecision is returned by the V2 samplers.
type SamplingDecision struct {
Sample bool
Retryable bool
Tags []Tag
}
// SamplerV2 is an extension of the V1 samplers that allows sampling decisions
// be made at different points of the span lifecycle.
type SamplerV2 interface {
OnCreateSpan(span *Span) SamplingDecision
OnSetOperationName(span *Span, operationName string) SamplingDecision
OnSetTag(span *Span, key string, value interface{}) SamplingDecision
OnFinishSpan(span *Span) SamplingDecision
// Close does a clean shutdown of the sampler, stopping any background
// go-routines it may have started.
Close()
}
// samplerV1toV2 wraps a legacy V1 sampler into an adapter that makes it look like V2.
func samplerV1toV2(s Sampler) SamplerV2 {
if s2, ok := s.(SamplerV2); ok {
return s2
}
type legacySamplerV1toV2Adapter struct {
legacySamplerV1Base
}
return &legacySamplerV1toV2Adapter{
legacySamplerV1Base: legacySamplerV1Base{
delegate: s.IsSampled,
},
}
}
// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods.
// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler
// for backwards compatibility reasons.
// TODO (breaking change) remove this in the next major release
type SamplerV2Base struct{}
// IsSampled implements IsSampled of Sampler.
func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) {
return false, nil
}
// Close implements Close of Sampler.
func (SamplerV2Base) Close() {}
// Equal implements Equal of Sampler.
func (SamplerV2Base) Equal(other Sampler) bool { return false }
// legacySamplerV1Base is used as a base for simple samplers that only implement
// the legacy isSampled() function that is not sensitive to its arguments.
type legacySamplerV1Base struct {
delegate func(id TraceID, operation string) (sampled bool, tags []Tag)
}
func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision {
isSampled, tags := s.delegate(span.context.traceID, span.operationName)
return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
}
func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision {
isSampled, tags := s.delegate(span.context.traceID, span.operationName)
return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
}
func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
return SamplingDecision{Sample: false, Retryable: true}
}
func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision {
return SamplingDecision{Sample: false, Retryable: true}
}
func (s *legacySamplerV1Base) Close() {}
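The V2 interface lets a sampler defer its decision across the span lifecycle: as long as a decision is Retryable, the span stays writable and the sampler is consulted again on later events. Below is an illustrative sketch (not part of the vendored sources) of a custom SamplerV2 that finalizes sampling only when a particular tag appears; the trigger tag key is an assumption.

package main

import "github.com/uber/jaeger-client-go"

// tagTriggeredSampler keeps the decision open (Retryable) until it sees a
// chosen tag key, then finalizes the span as sampled. SamplerV2Base supplies
// the dummy V1 methods required by the Tracer configuration.
type tagTriggeredSampler struct {
	jaeger.SamplerV2Base
	key string
}

var _ jaeger.SamplerV2 = (*tagTriggeredSampler)(nil)

func (s *tagTriggeredSampler) OnCreateSpan(*jaeger.Span) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (s *tagTriggeredSampler) OnSetOperationName(*jaeger.Span, string) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (s *tagTriggeredSampler) OnSetTag(span *jaeger.Span, key string, value interface{}) jaeger.SamplingDecision {
	if key == s.key {
		return jaeger.SamplingDecision{Sample: true, Retryable: false}
	}
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (s *tagTriggeredSampler) OnFinishSpan(*jaeger.Span) jaeger.SamplingDecision {
	// The last chance to sample has passed; finalize as not sampled.
	return jaeger.SamplingDecision{Sample: false, Retryable: false}
}

func (s *tagTriggeredSampler) Close() {}

func main() {
	_ = &tagTriggeredSampler{key: "error"} // hypothetical trigger tag
}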

503
vendor/github.com/uber/jaeger-client-go/span.go generated vendored Normal file
View File

@@ -0,0 +1,503 @@
// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"sync"
"sync/atomic"
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
)
// Span implements opentracing.Span
type Span struct {
// referenceCounter is used to increase the lifetime of
// the object before returning it to the pool.
referenceCounter int32
sync.RWMutex
tracer *Tracer
// TODO: (breaking change) change to use a pointer
context SpanContext
// The name of the "operation" this span is an instance of.
// Known as a "span name" in some implementations.
operationName string
// firstInProcess, if true, indicates that this span is the root of the (sub)tree
// of spans in the current process. In other words it's true for the root spans,
// and the ingress spans when the process joins another trace.
firstInProcess bool
// startTime is the timestamp indicating when the span began, with microsecond precision.
startTime time.Time
// duration is the duration of the span, with microsecond precision.
// Zero value means duration is unknown.
duration time.Duration
// tags attached to this span
tags []Tag
// The span's "micro-log"
logs []opentracing.LogRecord
// The number of logs dropped because of MaxLogsPerSpan.
numDroppedLogs int
// references for this span
references []Reference
observer ContribSpanObserver
}
// Tag is a simple key value wrapper.
// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
type Tag struct {
key string
value interface{}
}
// NewTag creates a new Tag.
// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
func NewTag(key string, value interface{}) Tag {
return Tag{key: key, value: value}
}
// SetOperationName sets or changes the operation name.
func (s *Span) SetOperationName(operationName string) opentracing.Span {
s.Lock()
s.operationName = operationName
ctx := s.context
s.Unlock()
if !ctx.isSamplingFinalized() {
decision := s.tracer.sampler.OnSetOperationName(s, operationName)
s.applySamplingDecision(decision, true)
}
s.observer.OnSetOperationName(operationName)
return s
}
// SetTag implements SetTag() of opentracing.Span
func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
return s.setTagInternal(key, value, true)
}
func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span {
var ctx SpanContext
var operationName string
if lock {
ctx = s.SpanContext()
operationName = s.OperationName()
} else {
ctx = s.context
operationName = s.operationName
}
s.observer.OnSetTag(key, value)
if key == string(ext.SamplingPriority) && !setSamplingPriority(ctx.samplingState, operationName, s.tracer, value) {
return s
}
if !ctx.isSamplingFinalized() {
decision := s.tracer.sampler.OnSetTag(s, key, value)
s.applySamplingDecision(decision, lock)
}
if ctx.isWriteable() {
if lock {
s.Lock()
defer s.Unlock()
}
s.appendTagNoLocking(key, value)
}
return s
}
// SpanContext returns span context
func (s *Span) SpanContext() SpanContext {
s.Lock()
defer s.Unlock()
return s.context
}
// StartTime returns span start time
func (s *Span) StartTime() time.Time {
s.Lock()
defer s.Unlock()
return s.startTime
}
// Duration returns span duration
func (s *Span) Duration() time.Duration {
s.Lock()
defer s.Unlock()
return s.duration
}
// Tags returns tags for span
func (s *Span) Tags() opentracing.Tags {
s.Lock()
defer s.Unlock()
var result = make(opentracing.Tags, len(s.tags))
for _, tag := range s.tags {
result[tag.key] = tag.value
}
return result
}
// Logs returns micro logs for span
func (s *Span) Logs() []opentracing.LogRecord {
s.Lock()
defer s.Unlock()
logs := append([]opentracing.LogRecord(nil), s.logs...)
if s.numDroppedLogs != 0 {
fixLogs(logs, s.numDroppedLogs)
}
return logs
}
// References returns references for this span
func (s *Span) References() []opentracing.SpanReference {
s.Lock()
defer s.Unlock()
if s.references == nil || len(s.references) == 0 {
return nil
}
result := make([]opentracing.SpanReference, len(s.references))
for i, r := range s.references {
result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context}
}
return result
}
func (s *Span) appendTagNoLocking(key string, value interface{}) {
s.tags = append(s.tags, Tag{key: key, value: value})
}
// LogFields implements opentracing.Span API
func (s *Span) LogFields(fields ...log.Field) {
s.Lock()
defer s.Unlock()
if !s.context.IsSampled() {
return
}
s.logFieldsNoLocking(fields...)
}
// this function should only be called while holding a Write lock
func (s *Span) logFieldsNoLocking(fields ...log.Field) {
lr := opentracing.LogRecord{
Fields: fields,
Timestamp: time.Now(),
}
s.appendLogNoLocking(lr)
}
// LogKV implements opentracing.Span API
func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
s.RLock()
sampled := s.context.IsSampled()
s.RUnlock()
if !sampled {
return
}
fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
if err != nil {
s.LogFields(log.Error(err), log.String("function", "LogKV"))
return
}
s.LogFields(fields...)
}
// LogEvent implements opentracing.Span API
func (s *Span) LogEvent(event string) {
s.Log(opentracing.LogData{Event: event})
}
// LogEventWithPayload implements opentracing.Span API
func (s *Span) LogEventWithPayload(event string, payload interface{}) {
s.Log(opentracing.LogData{Event: event, Payload: payload})
}
// Log implements opentracing.Span API
func (s *Span) Log(ld opentracing.LogData) {
s.Lock()
defer s.Unlock()
if s.context.IsSampled() {
if ld.Timestamp.IsZero() {
ld.Timestamp = s.tracer.timeNow()
}
s.appendLogNoLocking(ld.ToLogRecord())
}
}
// this function should only be called while holding a Write lock
func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) {
maxLogs := s.tracer.options.maxLogsPerSpan
if maxLogs == 0 || len(s.logs) < maxLogs {
s.logs = append(s.logs, lr)
return
}
// We have too many logs. We don't touch the first numOld logs; we treat the
// rest as a circular buffer and overwrite the oldest log among those.
numOld := (maxLogs - 1) / 2
numNew := maxLogs - numOld
s.logs[numOld+s.numDroppedLogs%numNew] = lr
s.numDroppedLogs++
}
// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move to
// the end (i.e. pos circular left shifts).
func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
// This algorithm is described in:
// http://www.cplusplus.com/reference/algorithm/rotate
for first, middle, next := 0, pos, pos; first != middle; {
buf[first], buf[next] = buf[next], buf[first]
first++
next++
if next == len(buf) {
next = middle
} else if first == middle {
middle = next
}
}
}
func fixLogs(logs []opentracing.LogRecord, numDroppedLogs int) {
// We dropped some log events, which means that we used part of Logs as a
// circular buffer (see appendLog). De-circularize it.
numOld := (len(logs) - 1) / 2
numNew := len(logs) - numOld
rotateLogBuffer(logs[numOld:], numDroppedLogs%numNew)
// Replace the log in the middle (the oldest "new" log) with information
// about the dropped logs. This means that we are effectively dropping one
// more "new" log.
numDropped := numDroppedLogs + 1
logs[numOld] = opentracing.LogRecord{
// Keep the timestamp of the last dropped event.
Timestamp: logs[numOld].Timestamp,
Fields: []log.Field{
log.String("event", "dropped Span logs"),
log.Int("dropped_log_count", numDropped),
log.String("component", "jaeger-client"),
},
}
}
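// Worked example (illustrative, not part of the original source): with
// maxLogsPerSpan = 5, numOld = 2 and numNew = 3. After appending logs L1..L9,
// the circular buffer holds [L1 L2 L9 L7 L8] with numDroppedLogs = 4.
// fixLogs rotates the "new" portion back into order, [L1 L2 L7 L8 L9], and
// replaces the middle record, yielding
// [L1 L2 <"dropped Span logs", dropped_log_count=5> L8 L9].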
func (s *Span) fixLogsIfDropped() {
if s.numDroppedLogs == 0 {
return
}
fixLogs(s.logs, s.numDroppedLogs)
s.numDroppedLogs = 0
}
// SetBaggageItem implements SetBaggageItem() of opentracing.Span.
// The call is proxied via tracer.baggageSetter to allow policies to be applied
// before allowing to set/replace baggage keys.
// The setter eventually stores a new SpanContext with extended baggage:
//
// span.context = span.context.WithBaggageItem(key, value)
//
// See SpanContext.WithBaggageItem() for explanation why it's done this way.
func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
s.Lock()
defer s.Unlock()
s.tracer.setBaggage(s, key, value)
return s
}
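// Example (illustrative): span.SetBaggageItem("tenant", "acme") stores a new
// SpanContext with the extended baggage on the span; spans started later with
// this span as parent inherit the item and can read it back via
// span.BaggageItem("tenant"). The key and value here are hypothetical.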
// BaggageItem implements BaggageItem() of opentracing.Span
func (s *Span) BaggageItem(key string) string {
s.RLock()
defer s.RUnlock()
return s.context.baggage[key]
}
// Finish implements opentracing.Span API
// After Finish() is called, the Span object is returned to the allocator unless the reporter retains it,
// so the Span should no longer be used afterwards because it may no longer be valid.
func (s *Span) Finish() {
s.FinishWithOptions(opentracing.FinishOptions{})
}
// FinishWithOptions implements opentracing.Span API
func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
if options.FinishTime.IsZero() {
options.FinishTime = s.tracer.timeNow()
}
s.observer.OnFinish(options)
s.Lock()
s.duration = options.FinishTime.Sub(s.startTime)
ctx := s.context
s.Unlock()
if !ctx.isSamplingFinalized() {
decision := s.tracer.sampler.OnFinishSpan(s)
s.applySamplingDecision(decision, true)
}
if ctx.IsSampled() {
s.Lock()
s.fixLogsIfDropped()
if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
// Note: bulk logs are not subject to maxLogsPerSpan limit
if options.LogRecords != nil {
s.logs = append(s.logs, options.LogRecords...)
}
for _, ld := range options.BulkLogData {
s.logs = append(s.logs, ld.ToLogRecord())
}
}
s.Unlock()
}
// call reportSpan even for non-sampled traces, to return span to the pool
// and update metrics counter
s.tracer.reportSpan(s)
}
// Context implements opentracing.Span API
func (s *Span) Context() opentracing.SpanContext {
s.Lock()
defer s.Unlock()
return s.context
}
// Tracer implements opentracing.Span API
func (s *Span) Tracer() opentracing.Tracer {
return s.tracer
}
func (s *Span) String() string {
s.RLock()
defer s.RUnlock()
return s.context.String()
}
// OperationName allows retrieving current operation name.
func (s *Span) OperationName() string {
s.RLock()
defer s.RUnlock()
return s.operationName
}
// Retain increments the reference counter to extend the lifetime of the object
func (s *Span) Retain() *Span {
atomic.AddInt32(&s.referenceCounter, 1)
return s
}
// Release decrements the reference counter and returns the span to the
// allocator when the counter drops below zero.
func (s *Span) Release() {
if atomic.AddInt32(&s.referenceCounter, -1) == -1 {
s.tracer.spanAllocator.Put(s)
}
}
// reset span state and release unused data
func (s *Span) reset() {
s.firstInProcess = false
s.context = emptyContext
s.operationName = ""
s.tracer = nil
s.startTime = time.Time{}
s.duration = 0
s.observer = nil
atomic.StoreInt32(&s.referenceCounter, 0)
// Note: To reuse memory we can save the pointers on the heap
s.tags = s.tags[:0]
s.logs = s.logs[:0]
s.numDroppedLogs = 0
s.references = s.references[:0]
}
func (s *Span) serviceName() string {
return s.tracer.serviceName
}
func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) {
var ctx SpanContext
if lock {
ctx = s.SpanContext()
} else {
ctx = s.context
}
if !decision.Retryable {
ctx.samplingState.setFinal()
}
if decision.Sample {
ctx.samplingState.setSampled()
if len(decision.Tags) > 0 {
if lock {
s.Lock()
defer s.Unlock()
}
for _, tag := range decision.Tags {
s.appendTagNoLocking(tag.key, tag.value)
}
}
}
}
// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
// The behavior of setSamplingPriority is surprising:
// - if noDebugFlagOnForcedSampling is set, setSamplingPriority(..., 1) always sets only flagSampled;
// - if noDebugFlagOnForcedSampling is unset and isDebugAllowed passes, setSamplingPriority(..., 1) sets both flagSampled and flagDebug;
// - however, setSamplingPriority(..., 0) always resets only flagSampled.
//
// This means that doing a setSamplingPriority(..., 1) followed by setSamplingPriority(..., 0) can
// leave flagDebug set.
func setSamplingPriority(state *samplingState, operationName string, tracer *Tracer, value interface{}) bool {
val, ok := value.(uint16)
if !ok {
return false
}
if val == 0 {
state.unsetSampled()
state.setFinal()
return true
}
if tracer.options.noDebugFlagOnForcedSampling {
state.setSampled()
state.setFinal()
return true
} else if tracer.isDebugAllowed(operationName) {
state.setDebugAndSampled()
state.setFinal()
return true
}
return false
}
// EnableFirehose enables firehose flag on the span context
func EnableFirehose(s *Span) {
s.Lock()
defer s.Unlock()
s.context.samplingState.setFirehose()
}
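To see setSamplingPriority and the debug flag in action from client code, here is a minimal, hedged sketch (not part of the vendored sources); the service and operation names are hypothetical, and NewTracer, NewConstSampler and NewNullReporter are the public constructors from this package.

package main

import (
	"github.com/opentracing/opentracing-go/ext"
	"github.com/uber/jaeger-client-go"
)

func main() {
	// The const(false) sampler would never sample on its own; the
	// sampling.priority tag forces the decision via setSamplingPriority above.
	tracer, closer := jaeger.NewTracer(
		"priority-demo", // hypothetical service name
		jaeger.NewConstSampler(false),
		jaeger.NewNullReporter(),
	)
	defer closer.Close()

	span := tracer.StartSpan("important-operation")
	ext.SamplingPriority.Set(span, 1) // sets flagSampled (and flagDebug if debug is allowed)
	span.Finish()
}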

View File

@@ -0,0 +1,56 @@
// Copyright (c) 2019 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import "sync"
// SpanAllocator abstraction of managing span allocations
type SpanAllocator interface {
Get() *Span
Put(*Span)
}
type syncPollSpanAllocator struct {
spanPool sync.Pool
}
func newSyncPollSpanAllocator() SpanAllocator {
return &syncPollSpanAllocator{
spanPool: sync.Pool{New: func() interface{} {
return &Span{}
}},
}
}
func (pool *syncPollSpanAllocator) Get() *Span {
return pool.spanPool.Get().(*Span)
}
func (pool *syncPollSpanAllocator) Put(span *Span) {
span.reset()
pool.spanPool.Put(span)
}
type simpleSpanAllocator struct{}
func (pool simpleSpanAllocator) Get() *Span {
return &Span{}
}
func (pool simpleSpanAllocator) Put(span *Span) {
// @comment https://github.com/jaegertracing/jaeger-client-go/pull/381#issuecomment-475904351
// since finished spans are not reused, no need to reset them
// span.reset()
}
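The two allocators trade simplicity for allocation churn: simpleSpanAllocator lets the GC reclaim finished spans, while syncPollSpanAllocator recycles them through a sync.Pool, which is why Retain/Release reference counting exists on Span. A hedged sketch of opting into pooling follows, assuming the public PoolSpans tracer option (which selects the pool-backed allocator); the service name is hypothetical.

package main

import "github.com/uber/jaeger-client-go"

func main() {
	// PoolSpans(true) selects the sync.Pool-backed allocator; the default is
	// the simple allocator, since finished spans are normally not reused.
	tracer, closer := jaeger.NewTracer(
		"pooled-spans-demo", // hypothetical service name
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
		jaeger.TracerOptions.PoolSpans(true),
	)
	defer closer.Close()
	_ = tracer
}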

418
vendor/github.com/uber/jaeger-client-go/span_context.go generated vendored Normal file
View File

@@ -0,0 +1,418 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"errors"
"fmt"
"strconv"
"strings"
"sync"
"go.uber.org/atomic"
)
const (
flagSampled = 1
flagDebug = 2
flagFirehose = 8
)
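// For example (illustrative note): a context whose flags byte is 3 has both
// flagSampled (1) and flagDebug (2) set, while 9 means sampled plus firehose (8).
// These bits appear as the last field of SpanContext.String().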
var (
errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state")
errMalformedTracerStateString = errors.New("String does not match tracer state format")
emptyContext = SpanContext{}
)
// TraceID represents unique 128bit identifier of a trace
type TraceID struct {
High, Low uint64
}
// SpanID represents unique 64bit identifier of a span
type SpanID uint64
// SpanContext represents propagated span identity and state
type SpanContext struct {
// traceID represents globally unique ID of the trace.
// Usually generated as a random number.
traceID TraceID
// spanID represents span ID that must be unique within its trace,
// but does not have to be globally unique.
spanID SpanID
// parentID refers to the ID of the parent span.
// Should be 0 if the current span is a root span.
parentID SpanID
// Distributed Context baggage. This is a snapshot in time.
baggage map[string]string
// debugID can be set to some correlation ID when the context is being
// extracted from a TextMap carrier.
//
// See JaegerDebugHeader in constants.go
debugID string
// samplingState is shared across all spans
samplingState *samplingState
// remote indicates that span context represents a remote parent
remote bool
}
type samplingState struct {
// Span context's state flags that are propagated across processes. Only lower 8 bits are used.
// We use an int32 instead of byte to be able to use CAS operations.
stateFlags atomic.Int32
// When state is not final, sampling will be retried on other span write operations,
// like SetOperationName / SetTag, and the spans will remain writable.
final atomic.Bool
// localRootSpan stores the SpanID of the first span created in this process for a given trace.
localRootSpan SpanID
// extendedState allows samplers to keep intermediate state.
// The keys and values in this map are completely opaque: interface{} -> interface{}.
extendedState sync.Map
}
func (s *samplingState) isLocalRootSpan(id SpanID) bool {
return id == s.localRootSpan
}
func (s *samplingState) setFlag(newFlag int32) {
swapped := false
for !swapped {
old := s.stateFlags.Load()
swapped = s.stateFlags.CAS(old, old|newFlag)
}
}
func (s *samplingState) unsetFlag(newFlag int32) {
swapped := false
for !swapped {
old := s.stateFlags.Load()
swapped = s.stateFlags.CAS(old, old&^newFlag)
}
}
func (s *samplingState) setSampled() {
s.setFlag(flagSampled)
}
func (s *samplingState) unsetSampled() {
s.unsetFlag(flagSampled)
}
func (s *samplingState) setDebugAndSampled() {
s.setFlag(flagDebug | flagSampled)
}
func (s *samplingState) setFirehose() {
s.setFlag(flagFirehose)
}
func (s *samplingState) setFlags(flags byte) {
s.stateFlags.Store(int32(flags))
}
func (s *samplingState) setFinal() {
s.final.Store(true)
}
func (s *samplingState) flags() byte {
return byte(s.stateFlags.Load())
}
func (s *samplingState) isSampled() bool {
return s.stateFlags.Load()&flagSampled == flagSampled
}
func (s *samplingState) isDebug() bool {
return s.stateFlags.Load()&flagDebug == flagDebug
}
func (s *samplingState) isFirehose() bool {
return s.stateFlags.Load()&flagFirehose == flagFirehose
}
func (s *samplingState) isFinal() bool {
return s.final.Load()
}
func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} {
if value, ok := s.extendedState.Load(key); ok {
return value
}
value := initValue()
value, _ = s.extendedState.LoadOrStore(key, value)
return value
}
// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
for k, v := range c.baggage {
if !handler(k, v) {
break
}
}
}
// IsSampled returns whether this trace was chosen for permanent storage
// by the sampling mechanism of the tracer.
func (c SpanContext) IsSampled() bool {
return c.samplingState.isSampled()
}
// IsDebug indicates whether sampling was explicitly requested by the service.
func (c SpanContext) IsDebug() bool {
return c.samplingState.isDebug()
}
// IsSamplingFinalized indicates whether the sampling decision has been finalized.
func (c SpanContext) IsSamplingFinalized() bool {
return c.samplingState.isFinal()
}
// IsFirehose indicates whether the firehose flag was set
func (c SpanContext) IsFirehose() bool {
return c.samplingState.isFirehose()
}
// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist,
// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler).
func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} {
return c.samplingState.extendedStateForKey(key, initValue)
}
// IsValid indicates whether this context actually represents a valid trace.
func (c SpanContext) IsValid() bool {
return c.traceID.IsValid() && c.spanID != 0
}
// SetFirehose enables firehose mode for this trace.
func (c SpanContext) SetFirehose() {
c.samplingState.setFirehose()
}
func (c SpanContext) String() string {
var flags int32
if c.samplingState != nil {
flags = c.samplingState.stateFlags.Load()
}
if c.traceID.High == 0 {
return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
}
return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
}
// ContextFromString reconstructs the Context encoded in a string
func ContextFromString(value string) (SpanContext, error) {
var context SpanContext
if value == "" {
return emptyContext, errEmptyTracerStateString
}
parts := strings.Split(value, ":")
if len(parts) != 4 {
return emptyContext, errMalformedTracerStateString
}
var err error
if context.traceID, err = TraceIDFromString(parts[0]); err != nil {
return emptyContext, err
}
if context.spanID, err = SpanIDFromString(parts[1]); err != nil {
return emptyContext, err
}
if context.parentID, err = SpanIDFromString(parts[2]); err != nil {
return emptyContext, err
}
flags, err := strconv.ParseUint(parts[3], 10, 8)
if err != nil {
return emptyContext, err
}
context.samplingState = &samplingState{}
context.samplingState.setFlags(byte(flags))
return context, nil
}
// TraceID returns the trace ID of this span context
func (c SpanContext) TraceID() TraceID {
return c.traceID
}
// SpanID returns the span ID of this span context
func (c SpanContext) SpanID() SpanID {
return c.spanID
}
// ParentID returns the parent span ID of this span context
func (c SpanContext) ParentID() SpanID {
return c.parentID
}
// Flags returns the bitmap containing such bits as 'sampled' and 'debug'.
func (c SpanContext) Flags() byte {
return c.samplingState.flags()
}
// Span can be written to if it is sampled or the sampling decision has not been finalized.
func (c SpanContext) isWriteable() bool {
state := c.samplingState
return !state.isFinal() || state.isSampled()
}
func (c SpanContext) isSamplingFinalized() bool {
return c.samplingState.isFinal()
}
// NewSpanContext creates a new instance of SpanContext
func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
samplingState := &samplingState{}
if sampled {
samplingState.setSampled()
}
return SpanContext{
traceID: traceID,
spanID: spanID,
parentID: parentID,
samplingState: samplingState,
baggage: baggage}
}
// CopyFrom copies data from ctx into this context, including span identity and baggage.
// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing.
func (c *SpanContext) CopyFrom(ctx *SpanContext) {
c.traceID = ctx.traceID
c.spanID = ctx.spanID
c.parentID = ctx.parentID
c.samplingState = ctx.samplingState
if l := len(ctx.baggage); l > 0 {
c.baggage = make(map[string]string, l)
for k, v := range ctx.baggage {
c.baggage[k] = v
}
} else {
c.baggage = nil
}
}
// WithBaggageItem creates a new context with an extra baggage item.
// Passing an empty value deletes the baggage item.
//
// The SpanContext is designed to be immutable and passed by value. As such,
// it cannot contain any locks, and should only hold immutable data, including baggage.
// Another reason for why baggage is immutable is when the span context is passed
// as a parent when starting a new span. The new span's baggage cannot affect the parent
// span's baggage, so the child span either needs to take a copy of the parent baggage
// (which is expensive and unnecessary since baggage rarely changes in the life span of
// a trace), or it needs to do a copy-on-write, which is the approach taken here.
func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
var newBaggage map[string]string
// unset baggage item
if value == "" {
if _, ok := c.baggage[key]; !ok {
return c
}
newBaggage = make(map[string]string, len(c.baggage))
for k, v := range c.baggage {
newBaggage[k] = v
}
delete(newBaggage, key)
return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
}
if c.baggage == nil {
newBaggage = map[string]string{key: value}
} else {
newBaggage = make(map[string]string, len(c.baggage)+1)
for k, v := range c.baggage {
newBaggage[k] = v
}
newBaggage[key] = value
}
// Use positional parameters so the compiler will help catch new fields.
return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
}
// isDebugIDContainerOnly returns true when the instance of the context is only
// used to return the debug/correlation ID from extract() method. This happens
// in the situation when "jaeger-debug-id" header is passed in the carrier to
// the extract() method, but the request otherwise has no span context in it.
// Previously this would've returned opentracing.ErrSpanContextNotFound from the
// extract method, but now it returns a dummy context with only debugID filled in.
//
// See JaegerDebugHeader in constants.go
// See TextMapPropagator#Extract
func (c *SpanContext) isDebugIDContainerOnly() bool {
return !c.traceID.IsValid() && c.debugID != ""
}
// ------- TraceID -------
func (t TraceID) String() string {
if t.High == 0 {
return fmt.Sprintf("%016x", t.Low)
}
return fmt.Sprintf("%016x%016x", t.High, t.Low)
}
// TraceIDFromString creates a TraceID from a hexadecimal string
func TraceIDFromString(s string) (TraceID, error) {
var hi, lo uint64
var err error
if len(s) > 32 {
return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
} else if len(s) > 16 {
hiLen := len(s) - 16
if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
return TraceID{}, err
}
if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
return TraceID{}, err
}
} else {
if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
return TraceID{}, err
}
}
return TraceID{High: hi, Low: lo}, nil
}
// IsValid checks if the trace ID is valid, i.e. not zero.
func (t TraceID) IsValid() bool {
return t.High != 0 || t.Low != 0
}
// ------- SpanID -------
func (s SpanID) String() string {
return fmt.Sprintf("%016x", uint64(s))
}
// SpanIDFromString creates a SpanID from a hexadecimal string
func SpanIDFromString(s string) (SpanID, error) {
if len(s) > 16 {
return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s)
}
id, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return SpanID(0), err
}
return SpanID(id), nil
}
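As a quick illustration of the wire format handled above ("{trace-id}:{span-id}:{parent-id}:{flags}"), here is a hedged round-trip sketch (not part of the vendored sources). Note that ContextFromString parses the flags field as decimal while String() renders it in hex, so the two representations only coincide for flag values below 0xa.

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go"
)

func main() {
	ctx, err := jaeger.ContextFromString("1:2:0:1")
	if err != nil {
		panic(err)
	}
	fmt.Println(ctx.IsSampled()) // true: flagSampled is set
	fmt.Println(ctx.String())    // 0000000000000001:0000000000000002:0000000000000000:1
}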

View File

@@ -0,0 +1,6 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package agent
var GoUnusedProtection__ int;

View File

@@ -0,0 +1,28 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package agent
import(
"bytes"
"context"
"fmt"
"time"
"github.com/uber/jaeger-client-go/thrift"
"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
var _ = jaeger.GoUnusedProtection__
var _ = zipkincore.GoUnusedProtection__
func init() {
}

View File

@@ -0,0 +1,396 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package agent
import(
"bytes"
"context"
"fmt"
"time"
"github.com/uber/jaeger-client-go/thrift"
"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
var _ = jaeger.GoUnusedProtection__
var _ = zipkincore.GoUnusedProtection__
type Agent interface {
// Parameters:
// - Spans
EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error)
// Parameters:
// - Batch
EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error)
}
type AgentClient struct {
c thrift.TClient
meta thrift.ResponseMeta
}
func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
return &AgentClient{
c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
}
}
func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
return &AgentClient{
c: thrift.NewTStandardClient(iprot, oprot),
}
}
func NewAgentClient(c thrift.TClient) *AgentClient {
return &AgentClient{
c: c,
}
}
func (p *AgentClient) Client_() thrift.TClient {
return p.c
}
func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta {
return p.meta
}
func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
p.meta = meta
}
// Parameters:
// - Spans
func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) {
var _args0 AgentEmitZipkinBatchArgs
_args0.Spans = spans
p.SetLastResponseMeta_(thrift.ResponseMeta{})
if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil {
return err
}
return nil
}
// Parameters:
// - Batch
func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) {
var _args1 AgentEmitBatchArgs
_args1.Batch = batch
p.SetLastResponseMeta_(thrift.ResponseMeta{})
if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil {
return err
}
return nil
}
type AgentProcessor struct {
processorMap map[string]thrift.TProcessorFunction
handler Agent
}
func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
p.processorMap[key] = processor
}
func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
processor, ok = p.processorMap[key]
return processor, ok
}
func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
return p.processorMap
}
func NewAgentProcessor(handler Agent) *AgentProcessor {
self2 := &AgentProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler:handler}
self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler:handler}
return self2
}
func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
if err2 != nil { return false, thrift.WrapTException(err2) }
if processor, ok := p.GetProcessorFunction(name); ok {
return processor.Process(ctx, seqId, iprot, oprot)
}
iprot.Skip(ctx, thrift.STRUCT)
iprot.ReadMessageEnd(ctx)
x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
x3.Write(ctx, oprot)
oprot.WriteMessageEnd(ctx)
oprot.Flush(ctx)
return false, x3
}
type agentProcessorEmitZipkinBatch struct {
handler Agent
}
func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := AgentEmitZipkinBatchArgs{}
var err2 error
if err2 = args.Read(ctx, iprot); err2 != nil {
iprot.ReadMessageEnd(ctx)
return false, thrift.WrapTException(err2)
}
iprot.ReadMessageEnd(ctx)
tickerCancel := func() {}
_ = tickerCancel
if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil {
tickerCancel()
return true, thrift.WrapTException(err2)
}
tickerCancel()
return true, nil
}
type agentProcessorEmitBatch struct {
handler Agent
}
func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := AgentEmitBatchArgs{}
var err2 error
if err2 = args.Read(ctx, iprot); err2 != nil {
iprot.ReadMessageEnd(ctx)
return false, thrift.WrapTException(err2)
}
iprot.ReadMessageEnd(ctx)
tickerCancel := func() {}
_ = tickerCancel
if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil {
tickerCancel()
return true, thrift.WrapTException(err2)
}
tickerCancel()
return true, nil
}
// HELPER FUNCTIONS AND STRUCTURES
// Attributes:
// - Spans
type AgentEmitZipkinBatchArgs struct {
Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"`
}
func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
return &AgentEmitZipkinBatchArgs{}
}
func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
return p.Spans
}
func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP { break; }
switch fieldId {
case 1:
if fieldTypeId == thrift.LIST {
if err := p.ReadField1(ctx, iprot); err != nil {
return err
}
} else {
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
default:
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(ctx); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin(ctx)
if err != nil {
return thrift.PrependError("error reading list begin: ", err)
}
tSlice := make([]*zipkincore.Span, 0, size)
p.Spans = tSlice
for i := 0; i < size; i ++ {
_elem4 := &zipkincore.Span{}
if err := _elem4.Read(ctx, iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
}
p.Spans = append(p.Spans, _elem4)
}
if err := iprot.ReadListEnd(ctx); err != nil {
return thrift.PrependError("error reading list end: ", err)
}
return nil
}
func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
if p != nil {
if err := p.writeField1(ctx, oprot); err != nil { return err }
}
if err := oprot.WriteFieldStop(ctx); err != nil {
return thrift.PrependError("write field stop error: ", err) }
if err := oprot.WriteStructEnd(ctx); err != nil {
return thrift.PrependError("write struct stop error: ", err) }
return nil
}
func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.Spans {
if err := v.Write(ctx, oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
}
}
if err := oprot.WriteListEnd(ctx); err != nil {
return thrift.PrependError("error writing list end: ", err)
}
if err := oprot.WriteFieldEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
return err
}
func (p *AgentEmitZipkinBatchArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
}
// Attributes:
// - Batch
type AgentEmitBatchArgs struct {
Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"`
}
func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
return &AgentEmitBatchArgs{}
}
var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
if !p.IsSetBatch() {
return AgentEmitBatchArgs_Batch_DEFAULT
}
return p.Batch
}
func (p *AgentEmitBatchArgs) IsSetBatch() bool {
return p.Batch != nil
}
func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP { break; }
switch fieldId {
case 1:
if fieldTypeId == thrift.STRUCT {
if err := p.ReadField1(ctx, iprot); err != nil {
return err
}
} else {
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
default:
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(ctx); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
p.Batch = &jaeger.Batch{}
if err := p.Batch.Read(ctx, iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
}
return nil
}
func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
if p != nil {
if err := p.writeField1(ctx, oprot); err != nil { return err }
}
if err := oprot.WriteFieldStop(ctx); err != nil {
return thrift.PrependError("write field stop error: ", err) }
if err := oprot.WriteStructEnd(ctx); err != nil {
return thrift.PrependError("write struct stop error: ", err) }
return nil
}
func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) }
if err := p.Batch.Write(ctx, oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
}
if err := oprot.WriteFieldEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) }
return err
}
func (p *AgentEmitBatchArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
}

View File

@@ -0,0 +1,6 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package baggage
var GoUnusedProtection__ int;

View File

@@ -0,0 +1,23 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package baggage
import(
"bytes"
"context"
"fmt"
"time"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
func init() {
}

View File

@@ -0,0 +1,565 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package baggage
import(
"bytes"
"context"
"fmt"
"time"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
// Attributes:
// - BaggageKey
// - MaxValueLength
type BaggageRestriction struct {
BaggageKey string `thrift:"baggageKey,1,required" db:"baggageKey" json:"baggageKey"`
MaxValueLength int32 `thrift:"maxValueLength,2,required" db:"maxValueLength" json:"maxValueLength"`
}
func NewBaggageRestriction() *BaggageRestriction {
return &BaggageRestriction{}
}
func (p *BaggageRestriction) GetBaggageKey() string {
return p.BaggageKey
}
func (p *BaggageRestriction) GetMaxValueLength() int32 {
return p.MaxValueLength
}
func (p *BaggageRestriction) Read(ctx context.Context, iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
var issetBaggageKey bool = false;
var issetMaxValueLength bool = false;
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP { break; }
switch fieldId {
case 1:
if fieldTypeId == thrift.STRING {
if err := p.ReadField1(ctx, iprot); err != nil {
return err
}
issetBaggageKey = true
} else {
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
case 2:
if fieldTypeId == thrift.I32 {
if err := p.ReadField2(ctx, iprot); err != nil {
return err
}
issetMaxValueLength = true
} else {
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
default:
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(ctx); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
if !issetBaggageKey{
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"));
}
if !issetMaxValueLength{
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"));
}
return nil
}
func (p *BaggageRestriction) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
if v, err := iprot.ReadString(ctx); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
p.BaggageKey = v
}
return nil
}
func (p *BaggageRestriction) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
if v, err := iprot.ReadI32(ctx); err != nil {
return thrift.PrependError("error reading field 2: ", err)
} else {
p.MaxValueLength = v
}
return nil
}
func (p *BaggageRestriction) Write(ctx context.Context, oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin(ctx, "BaggageRestriction"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
if p != nil {
if err := p.writeField1(ctx, oprot); err != nil { return err }
if err := p.writeField2(ctx, oprot); err != nil { return err }
}
if err := oprot.WriteFieldStop(ctx); err != nil {
return thrift.PrependError("write field stop error: ", err) }
if err := oprot.WriteStructEnd(ctx); err != nil {
return thrift.PrependError("write struct stop error: ", err) }
return nil
}
func (p *BaggageRestriction) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin(ctx, "baggageKey", thrift.STRING, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) }
if err := oprot.WriteString(ctx, string(p.BaggageKey)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) }
if err := oprot.WriteFieldEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) }
return err
}
func (p *BaggageRestriction) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin(ctx, "maxValueLength", thrift.I32, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) }
if err := oprot.WriteI32(ctx, int32(p.MaxValueLength)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) }
if err := oprot.WriteFieldEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) }
return err
}
func (p *BaggageRestriction) Equals(other *BaggageRestriction) bool {
if p == other {
return true
} else if p == nil || other == nil {
return false
}
if p.BaggageKey != other.BaggageKey { return false }
if p.MaxValueLength != other.MaxValueLength { return false }
return true
}
func (p *BaggageRestriction) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("BaggageRestriction(%+v)", *p)
}
type BaggageRestrictionManager interface {
// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
// Usually, baggageRestrictions apply to all services however there may be situations
// where a baggageKey might only be allowed to be set by a specific service.
//
// Parameters:
// - ServiceName
GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error)
}
type BaggageRestrictionManagerClient struct {
c thrift.TClient
meta thrift.ResponseMeta
}
func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
return &BaggageRestrictionManagerClient{
c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
}
}
func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
return &BaggageRestrictionManagerClient{
c: thrift.NewTStandardClient(iprot, oprot),
}
}
func NewBaggageRestrictionManagerClient(c thrift.TClient) *BaggageRestrictionManagerClient {
return &BaggageRestrictionManagerClient{
c: c,
}
}
func (p *BaggageRestrictionManagerClient) Client_() thrift.TClient {
return p.c
}
func (p *BaggageRestrictionManagerClient) LastResponseMeta_() thrift.ResponseMeta {
return p.meta
}
func (p *BaggageRestrictionManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
p.meta = meta
}
// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
// Usually, baggageRestrictions apply to all services however there may be situations
// where a baggageKey might only be allowed to be set by a specific service.
//
// Parameters:
// - ServiceName
func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) {
var _args0 BaggageRestrictionManagerGetBaggageRestrictionsArgs
_args0.ServiceName = serviceName
var _result2 BaggageRestrictionManagerGetBaggageRestrictionsResult
var _meta1 thrift.ResponseMeta
_meta1, _err = p.Client_().Call(ctx, "getBaggageRestrictions", &_args0, &_result2)
p.SetLastResponseMeta_(_meta1)
if _err != nil {
return
}
return _result2.GetSuccess(), nil
}
type BaggageRestrictionManagerProcessor struct {
processorMap map[string]thrift.TProcessorFunction
handler BaggageRestrictionManager
}
func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
p.processorMap[key] = processor
}
func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
processor, ok = p.processorMap[key]
return processor, ok
}
func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
return p.processorMap
}
func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
self3 := &BaggageRestrictionManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
self3.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler:handler}
return self3
}
func (p *BaggageRestrictionManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
if err2 != nil { return false, thrift.WrapTException(err2) }
if processor, ok := p.GetProcessorFunction(name); ok {
return processor.Process(ctx, seqId, iprot, oprot)
}
iprot.Skip(ctx, thrift.STRUCT)
iprot.ReadMessageEnd(ctx)
x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
x4.Write(ctx, oprot)
oprot.WriteMessageEnd(ctx)
oprot.Flush(ctx)
return false, x4
}
type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
handler BaggageRestrictionManager
}
func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
var err2 error
if err2 = args.Read(ctx, iprot); err2 != nil {
iprot.ReadMessageEnd(ctx)
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId)
x.Write(ctx, oprot)
oprot.WriteMessageEnd(ctx)
oprot.Flush(ctx)
return false, thrift.WrapTException(err2)
}
iprot.ReadMessageEnd(ctx)
tickerCancel := func() {}
// Start a goroutine to do server side connectivity check.
if thrift.ServerConnectivityCheckInterval > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
defer cancel()
var tickerCtx context.Context
tickerCtx, tickerCancel = context.WithCancel(context.Background())
defer tickerCancel()
go func(ctx context.Context, cancel context.CancelFunc) {
ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
if !iprot.Transport().IsOpen() {
cancel()
return
}
}
}
}(tickerCtx, cancel)
}
result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
var retval []*BaggageRestriction
if retval, err2 = p.handler.GetBaggageRestrictions(ctx, args.ServiceName); err2 != nil {
tickerCancel()
if err2 == thrift.ErrAbandonRequest {
return false, thrift.WrapTException(err2)
}
x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: " + err2.Error())
oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId)
x.Write(ctx, oprot)
oprot.WriteMessageEnd(ctx)
oprot.Flush(ctx)
return true, thrift.WrapTException(err2)
} else {
result.Success = retval
}
tickerCancel()
if err2 = oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
err = thrift.WrapTException(err2)
}
if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
err = thrift.WrapTException(err2)
}
if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
err = thrift.WrapTException(err2)
}
if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
err = thrift.WrapTException(err2)
}
if err != nil {
return
}
return true, err
}
// HELPER FUNCTIONS AND STRUCTURES
// Attributes:
// - ServiceName
type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"`
}
func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
return p.ServiceName
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP { break; }
switch fieldId {
case 1:
if fieldTypeId == thrift.STRING {
if err := p.ReadField1(ctx, iprot); err != nil {
return err
}
} else {
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
default:
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(ctx); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
if v, err := iprot.ReadString(ctx); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
p.ServiceName = v
}
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
if p != nil {
if err := p.writeField1(ctx, oprot); err != nil { return err }
}
if err := oprot.WriteFieldStop(ctx); err != nil {
return thrift.PrependError("write field stop error: ", err) }
if err := oprot.WriteStructEnd(ctx); err != nil {
return thrift.PrependError("write struct stop error: ", err) }
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
if err := oprot.WriteFieldEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
return err
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
}
// Attributes:
// - Success
type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
Success []*BaggageRestriction `thrift:"success,0" db:"success" json:"success,omitempty"`
}
func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
}
var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
return p.Success
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
return p.Success != nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP { break; }
switch fieldId {
case 0:
if fieldTypeId == thrift.LIST {
if err := p.ReadField0(ctx, iprot); err != nil {
return err
}
} else {
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
default:
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(ctx); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin(ctx)
if err != nil {
return thrift.PrependError("error reading list begin: ", err)
}
tSlice := make([]*BaggageRestriction, 0, size)
p.Success = tSlice
for i := 0; i < size; i ++ {
_elem5 := &BaggageRestriction{}
if err := _elem5.Read(ctx, iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err)
}
p.Success = append(p.Success, _elem5)
}
if err := iprot.ReadListEnd(ctx); err != nil {
return thrift.PrependError("error reading list end: ", err)
}
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
if p != nil {
if err := p.writeField0(ctx, oprot); err != nil { return err }
}
if err := oprot.WriteFieldStop(ctx); err != nil {
return thrift.PrependError("write field stop error: ", err) }
if err := oprot.WriteStructEnd(ctx); err != nil {
return thrift.PrependError("write struct stop error: ", err) }
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.Success {
if err := v.Write(ctx, oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
}
}
if err := oprot.WriteListEnd(ctx); err != nil {
return thrift.PrependError("error writing list end: ", err)
}
if err := oprot.WriteFieldEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
}
return err
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
}

View File

@@ -0,0 +1,6 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package jaeger
var GoUnusedProtection__ int;

View File

@@ -0,0 +1,23 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package jaeger
import(
"bytes"
"context"
"fmt"
"time"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
func init() {
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,6 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package sampling
var GoUnusedProtection__ int;

View File

@@ -0,0 +1,23 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package sampling
import(
"bytes"
"context"
"fmt"
"time"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
func init() {
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,6 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package zipkincore
var GoUnusedProtection__ int;

View File

@@ -0,0 +1,39 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package zipkincore
import(
"bytes"
"context"
"fmt"
"time"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
const CLIENT_SEND = "cs"
const CLIENT_RECV = "cr"
const SERVER_SEND = "ss"
const SERVER_RECV = "sr"
const MESSAGE_SEND = "ms"
const MESSAGE_RECV = "mr"
const WIRE_SEND = "ws"
const WIRE_RECV = "wr"
const CLIENT_SEND_FRAGMENT = "csf"
const CLIENT_RECV_FRAGMENT = "crf"
const SERVER_SEND_FRAGMENT = "ssf"
const SERVER_RECV_FRAGMENT = "srf"
const LOCAL_COMPONENT = "lc"
const CLIENT_ADDR = "ca"
const SERVER_ADDR = "sa"
const MESSAGE_ADDR = "ma"
func init() {
}

File diff suppressed because it is too large

View File

View File

@@ -0,0 +1,11 @@
# Apache Thrift
This is a partial copy of Apache Thrift v0.14.1 (https://github.com/apache/thrift/commit/f6fa1794539e68ac294038ac388d6bde40a6c237).
The code is vendored to avoid compatibility issues with other Thrift versions.
The file logger.go is modified to remove its dependency on the "testing" package (see Issue #585).
See:
* https://github.com/jaegertracing/jaeger-client-go/pull/584
* https://github.com/jaegertracing/jaeger-client-go/pull/303

View File

@@ -0,0 +1,180 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
)
const (
UNKNOWN_APPLICATION_EXCEPTION = 0
UNKNOWN_METHOD = 1
INVALID_MESSAGE_TYPE_EXCEPTION = 2
WRONG_METHOD_NAME = 3
BAD_SEQUENCE_ID = 4
MISSING_RESULT = 5
INTERNAL_ERROR = 6
PROTOCOL_ERROR = 7
INVALID_TRANSFORM = 8
INVALID_PROTOCOL = 9
UNSUPPORTED_CLIENT_TYPE = 10
)
var defaultApplicationExceptionMessage = map[int32]string{
UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception",
UNKNOWN_METHOD: "unknown method",
INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
WRONG_METHOD_NAME: "wrong method name",
BAD_SEQUENCE_ID: "bad sequence ID",
MISSING_RESULT: "missing result",
INTERNAL_ERROR: "unknown internal error",
PROTOCOL_ERROR: "unknown protocol error",
INVALID_TRANSFORM: "Invalid transform",
INVALID_PROTOCOL: "Invalid protocol",
UNSUPPORTED_CLIENT_TYPE: "Unsupported client type",
}
// Application level Thrift exception
type TApplicationException interface {
TException
TypeId() int32
Read(ctx context.Context, iprot TProtocol) error
Write(ctx context.Context, oprot TProtocol) error
}
type tApplicationException struct {
message string
type_ int32
}
var _ TApplicationException = (*tApplicationException)(nil)
func (tApplicationException) TExceptionType() TExceptionType {
return TExceptionTypeApplication
}
func (e tApplicationException) Error() string {
if e.message != "" {
return e.message
}
return defaultApplicationExceptionMessage[e.type_]
}
func NewTApplicationException(type_ int32, message string) TApplicationException {
return &tApplicationException{message, type_}
}
func (p *tApplicationException) TypeId() int32 {
return p.type_
}
func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error {
// TODO: this should really be generated by the compiler
_, err := iprot.ReadStructBegin(ctx)
if err != nil {
return err
}
message := ""
type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
for {
_, ttype, id, err := iprot.ReadFieldBegin(ctx)
if err != nil {
return err
}
if ttype == STOP {
break
}
switch id {
case 1:
if ttype == STRING {
if message, err = iprot.ReadString(ctx); err != nil {
return err
}
} else {
if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
return err
}
}
case 2:
if ttype == I32 {
if type_, err = iprot.ReadI32(ctx); err != nil {
return err
}
} else {
if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
return err
}
}
default:
if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
return err
}
}
if err = iprot.ReadFieldEnd(ctx); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(ctx); err != nil {
return err
}
p.message = message
p.type_ = type_
return nil
}
func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) {
err = oprot.WriteStructBegin(ctx, "TApplicationException")
if len(p.Error()) > 0 {
err = oprot.WriteFieldBegin(ctx, "message", STRING, 1)
if err != nil {
return
}
err = oprot.WriteString(ctx, p.Error())
if err != nil {
return
}
err = oprot.WriteFieldEnd(ctx)
if err != nil {
return
}
}
err = oprot.WriteFieldBegin(ctx, "type", I32, 2)
if err != nil {
return
}
err = oprot.WriteI32(ctx, p.type_)
if err != nil {
return
}
err = oprot.WriteFieldEnd(ctx)
if err != nil {
return
}
err = oprot.WriteFieldStop(ctx)
if err != nil {
return
}
err = oprot.WriteStructEnd(ctx)
return
}

View File

@@ -0,0 +1,555 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
)
type TBinaryProtocol struct {
trans TRichTransport
origTransport TTransport
cfg *TConfiguration
buffer [64]byte
}
type TBinaryProtocolFactory struct {
cfg *TConfiguration
}
// Deprecated: Use NewTBinaryProtocolConf instead.
func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
return NewTBinaryProtocolConf(t, &TConfiguration{
noPropagation: true,
})
}
// Deprecated: Use NewTBinaryProtocolConf instead.
func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
return NewTBinaryProtocolConf(t, &TConfiguration{
TBinaryStrictRead: &strictRead,
TBinaryStrictWrite: &strictWrite,
noPropagation: true,
})
}
func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol {
PropagateTConfiguration(t, conf)
p := &TBinaryProtocol{
origTransport: t,
cfg: conf,
}
if et, ok := t.(TRichTransport); ok {
p.trans = et
} else {
p.trans = NewTRichTransport(t)
}
return p
}
// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
return NewTBinaryProtocolFactoryConf(&TConfiguration{
noPropagation: true,
})
}
// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
return NewTBinaryProtocolFactoryConf(&TConfiguration{
TBinaryStrictRead: &strictRead,
TBinaryStrictWrite: &strictWrite,
noPropagation: true,
})
}
func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory {
return &TBinaryProtocolFactory{
cfg: conf,
}
}
func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
return NewTBinaryProtocolConf(t, p.cfg)
}
func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) {
p.cfg = conf
}
/**
* Writing Methods
*/
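// WriteMessageBegin writes the message header: in strict mode a 4-byte version word
// (VERSION_1 OR-ed with the message type) followed by the name and sequence id,
// otherwise the legacy form of name, a single type byte, and the sequence id.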
func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
if p.cfg.GetTBinaryStrictWrite() {
version := uint32(VERSION_1) | uint32(typeId)
e := p.WriteI32(ctx, int32(version))
if e != nil {
return e
}
e = p.WriteString(ctx, name)
if e != nil {
return e
}
e = p.WriteI32(ctx, seqId)
return e
} else {
e := p.WriteString(ctx, name)
if e != nil {
return e
}
e = p.WriteByte(ctx, int8(typeId))
if e != nil {
return e
}
e = p.WriteI32(ctx, seqId)
return e
}
}
func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error {
return nil
}
func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
e := p.WriteByte(ctx, int8(typeId))
if e != nil {
return e
}
e = p.WriteI16(ctx, id)
return e
}
func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error {
e := p.WriteByte(ctx, STOP)
return e
}
func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
e := p.WriteByte(ctx, int8(keyType))
if e != nil {
return e
}
e = p.WriteByte(ctx, int8(valueType))
if e != nil {
return e
}
e = p.WriteI32(ctx, int32(size))
return e
}
func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
e := p.WriteByte(ctx, int8(elemType))
if e != nil {
return e
}
e = p.WriteI32(ctx, int32(size))
return e
}
func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
e := p.WriteByte(ctx, int8(elemType))
if e != nil {
return e
}
e = p.WriteI32(ctx, int32(size))
return e
}
func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error {
if value {
return p.WriteByte(ctx, 1)
}
return p.WriteByte(ctx, 0)
}
func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error {
e := p.trans.WriteByte(byte(value))
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error {
v := p.buffer[0:2]
binary.BigEndian.PutUint16(v, uint16(value))
_, e := p.trans.Write(v)
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error {
v := p.buffer[0:4]
binary.BigEndian.PutUint32(v, uint32(value))
_, e := p.trans.Write(v)
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error {
v := p.buffer[0:8]
binary.BigEndian.PutUint64(v, uint64(value))
_, err := p.trans.Write(v)
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error {
return p.WriteI64(ctx, int64(math.Float64bits(value)))
}
func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error {
e := p.WriteI32(ctx, int32(len(value)))
if e != nil {
return e
}
_, err := p.trans.WriteString(value)
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error {
e := p.WriteI32(ctx, int32(len(value)))
if e != nil {
return e
}
_, err := p.trans.Write(value)
return NewTProtocolException(err)
}
/**
* Reading methods
*/
func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
size, e := p.ReadI32(ctx)
if e != nil {
return "", typeId, 0, NewTProtocolException(e)
}
if size < 0 {
typeId = TMessageType(size & 0x0ff)
version := int64(int64(size) & VERSION_MASK)
if version != VERSION_1 {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
}
name, e = p.ReadString(ctx)
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
seqId, e = p.ReadI32(ctx)
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
return name, typeId, seqId, nil
}
if p.cfg.GetTBinaryStrictRead() {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
}
name, e2 := p.readStringBody(size)
if e2 != nil {
return name, typeId, seqId, e2
}
b, e3 := p.ReadByte(ctx)
if e3 != nil {
return name, typeId, seqId, e3
}
typeId = TMessageType(b)
seqId, e4 := p.ReadI32(ctx)
if e4 != nil {
return name, typeId, seqId, e4
}
return name, typeId, seqId, nil
}
func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
return
}
func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) {
t, err := p.ReadByte(ctx)
typeId = TType(t)
if err != nil {
return name, typeId, seqId, err
}
if t != STOP {
seqId, err = p.ReadI16(ctx)
}
return name, typeId, seqId, err
}
func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error {
return nil
}
var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) {
k, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
kType = TType(k)
v, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
vType = TType(v)
size32, e := p.ReadI32(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return kType, vType, size, nil
}
func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
b, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
size32, e := p.ReadI32(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return
}
func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
b, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
size32, e := p.ReadI32(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return elemType, size, nil
}
func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) {
b, e := p.ReadByte(ctx)
v := true
if b != 1 {
v = false
}
return v, e
}
func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) {
v, err := p.trans.ReadByte()
return int8(v), err
}
func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) {
buf := p.buffer[0:2]
err = p.readAll(ctx, buf)
value = int16(binary.BigEndian.Uint16(buf))
return value, err
}
func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) {
buf := p.buffer[0:4]
err = p.readAll(ctx, buf)
value = int32(binary.BigEndian.Uint32(buf))
return value, err
}
func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) {
buf := p.buffer[0:8]
err = p.readAll(ctx, buf)
value = int64(binary.BigEndian.Uint64(buf))
return value, err
}
func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
buf := p.buffer[0:8]
err = p.readAll(ctx, buf)
value = math.Float64frombits(binary.BigEndian.Uint64(buf))
return value, err
}
func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) {
size, e := p.ReadI32(ctx)
if e != nil {
return "", e
}
err = checkSizeForProtocol(size, p.cfg)
if err != nil {
return
}
if size < 0 {
err = invalidDataLength
return
}
if size == 0 {
return "", nil
}
if size < int32(len(p.buffer)) {
// Avoid allocation on small reads
buf := p.buffer[:size]
read, e := io.ReadFull(p.trans, buf)
return string(buf[:read]), NewTProtocolException(e)
}
return p.readStringBody(size)
}
func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
size, e := p.ReadI32(ctx)
if e != nil {
return nil, e
}
if err := checkSizeForProtocol(size, p.cfg); err != nil {
return nil, err
}
buf, err := safeReadBytes(size, p.trans)
return buf, NewTProtocolException(err)
}
func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
return NewTProtocolException(p.trans.Flush(ctx))
}
func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
return SkipDefaultDepth(ctx, p, fieldType)
}
func (p *TBinaryProtocol) Transport() TTransport {
return p.origTransport
}
func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) {
var read int
_, deadlineSet := ctx.Deadline()
for {
read, err = io.ReadFull(p.trans, buf)
if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil {
// This is I/O timeout without anything read,
// and we still have time left, keep retrying.
continue
}
// For anything else, don't retry
break
}
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
buf, err := safeReadBytes(size, p.trans)
return string(buf), NewTProtocolException(err)
}
func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) {
PropagateTConfiguration(p.trans, conf)
PropagateTConfiguration(p.origTransport, conf)
p.cfg = conf
}
var (
_ TConfigurationSetter = (*TBinaryProtocolFactory)(nil)
_ TConfigurationSetter = (*TBinaryProtocol)(nil)
)
// This function is shared between TBinaryProtocol and TCompactProtocol.
//
// It tries to read size bytes from trans, in a way that prevents large
// allocations when size is insanely large (usually caused by a malformed message).
func safeReadBytes(size int32, trans io.Reader) ([]byte, error) {
if size < 0 {
return nil, nil
}
buf := new(bytes.Buffer)
_, err := io.CopyN(buf, trans, int64(size))
return buf.Bytes(), err
}

View File

@@ -0,0 +1,109 @@
package thrift
import (
"context"
"fmt"
)
// ResponseMeta represents the metadata attached to the response.
type ResponseMeta struct {
// The headers in the response, if any.
// If the underlying transport/protocol is not THeader, this will always be nil.
Headers THeaderMap
}
type TClient interface {
Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
}
type TStandardClient struct {
seqId int32
iprot, oprot TProtocol
}
// TStandardClient implements TClient, and uses the standard message format for Thrift.
// It is not safe for concurrent use.
func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
return &TStandardClient{
iprot: inputProtocol,
oprot: outputProtocol,
}
}
func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
// Set headers from context object on THeaderProtocol
if headerProt, ok := oprot.(*THeaderProtocol); ok {
headerProt.ClearWriteHeaders()
for _, key := range GetWriteHeaderList(ctx) {
if value, ok := GetHeader(ctx, key); ok {
headerProt.SetWriteHeader(key, value)
}
}
}
if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil {
return err
}
if err := args.Write(ctx, oprot); err != nil {
return err
}
if err := oprot.WriteMessageEnd(ctx); err != nil {
return err
}
return oprot.Flush(ctx)
}
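// Recv reads a response message and verifies that its method name and sequence id
// match the request; an EXCEPTION reply is decoded and returned as a
// TApplicationException, otherwise the result struct is read from the REPLY.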
func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error {
rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx)
if err != nil {
return err
}
if method != rMethod {
return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
} else if seqId != rSeqId {
return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
} else if rTypeId == EXCEPTION {
var exception tApplicationException
if err := exception.Read(ctx, iprot); err != nil {
return err
}
if err := iprot.ReadMessageEnd(ctx); err != nil {
return err
}
return &exception
} else if rTypeId != REPLY {
return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
}
if err := result.Read(ctx, iprot); err != nil {
return err
}
return iprot.ReadMessageEnd(ctx)
}
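// Call increments the sequence id and sends the request; for oneway methods
// (result == nil) it returns immediately, otherwise it reads the response and,
// when the input protocol is a THeaderProtocol, returns the response headers
// in ResponseMeta.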
func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
p.seqId++
seqId := p.seqId
if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
return ResponseMeta{}, err
}
// method is oneway
if result == nil {
return ResponseMeta{}, nil
}
err := p.Recv(ctx, p.iprot, seqId, method, result)
var headers THeaderMap
if hp, ok := p.iprot.(*THeaderProtocol); ok {
headers = hp.transport.readHeaders
}
return ResponseMeta{
Headers: headers,
}, err
}

View File

@@ -0,0 +1,865 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
)
const (
COMPACT_PROTOCOL_ID = 0x082
COMPACT_VERSION = 1
COMPACT_VERSION_MASK = 0x1f
COMPACT_TYPE_MASK = 0x0E0
COMPACT_TYPE_BITS = 0x07
COMPACT_TYPE_SHIFT_AMOUNT = 5
)
type tCompactType byte
const (
COMPACT_BOOLEAN_TRUE = 0x01
COMPACT_BOOLEAN_FALSE = 0x02
COMPACT_BYTE = 0x03
COMPACT_I16 = 0x04
COMPACT_I32 = 0x05
COMPACT_I64 = 0x06
COMPACT_DOUBLE = 0x07
COMPACT_BINARY = 0x08
COMPACT_LIST = 0x09
COMPACT_SET = 0x0A
COMPACT_MAP = 0x0B
COMPACT_STRUCT = 0x0C
)
var (
ttypeToCompactType map[TType]tCompactType
)
func init() {
ttypeToCompactType = map[TType]tCompactType{
STOP: STOP,
BOOL: COMPACT_BOOLEAN_TRUE,
BYTE: COMPACT_BYTE,
I16: COMPACT_I16,
I32: COMPACT_I32,
I64: COMPACT_I64,
DOUBLE: COMPACT_DOUBLE,
STRING: COMPACT_BINARY,
LIST: COMPACT_LIST,
SET: COMPACT_SET,
MAP: COMPACT_MAP,
STRUCT: COMPACT_STRUCT,
}
}
type TCompactProtocolFactory struct {
cfg *TConfiguration
}
// Deprecated: Use NewTCompactProtocolFactoryConf instead.
func NewTCompactProtocolFactory() *TCompactProtocolFactory {
return NewTCompactProtocolFactoryConf(&TConfiguration{
noPropagation: true,
})
}
func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory {
return &TCompactProtocolFactory{
cfg: conf,
}
}
func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return NewTCompactProtocolConf(trans, p.cfg)
}
func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) {
p.cfg = conf
}
type TCompactProtocol struct {
trans TRichTransport
origTransport TTransport
cfg *TConfiguration
// Used to keep track of the last field for the current and previous structs,
// so we can do the delta stuff.
lastField []int
lastFieldId int
// If we encounter a boolean field begin, save the TField here so it can
// have the value incorporated.
booleanFieldName string
booleanFieldId int16
booleanFieldPending bool
// If we read a field header, and it's a boolean field, save the boolean
// value here so that readBool can use it.
boolValue bool
boolValueIsNotNull bool
buffer [64]byte
}
// Deprecated: Use NewTCompactProtocolConf instead.
func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
return NewTCompactProtocolConf(trans, &TConfiguration{
noPropagation: true,
})
}
func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol {
PropagateTConfiguration(trans, conf)
p := &TCompactProtocol{
origTransport: trans,
cfg: conf,
}
if et, ok := trans.(TRichTransport); ok {
p.trans = et
} else {
p.trans = NewTRichTransport(trans)
}
return p
}
//
// Public Writing methods.
//
// Write a message header to the wire. Compact Protocol messages contain the
// protocol version so we can migrate forwards in the future if need be.
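// For example, a CALL message starts with the bytes 0x82 0x21, followed by the
// sequence id as a varint and the method name as a length-prefixed string.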
func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
if err != nil {
return NewTProtocolException(err)
}
err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
if err != nil {
return NewTProtocolException(err)
}
_, err = p.writeVarint32(seqid)
if err != nil {
return NewTProtocolException(err)
}
e := p.WriteString(ctx, name)
return e
}
func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil }
// Write a struct begin. This doesn't actually put anything on the wire. We
// use it as an opportunity to put special placeholder markers on the field
// stack so we can get the field id deltas correct.
func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error {
p.lastField = append(p.lastField, p.lastFieldId)
p.lastFieldId = 0
return nil
}
// Write a struct end. This doesn't actually put anything on the wire. We use
// this as an opportunity to pop the last field from the current struct off
// of the field stack.
func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error {
if len(p.lastField) <= 0 {
return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before"))
}
p.lastFieldId = p.lastField[len(p.lastField)-1]
p.lastField = p.lastField[:len(p.lastField)-1]
return nil
}
func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
if typeId == BOOL {
// we want to possibly include the value, so we'll wait.
p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
return nil
}
_, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF)
return NewTProtocolException(err)
}
// The workhorse of writeFieldBegin. It has the option of doing a
// 'type override' of the type header. This is used specifically in the
// boolean field case.
func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) {
// short lastField = lastField_.pop();
// if there's a type override, use that.
var typeToWrite byte
if typeOverride == 0xFF {
typeToWrite = byte(p.getCompactType(typeId))
} else {
typeToWrite = typeOverride
}
// check if we can use delta encoding for the field id
fieldId := int(id)
written := 0
if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
// write them together
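// e.g. a delta of 1 for a binary/string field (compact type 0x08) packs into the single byte 0x18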
err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
if err != nil {
return 0, err
}
} else {
// write them separate
err := p.writeByteDirect(typeToWrite)
if err != nil {
return 0, err
}
err = p.WriteI16(ctx, id)
written = 1 + 2
if err != nil {
return 0, err
}
}
p.lastFieldId = fieldId
return written, nil
}
func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil }
func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error {
err := p.writeByteDirect(STOP)
return NewTProtocolException(err)
}
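// Write a map header: a varint element count followed by one byte packing the key and
// value compact types (an empty map is just the byte 0x00). For example, a non-empty
// map<string,i32> uses the type byte 0x85.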
func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
if size == 0 {
err := p.writeByteDirect(0)
return NewTProtocolException(err)
}
_, err := p.writeVarint32(int32(size))
if err != nil {
return NewTProtocolException(err)
}
err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil }
// Write a list header.
func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
_, err := p.writeCollectionBegin(elemType, size)
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil }
// Write a set header.
func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
_, err := p.writeCollectionBegin(elemType, size)
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil }
func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error {
v := byte(COMPACT_BOOLEAN_FALSE)
if value {
v = byte(COMPACT_BOOLEAN_TRUE)
}
if p.booleanFieldPending {
// we haven't written the field header yet
_, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v)
p.booleanFieldPending = false
return NewTProtocolException(err)
}
// we're not part of a field, so just write the value.
err := p.writeByteDirect(v)
return NewTProtocolException(err)
}
// Write a byte. Nothing to see here!
func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error {
err := p.writeByteDirect(byte(value))
return NewTProtocolException(err)
}
// Write an I16 as a zigzag varint.
func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error {
_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
return NewTProtocolException(err)
}
// Write an i32 as a zigzag varint.
func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error {
_, err := p.writeVarint32(p.int32ToZigzag(value))
return NewTProtocolException(err)
}
// Write an i64 as a zigzag varint.
func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error {
_, err := p.writeVarint64(p.int64ToZigzag(value))
return NewTProtocolException(err)
}
// Write a double to the wire as 8 bytes.
func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error {
buf := p.buffer[0:8]
binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
_, err := p.trans.Write(buf)
return NewTProtocolException(err)
}
// Write a string to the wire with a varint size preceding.
func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error {
_, e := p.writeVarint32(int32(len(value)))
if e != nil {
return NewTProtocolException(e)
}
_, e = p.trans.WriteString(value)
return e
}
// Write a byte array, using a varint for the size.
func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error {
_, e := p.writeVarint32(int32(len(bin)))
if e != nil {
return NewTProtocolException(e)
}
if len(bin) > 0 {
_, e = p.trans.Write(bin)
return NewTProtocolException(e)
}
return nil
}
//
// Reading methods.
//
// Read a message header.
func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
var protocolId byte
_, deadlineSet := ctx.Deadline()
for {
protocolId, err = p.readByteDirect()
if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
// keep retrying I/O timeout errors since we still have
// time left
continue
}
// For anything else, don't retry
break
}
if err != nil {
return
}
if protocolId != COMPACT_PROTOCOL_ID {
e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
}
versionAndType, err := p.readByteDirect()
if err != nil {
return
}
version := versionAndType & COMPACT_VERSION_MASK
typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
if version != COMPACT_VERSION {
e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
err = NewTProtocolExceptionWithType(BAD_VERSION, e)
return
}
seqId, e := p.readVarint32()
if e != nil {
err = NewTProtocolException(e)
return
}
name, err = p.ReadString(ctx)
return
}
func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil }
// Read a struct begin. There's nothing on the wire for this, but it is our
// opportunity to push a new struct begin marker onto the field stack.
func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
p.lastField = append(p.lastField, p.lastFieldId)
p.lastFieldId = 0
return
}
// Doesn't actually consume any wire data, just removes the last field for
// this struct from the field stack.
func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error {
// consume the last field we read off the wire.
if len(p.lastField) <= 0 {
return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before"))
}
p.lastFieldId = p.lastField[len(p.lastField)-1]
p.lastField = p.lastField[:len(p.lastField)-1]
return nil
}
// Read a field header off the wire.
func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
t, err := p.readByteDirect()
if err != nil {
return
}
// if it's a stop, then we can return immediately, as the struct is over.
if (t & 0x0f) == STOP {
return "", STOP, 0, nil
}
// mask off the 4 MSB of the type header. it could contain a field id delta.
modifier := int16((t & 0xf0) >> 4)
if modifier == 0 {
// not a delta. look ahead for the zigzag varint field id.
id, err = p.ReadI16(ctx)
if err != nil {
return
}
} else {
// has a delta. add the delta to the last read field id.
id = int16(p.lastFieldId) + modifier
}
typeId, e := p.getTType(tCompactType(t & 0x0f))
if e != nil {
err = NewTProtocolException(e)
return
}
// if this happens to be a boolean field, the value is encoded in the type
if p.isBoolType(t) {
// save the boolean value in a special instance variable.
p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
p.boolValueIsNotNull = true
}
// push the new field onto the field stack so we can keep the deltas going.
p.lastFieldId = int(id)
return
}
func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
// Read a map header off the wire. If the size is zero, skip reading the key
// and value type. This means that 0-length maps will yield TMaps without the
// "correct" types.
func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
size32, e := p.readVarint32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
keyAndValueType := byte(STOP)
if size != 0 {
keyAndValueType, err = p.readByteDirect()
if err != nil {
return
}
}
keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
return
}
func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
// Read a list header off the wire. If the list size is 0-14, the size will
// be packed into the element type header. If it's a longer list, the 4 MSB
// of the element type header will be 0xF, and a varint will follow with the
// true size.
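// For example, the header byte 0x38 describes a 3-element list of binary/string elements.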
func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
size_and_type, err := p.readByteDirect()
if err != nil {
return
}
size = int((size_and_type >> 4) & 0x0f)
if size == 15 {
size2, e := p.readVarint32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size2 < 0 {
err = invalidDataLength
return
}
size = int(size2)
}
elemType, e := p.getTType(tCompactType(size_and_type))
if e != nil {
err = NewTProtocolException(e)
return
}
return
}
func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
// Read a set header off the wire. If the set size is 0-14, the size will
// be packed into the element type header. If it's a longer set, the 4 MSB
// of the element type header will be 0xF, and a varint will follow with the
// true size.
func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
return p.ReadListBegin(ctx)
}
func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
// Read a boolean off the wire. If this is a boolean field, the value should
// already have been read during readFieldBegin, so we'll just consume the
// pre-stored value. Otherwise, read a byte.
func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
if p.boolValueIsNotNull {
p.boolValueIsNotNull = false
return p.boolValue, nil
}
v, err := p.readByteDirect()
return v == COMPACT_BOOLEAN_TRUE, err
}
// Read a single byte off the wire. Nothing interesting here.
func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) {
v, err := p.readByteDirect()
if err != nil {
return 0, NewTProtocolException(err)
}
return int8(v), err
}
// Read an i16 from the wire as a zigzag varint.
func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) {
v, err := p.ReadI32(ctx)
return int16(v), err
}
// Read an i32 from the wire as a zigzag varint.
func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) {
v, e := p.readVarint32()
if e != nil {
return 0, NewTProtocolException(e)
}
value = p.zigzagToInt32(v)
return value, nil
}
// Read an i64 from the wire as a zigzag varint.
func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) {
v, e := p.readVarint64()
if e != nil {
return 0, NewTProtocolException(e)
}
value = p.zigzagToInt64(v)
return value, nil
}
// No magic here - just read a double off the wire.
func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
longBits := p.buffer[0:8]
_, e := io.ReadFull(p.trans, longBits)
if e != nil {
return 0.0, NewTProtocolException(e)
}
return math.Float64frombits(p.bytesToUint64(longBits)), nil
}
// Reads a []byte (via readBinary), and then UTF-8 decodes it.
func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) {
length, e := p.readVarint32()
if e != nil {
return "", NewTProtocolException(e)
}
err = checkSizeForProtocol(length, p.cfg)
if err != nil {
return
}
if length == 0 {
return "", nil
}
if length < int32(len(p.buffer)) {
// Avoid allocation on small reads
buf := p.buffer[:length]
read, e := io.ReadFull(p.trans, buf)
return string(buf[:read]), NewTProtocolException(e)
}
buf, e := safeReadBytes(length, p.trans)
return string(buf), NewTProtocolException(e)
}
// Read a []byte from the wire.
func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
length, e := p.readVarint32()
if e != nil {
return nil, NewTProtocolException(e)
}
err = checkSizeForProtocol(length, p.cfg)
if err != nil {
return
}
if length == 0 {
return []byte{}, nil
}
buf, e := safeReadBytes(length, p.trans)
return buf, NewTProtocolException(e)
}
func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
return NewTProtocolException(p.trans.Flush(ctx))
}
func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
return SkipDefaultDepth(ctx, p, fieldType)
}
func (p *TCompactProtocol) Transport() TTransport {
return p.origTransport
}
//
// Internal writing methods
//
// Abstract method for writing the start of lists and sets. List and sets on
// the wire differ only by the type indicator.
func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
if size <= 14 {
return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
}
err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
if err != nil {
return 0, err
}
m, err := p.writeVarint32(int32(size))
return 1 + m, err
}
// Write an i32 as a varint. Results in 1-5 bytes on the wire.
// TODO(pomack): make a permanent buffer like writeVarint64?
func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
i32buf := p.buffer[0:5]
idx := 0
for {
if (n & ^0x7F) == 0 {
i32buf[idx] = byte(n)
idx++
// p.writeByteDirect(byte(n));
break
// return;
} else {
i32buf[idx] = byte((n & 0x7F) | 0x80)
idx++
// p.writeByteDirect(byte(((n & 0x7F) | 0x80)));
u := uint32(n)
n = int32(u >> 7)
}
}
return p.trans.Write(i32buf[0:idx])
}
// Write an i64 as a varint. Results in 1-10 bytes on the wire.
func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
varint64out := p.buffer[0:10]
idx := 0
for {
if (n & ^0x7F) == 0 {
varint64out[idx] = byte(n)
idx++
break
} else {
varint64out[idx] = byte((n & 0x7F) | 0x80)
idx++
u := uint64(n)
n = int64(u >> 7)
}
}
return p.trans.Write(varint64out[0:idx])
}
// Convert l into a zigzag long. This allows negative numbers to be
// represented compactly as a varint.
func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
return (l << 1) ^ (l >> 63)
}
// Convert n into a zigzag int. This allows negative numbers to be
// represented compactly as a varint.
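// For example: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4.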
func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
return (n << 1) ^ (n >> 31)
}
func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
binary.LittleEndian.PutUint64(buf, n)
}
func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
binary.LittleEndian.PutUint64(buf, uint64(n))
}
// Writes a byte without any possibility of all that field header nonsense.
// Used internally by other writing methods that know they need to write a byte.
func (p *TCompactProtocol) writeByteDirect(b byte) error {
return p.trans.WriteByte(b)
}
// Writes a byte without any possibility of all that field header nonsense.
func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
return 1, p.writeByteDirect(byte(n))
}
//
// Internal reading methods
//
// Read an i32 from the wire as a varint. The MSB of each byte is set
// if there is another byte to follow. This can read up to 5 bytes.
func (p *TCompactProtocol) readVarint32() (int32, error) {
// if the wire contains the right stuff, this will just truncate the i64 we
// read and get us the right sign.
v, err := p.readVarint64()
return int32(v), err
}
// Read an i64 from the wire as a proper varint. The MSB of each byte is set
// if there is another byte to follow. This can read up to 10 bytes.
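// For example, the bytes 0xAC 0x02 decode to 300.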
func (p *TCompactProtocol) readVarint64() (int64, error) {
shift := uint(0)
result := int64(0)
for {
b, err := p.readByteDirect()
if err != nil {
return 0, err
}
result |= int64(b&0x7f) << shift
if (b & 0x80) != 0x80 {
break
}
shift += 7
}
return result, nil
}
// Read a raw byte, unlike ReadByte, which reads a Thrift byte (i8).
func (p *TCompactProtocol) readByteDirect() (byte, error) {
return p.trans.ReadByte()
}
//
// encoding helpers
//
// Convert from zigzag int to int.
func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
u := uint32(n)
return int32(u>>1) ^ -(n & 1)
}
// Convert from zigzag long to long.
func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
u := uint64(n)
return int64(u>>1) ^ -(n & 1)
}
// bytesToInt64 decodes 8 little-endian bytes into an int64.
func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
return int64(binary.LittleEndian.Uint64(b))
}
// bytesToUint64 decodes 8 little-endian bytes into a uint64.
func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
return binary.LittleEndian.Uint64(b)
}
//
// type testing and converting
//
func (p *TCompactProtocol) isBoolType(b byte) bool {
return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
}
// Given a tCompactType constant, convert it to its corresponding
// TType value.
func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
switch byte(t) & 0x0f {
case STOP:
return STOP, nil
case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
return BOOL, nil
case COMPACT_BYTE:
return BYTE, nil
case COMPACT_I16:
return I16, nil
case COMPACT_I32:
return I32, nil
case COMPACT_I64:
return I64, nil
case COMPACT_DOUBLE:
return DOUBLE, nil
case COMPACT_BINARY:
return STRING, nil
case COMPACT_LIST:
return LIST, nil
case COMPACT_SET:
return SET, nil
case COMPACT_MAP:
return MAP, nil
case COMPACT_STRUCT:
return STRUCT, nil
}
return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f))
}
// Given a TType value, find the appropriate TCompactProtocol.Types constant.
func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
return ttypeToCompactType[t]
}
func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) {
PropagateTConfiguration(p.trans, conf)
PropagateTConfiguration(p.origTransport, conf)
p.cfg = conf
}
var (
_ TConfigurationSetter = (*TCompactProtocolFactory)(nil)
_ TConfigurationSetter = (*TCompactProtocol)(nil)
)

View File

@@ -0,0 +1,378 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"crypto/tls"
"fmt"
"time"
)
// Default TConfiguration values.
const (
DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024
DEFAULT_MAX_FRAME_SIZE = 16384000
DEFAULT_TBINARY_STRICT_READ = false
DEFAULT_TBINARY_STRICT_WRITE = true
DEFAULT_CONNECT_TIMEOUT = 0
DEFAULT_SOCKET_TIMEOUT = 0
)
// TConfiguration defines some configurations shared between TTransport,
// TProtocol, TTransportFactory, TProtocolFactory, and other implementations.
//
// When constructing TConfiguration, you only need to specify the non-default
// fields. All zero values have sane default values.
//
// Not all configurations defined are applicable to all implementations.
// Implementations are free to ignore the configurations not applicable to them.
//
// All functions attached to this type are nil-safe.
//
// See [1] for spec.
//
// NOTE: When using TConfiguration, fill in all the configurations you want to
// set across the stack, not only the ones you want to set in the immediate
// TTransport/TProtocol.
//
// For example, say you want to migrate this old code into using TConfiguration:
//
// socket := thrift.NewTSocketTimeout("host:port", time.Second)
// transFactory := thrift.NewTFramedTransportFactoryMaxLength(
// thrift.NewTTransportFactory(),
// 1024 * 1024 * 256,
// )
// protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
//
// This is the wrong way to do it because in the end the TConfiguration used by
// socket and transFactory will be overwritten by the one used by protoFactory
// because of TConfiguration propagation:
//
// // bad example, DO NOT USE
// socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
// ConnectTimeout: time.Second,
// SocketTimeout: time.Second,
// })
// transFactory := thrift.NewTFramedTransportFactoryConf(
// thrift.NewTTransportFactory(),
// &thrift.TConfiguration{
// MaxFrameSize: 1024 * 1024 * 256,
// },
// )
// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
// TBinaryStrictRead: thrift.BoolPtr(true),
// TBinaryStrictWrite: thrift.BoolPtr(true),
// })
//
// This is the correct way to do it:
//
// conf := &thrift.TConfiguration{
// ConnectTimeout: time.Second,
// SocketTimeout: time.Second,
//
// MaxFrameSize: 1024 * 1024 * 256,
//
// TBinaryStrictRead: thrift.BoolPtr(true),
// TBinaryStrictWrite: thrift.BoolPtr(true),
// }
// socket := thrift.NewTSocketConf("host:port", conf)
// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
//
// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md
type TConfiguration struct {
// If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead.
MaxMessageSize int32
// If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead.
//
// Also if MaxMessageSize < MaxFrameSize,
// MaxMessageSize will be used instead.
MaxFrameSize int32
// Connect and socket timeouts to be used by TSocket and TSSLSocket.
//
// 0 means no timeout.
//
// If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be
// used.
ConnectTimeout time.Duration
SocketTimeout time.Duration
// TLS config to be used by TSSLSocket.
TLSConfig *tls.Config
// Strict read/write configurations for TBinaryProtocol.
//
// BoolPtr helper function is available to use literal values.
TBinaryStrictRead *bool
TBinaryStrictWrite *bool
// The wrapped protocol id to be used in THeader transport/protocol.
//
// THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions
// are provided to help filling this value.
THeaderProtocolID *THeaderProtocolID
// Used internally by deprecated constructors, to avoid overriding
// underlying TTransport/TProtocol's cfg by accidental propagations.
//
// For external users this is always false.
noPropagation bool
}
// GetMaxMessageSize returns the max message size an implementation should
// follow.
//
// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
func (tc *TConfiguration) GetMaxMessageSize() int32 {
if tc == nil || tc.MaxMessageSize <= 0 {
return DEFAULT_MAX_MESSAGE_SIZE
}
return tc.MaxMessageSize
}
// GetMaxFrameSize returns the max frame size an implementation should follow.
//
// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
//
// If the configured max message size is smaller than the configured max frame
// size, the smaller one will be returned instead.
func (tc *TConfiguration) GetMaxFrameSize() int32 {
if tc == nil {
return DEFAULT_MAX_FRAME_SIZE
}
maxFrameSize := tc.MaxFrameSize
if maxFrameSize <= 0 {
maxFrameSize = DEFAULT_MAX_FRAME_SIZE
}
if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
return maxMessageSize
}
return maxFrameSize
}
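// Illustrative sketch, not part of the upstream file: the getters above are
// nil-safe, so a nil *TConfiguration simply yields the package defaults.
func exampleNilSafeDefaults() {
	var cfg *TConfiguration // deliberately nil
	_ = cfg.GetMaxMessageSize() // DEFAULT_MAX_MESSAGE_SIZE
	_ = cfg.GetMaxFrameSize()   // DEFAULT_MAX_FRAME_SIZE
}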
// GetConnectTimeout returns the connect timeout that should be used by TSocket and
// TSSLSocket.
//
// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
func (tc *TConfiguration) GetConnectTimeout() time.Duration {
if tc == nil || tc.ConnectTimeout < 0 {
return DEFAULT_CONNECT_TIMEOUT
}
return tc.ConnectTimeout
}
// GetSocketTimeout returns the socket timeout that should be used by TSocket and
// TSSLSocket.
//
// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
func (tc *TConfiguration) GetSocketTimeout() time.Duration {
if tc == nil || tc.SocketTimeout < 0 {
return DEFAULT_SOCKET_TIMEOUT
}
return tc.SocketTimeout
}
// GetTLSConfig returns the TLS config that should be used by TSSLSocket.
//
// It's nil-safe. If tc is nil, nil will be returned instead.
func (tc *TConfiguration) GetTLSConfig() *tls.Config {
if tc == nil {
return nil
}
return tc.TLSConfig
}
// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol
// should follow.
//
// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
// tc.TBinaryStrictRead is nil.
func (tc *TConfiguration) GetTBinaryStrictRead() bool {
if tc == nil || tc.TBinaryStrictRead == nil {
return DEFAULT_TBINARY_STRICT_READ
}
return *tc.TBinaryStrictRead
}
// GetTBinaryStrictWrite returns the strict write configuration TBinaryProtocol
// should follow.
//
// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
// tc.TBinaryStrictWrite is nil.
func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
if tc == nil || tc.TBinaryStrictWrite == nil {
return DEFAULT_TBINARY_STRICT_WRITE
}
return *tc.TBinaryStrictWrite
}
// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by
// THeaderProtocol clients (for servers, they always use the same one as the
// client instead).
//
// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
// THeaderProtocolDefault will be returned instead.
// THeaderProtocolDefault will also be returned if configured value is invalid.
func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
if tc == nil || tc.THeaderProtocolID == nil {
return THeaderProtocolDefault
}
protoID := *tc.THeaderProtocolID
if err := protoID.Validate(); err != nil {
return THeaderProtocolDefault
}
return protoID
}
// THeaderProtocolIDPtr validates and returns the pointer to id.
//
// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
// and the validation error will be returned.
func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
err := id.Validate()
if err != nil {
id = THeaderProtocolDefault
}
return &id, err
}
// THeaderProtocolIDPtrMust validates and returns the pointer to id.
//
// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
// instead of returning them.
func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
ptr, err := THeaderProtocolIDPtr(id)
if err != nil {
panic(err)
}
return ptr
}
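// Illustrative sketch, not part of the upstream file; field values are
// arbitrary assumptions. It shows the helpers above filling the
// THeaderProtocolID field of a TConfiguration.
func exampleTHeaderProtocolIDConf() *TConfiguration {
	return &TConfiguration{
		MaxFrameSize:      1024 * 1024,
		THeaderProtocolID: THeaderProtocolIDPtrMust(THeaderProtocolCompact),
	}
}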
// TConfigurationSetter is an optional interface TProtocol, TTransport,
// TProtocolFactory, TTransportFactory, and other implementations can implement.
//
// It's intended to be called during initialization.
// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
// middle of a message is undefined:
// It may or may not change the behavior of the current processing message,
// and it may even cause the current message to fail.
//
// Note for implementations: SetTConfiguration might be called multiple times
// with the same value in quick successions due to the implementation of the
// propagation. Implementations should make SetTConfiguration as simple as
// possible (usually just overwrite the stored configuration and propagate it to
// the wrapped TTransports/TProtocols).
type TConfigurationSetter interface {
SetTConfiguration(*TConfiguration)
}
// PropagateTConfiguration propagates cfg to impl if impl implements
// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
//
// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
// with everything being default value, use &TConfiguration{} explicitly instead.
func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) {
if cfg == nil || cfg.noPropagation {
return
}
if setter, ok := impl.(TConfigurationSetter); ok {
setter.SetTConfiguration(cfg)
}
}
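// Illustrative sketch with a hypothetical wrapper type that is not in the
// upstream file: a transport wrapper implementing TConfigurationSetter so
// that PropagateTConfiguration can reach both it and the transport it wraps.
type exampleWrappingTransport struct {
	TTransport
	cfg *TConfiguration
}

func (t *exampleWrappingTransport) SetTConfiguration(cfg *TConfiguration) {
	// Keep a copy and continue the propagation to the wrapped transport.
	PropagateTConfiguration(t.TTransport, cfg)
	t.cfg = cfg
}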
func checkSizeForProtocol(size int32, cfg *TConfiguration) error {
if size < 0 {
return NewTProtocolExceptionWithType(
NEGATIVE_SIZE,
fmt.Errorf("negative size: %d", size),
)
}
if size > cfg.GetMaxMessageSize() {
return NewTProtocolExceptionWithType(
SIZE_LIMIT,
fmt.Errorf("size exceeded max allowed: %d", size),
)
}
return nil
}
type tTransportFactoryConf struct {
delegate TTransportFactory
cfg *TConfiguration
}
func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) {
trans, err := f.delegate.GetTransport(orig)
if err == nil {
PropagateTConfiguration(orig, f.cfg)
PropagateTConfiguration(trans, f.cfg)
}
return trans, err
}
func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(f.delegate, f.cfg)
f.cfg = cfg
}
// TTransportFactoryConf wraps a TTransportFactory to propagate
// TConfiguration on the factory's GetTransport calls.
func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory {
return &tTransportFactoryConf{
delegate: delegate,
cfg: conf,
}
}
type tProtocolFactoryConf struct {
delegate TProtocolFactory
cfg *TConfiguration
}
func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol {
proto := f.delegate.GetProtocol(trans)
PropagateTConfiguration(trans, f.cfg)
PropagateTConfiguration(proto, f.cfg)
return proto
}
func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(f.delegate, f.cfg)
f.cfg = cfg
}
// TProtocolFactoryConf wraps a TProtocolFactory to propagate
// TConfiguration on the factory's GetProtocol calls.
func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory {
return &tProtocolFactoryConf{
delegate: delegate,
cfg: conf,
}
}
var (
_ TConfigurationSetter = (*tTransportFactoryConf)(nil)
_ TConfigurationSetter = (*tProtocolFactoryConf)(nil)
)
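// Illustrative sketch, not part of the upstream file; the size limit is an
// arbitrary assumption. It shows wrapping existing factories so every
// transport/protocol they produce receives the shared TConfiguration via the
// conf wrappers above.
func exampleWrapFactories(tf TTransportFactory, pf TProtocolFactory) (TTransportFactory, TProtocolFactory) {
	conf := &TConfiguration{MaxMessageSize: 10 * 1024 * 1024}
	return TTransportFactoryConf(tf, conf), TProtocolFactoryConf(pf, conf)
}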

View File

@@ -0,0 +1,24 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "context"
var defaultCtx = context.Background()

View File

@@ -0,0 +1,116 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"errors"
)
// Generic Thrift exception
type TException interface {
error
TExceptionType() TExceptionType
}
// Prepends additional information to an error without losing the Thrift exception interface
func PrependError(prepend string, err error) error {
msg := prepend + err.Error()
var te TException
if errors.As(err, &te) {
switch te.TExceptionType() {
case TExceptionTypeTransport:
if t, ok := err.(TTransportException); ok {
return prependTTransportException(prepend, t)
}
case TExceptionTypeProtocol:
if t, ok := err.(TProtocolException); ok {
return prependTProtocolException(prepend, t)
}
case TExceptionTypeApplication:
var t TApplicationException
if errors.As(err, &t) {
return NewTApplicationException(t.TypeId(), msg)
}
}
return wrappedTException{
err: err,
msg: msg,
tExceptionType: te.TExceptionType(),
}
}
return errors.New(msg)
}
// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
type TExceptionType byte
// TExceptionType values
const (
TExceptionTypeUnknown TExceptionType = iota
TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler
TExceptionTypeApplication // TApplicationExceptions
TExceptionTypeProtocol // TProtocolExceptions
TExceptionTypeTransport // TTransportExceptions
)
// WrapTException wraps an error into TException.
//
// If err is nil or already a TException, it's returned as-is.
// Otherwise it will be wrapped into a TException with TExceptionType() returning
// TExceptionTypeUnknown, and Unwrap() returning the original error.
func WrapTException(err error) TException {
if err == nil {
return nil
}
if te, ok := err.(TException); ok {
return te
}
return wrappedTException{
err: err,
msg: err.Error(),
tExceptionType: TExceptionTypeUnknown,
}
}
type wrappedTException struct {
err error
msg string
tExceptionType TExceptionType
}
func (w wrappedTException) Error() string {
return w.msg
}
func (w wrappedTException) TExceptionType() TExceptionType {
return w.tExceptionType
}
func (w wrappedTException) Unwrap() error {
return w.err
}
var _ TException = wrappedTException{}
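// Illustrative sketch, not part of the upstream file; the error message is
// hypothetical. It shows wrapping a plain error and prepending context to it
// with the helpers above.
func exampleWrapPlainError() error {
	plain := errors.New("connection reset")
	te := WrapTException(plain) // te.TExceptionType() == TExceptionTypeUnknown
	return PrependError("while reading frame: ", te)
}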

View File

@@ -0,0 +1,110 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
)
// See https://pkg.go.dev/context#WithValue on why we need the unexported typedefs.
type (
headerKey string
headerKeyList int
)
// Values for headerKeyList.
const (
headerKeyListRead headerKeyList = iota
headerKeyListWrite
)
// SetHeader sets a header in the context.
func SetHeader(ctx context.Context, key, value string) context.Context {
return context.WithValue(
ctx,
headerKey(key),
value,
)
}
// UnsetHeader unsets a previously set header in the context.
func UnsetHeader(ctx context.Context, key string) context.Context {
return context.WithValue(
ctx,
headerKey(key),
nil,
)
}
// GetHeader returns a value of the given header from the context.
func GetHeader(ctx context.Context, key string) (value string, ok bool) {
if v := ctx.Value(headerKey(key)); v != nil {
value, ok = v.(string)
}
return
}
// SetReadHeaderList sets the key list of read THeaders in the context.
func SetReadHeaderList(ctx context.Context, keys []string) context.Context {
return context.WithValue(
ctx,
headerKeyListRead,
keys,
)
}
// GetReadHeaderList returns the key list of read THeaders from the context.
func GetReadHeaderList(ctx context.Context) []string {
if v := ctx.Value(headerKeyListRead); v != nil {
if value, ok := v.([]string); ok {
return value
}
}
return nil
}
// SetWriteHeaderList sets the key list of THeaders to write in the context.
func SetWriteHeaderList(ctx context.Context, keys []string) context.Context {
return context.WithValue(
ctx,
headerKeyListWrite,
keys,
)
}
// GetWriteHeaderList returns the key list of THeaders to write from the context.
func GetWriteHeaderList(ctx context.Context) []string {
if v := ctx.Value(headerKeyListWrite); v != nil {
if value, ok := v.([]string); ok {
return value
}
}
return nil
}
// AddReadTHeaderToContext adds the whole THeader headers into context.
func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context {
keys := make([]string, 0, len(headers))
for key, value := range headers {
ctx = SetHeader(ctx, key, value)
keys = append(keys, key)
}
return SetReadHeaderList(ctx, keys)
}
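// Illustrative sketch, not part of the upstream file; the header name and
// value are hypothetical. It shows setting a header on an outgoing request
// context, marking it for writing, and reading it back.
func exampleHeaderContextUsage(ctx context.Context) context.Context {
	ctx = SetHeader(ctx, "request-id", "12345")
	ctx = SetWriteHeaderList(ctx, []string{"request-id"})
	if v, ok := GetHeader(ctx, "request-id"); ok {
		_ = v // "12345"
	}
	return ctx
}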

View File

@@ -0,0 +1,351 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"errors"
)
// THeaderProtocol is a thrift protocol that implements THeader:
// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
//
// It supports either binary or compact protocol as the wrapped protocol.
//
// Most of the THeader handlings are happening inside THeaderTransport.
type THeaderProtocol struct {
transport *THeaderTransport
// Will be initialized on first read/write.
protocol TProtocol
cfg *TConfiguration
}
// Deprecated: Use NewTHeaderProtocolConf instead.
func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
return newTHeaderProtocolConf(trans, &TConfiguration{
noPropagation: true,
})
}
// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
// transport with given TConfiguration.
//
// The passed in transport will be wrapped with THeaderTransport.
//
// Note that THeaderTransport handles frame and zlib by itself,
// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
// instead of rich transports like TZlibTransport or TFramedTransport.
func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
return newTHeaderProtocolConf(trans, conf)
}
func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
t := NewTHeaderTransportConf(trans, cfg)
p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
PropagateTConfiguration(p, cfg)
return &THeaderProtocol{
transport: t,
protocol: p,
cfg: cfg,
}
}
type tHeaderProtocolFactory struct {
cfg *TConfiguration
}
func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return newTHeaderProtocolConf(trans, f.cfg)
}
func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
f.cfg = cfg
}
// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
func NewTHeaderProtocolFactory() TProtocolFactory {
return NewTHeaderProtocolFactoryConf(&TConfiguration{
noPropagation: true,
})
}
// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
// TConfiguration.
func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
return tHeaderProtocolFactory{
cfg: conf,
}
}
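// Illustrative sketch, not part of the upstream file. It assumes the
// NewTSocketConf constructor from this package's socket implementation, and
// the address is hypothetical. It shows building a THeaderProtocol client
// over a raw socket, per the note above about avoiding framed/zlib transports
// underneath.
func exampleNewHeaderClientProtocol() (*THeaderProtocol, error) {
	conf := &TConfiguration{
		THeaderProtocolID: THeaderProtocolIDPtrMust(THeaderProtocolCompact),
	}
	sock := NewTSocketConf("localhost:9090", conf)
	if err := sock.Open(); err != nil {
		return nil, err
	}
	return NewTHeaderProtocolConf(sock, conf), nil
}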
// Transport returns the underlying transport.
//
// It's guaranteed to be of type *THeaderTransport.
func (p *THeaderProtocol) Transport() TTransport {
return p.transport
}
// GetReadHeaders returns the THeaderMap read from transport.
func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
return p.transport.GetReadHeaders()
}
// SetWriteHeader sets a header for write.
func (p *THeaderProtocol) SetWriteHeader(key, value string) {
p.transport.SetWriteHeader(key, value)
}
// ClearWriteHeaders clears all write headers previously set.
func (p *THeaderProtocol) ClearWriteHeaders() {
p.transport.ClearWriteHeaders()
}
// AddTransform adds a transform for writing.
func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
return p.transport.AddTransform(transform)
}
func (p *THeaderProtocol) Flush(ctx context.Context) error {
return p.transport.Flush(ctx)
}
func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error {
newProto, err := p.transport.Protocol().GetProtocol(p.transport)
if err != nil {
return err
}
PropagateTConfiguration(newProto, p.cfg)
p.protocol = newProto
p.transport.SequenceID = seqID
return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID)
}
func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error {
if err := p.protocol.WriteMessageEnd(ctx); err != nil {
return err
}
return p.transport.Flush(ctx)
}
func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error {
return p.protocol.WriteStructBegin(ctx, name)
}
func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error {
return p.protocol.WriteStructEnd(ctx)
}
func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error {
return p.protocol.WriteFieldBegin(ctx, name, typeID, id)
}
func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error {
return p.protocol.WriteFieldEnd(ctx)
}
func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error {
return p.protocol.WriteFieldStop(ctx)
}
func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
return p.protocol.WriteMapBegin(ctx, keyType, valueType, size)
}
func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error {
return p.protocol.WriteMapEnd(ctx)
}
func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
return p.protocol.WriteListBegin(ctx, elemType, size)
}
func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error {
return p.protocol.WriteListEnd(ctx)
}
func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
return p.protocol.WriteSetBegin(ctx, elemType, size)
}
func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error {
return p.protocol.WriteSetEnd(ctx)
}
func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error {
return p.protocol.WriteBool(ctx, value)
}
func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error {
return p.protocol.WriteByte(ctx, value)
}
func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error {
return p.protocol.WriteI16(ctx, value)
}
func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error {
return p.protocol.WriteI32(ctx, value)
}
func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error {
return p.protocol.WriteI64(ctx, value)
}
func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error {
return p.protocol.WriteDouble(ctx, value)
}
func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error {
return p.protocol.WriteString(ctx, value)
}
func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error {
return p.protocol.WriteBinary(ctx, value)
}
// ReadFrame calls underlying THeaderTransport's ReadFrame function.
func (p *THeaderProtocol) ReadFrame(ctx context.Context) error {
return p.transport.ReadFrame(ctx)
}
func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) {
if err = p.transport.ReadFrame(ctx); err != nil {
return
}
var newProto TProtocol
newProto, err = p.transport.Protocol().GetProtocol(p.transport)
if err != nil {
var tAppExc TApplicationException
if !errors.As(err, &tAppExc) {
return
}
if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil {
return
}
if e := tAppExc.Write(ctx, p.protocol); e != nil {
return
}
if e := p.protocol.WriteMessageEnd(ctx); e != nil {
return
}
if e := p.transport.Flush(ctx); e != nil {
return
}
return
}
PropagateTConfiguration(newProto, p.cfg)
p.protocol = newProto
return p.protocol.ReadMessageBegin(ctx)
}
func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error {
return p.protocol.ReadMessageEnd(ctx)
}
func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
return p.protocol.ReadStructBegin(ctx)
}
func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error {
return p.protocol.ReadStructEnd(ctx)
}
func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) {
return p.protocol.ReadFieldBegin(ctx)
}
func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error {
return p.protocol.ReadFieldEnd(ctx)
}
func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
return p.protocol.ReadMapBegin(ctx)
}
func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error {
return p.protocol.ReadMapEnd(ctx)
}
func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
return p.protocol.ReadListBegin(ctx)
}
func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error {
return p.protocol.ReadListEnd(ctx)
}
func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
return p.protocol.ReadSetBegin(ctx)
}
func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error {
return p.protocol.ReadSetEnd(ctx)
}
func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) {
return p.protocol.ReadBool(ctx)
}
func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) {
return p.protocol.ReadByte(ctx)
}
func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) {
return p.protocol.ReadI16(ctx)
}
func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) {
return p.protocol.ReadI32(ctx)
}
func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) {
return p.protocol.ReadI64(ctx)
}
func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
return p.protocol.ReadDouble(ctx)
}
func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) {
return p.protocol.ReadString(ctx)
}
func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
return p.protocol.ReadBinary(ctx)
}
func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error {
return p.protocol.Skip(ctx, fieldType)
}
// SetTConfiguration implements TConfigurationSetter.
func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(p.transport, cfg)
PropagateTConfiguration(p.protocol, cfg)
p.cfg = cfg
}
var (
_ TConfigurationSetter = (*tHeaderProtocolFactory)(nil)
_ TConfigurationSetter = (*THeaderProtocol)(nil)
)

View File

@@ -0,0 +1,810 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
"bytes"
"compress/zlib"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
)
// Size in bytes for 32-bit ints.
const size32 = 4
type headerMeta struct {
MagicFlags uint32
SequenceID int32
HeaderLength uint16
}
const headerMetaSize = 10
type clientType int
const (
clientUnknown clientType = iota
clientHeaders
clientFramedBinary
clientUnframedBinary
clientFramedCompact
clientUnframedCompact
)
// Constants defined in THeader format:
// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
const (
THeaderHeaderMagic uint32 = 0x0fff0000
THeaderHeaderMask uint32 = 0xffff0000
THeaderFlagsMask uint32 = 0x0000ffff
THeaderMaxFrameSize uint32 = 0x3fffffff
)
// THeaderMap is the type of the header map in THeader transport.
type THeaderMap map[string]string
// THeaderProtocolID is the wrapped protocol id used in THeader.
type THeaderProtocolID int32
// Supported THeaderProtocolID values.
const (
THeaderProtocolBinary THeaderProtocolID = 0x00
THeaderProtocolCompact THeaderProtocolID = 0x02
THeaderProtocolDefault = THeaderProtocolBinary
)
// Declared globally to avoid repetitive allocations, not really used.
var globalMemoryBuffer = NewTMemoryBuffer()
// Validate checks whether the THeaderProtocolID is a valid/supported one.
func (id THeaderProtocolID) Validate() error {
_, err := id.GetProtocol(globalMemoryBuffer)
return err
}
// GetProtocol gets the corresponding TProtocol from the wrapped protocol id.
func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
switch id {
default:
return nil, NewTApplicationException(
INVALID_PROTOCOL,
fmt.Sprintf("THeader protocol id %d not supported", id),
)
case THeaderProtocolBinary:
return NewTBinaryProtocolTransport(trans), nil
case THeaderProtocolCompact:
return NewTCompactProtocol(trans), nil
}
}
// THeaderTransformID defines the numeric id of the transform used.
type THeaderTransformID int32
// THeaderTransformID values.
//
// Values not defined here are not currently supported, namely HMAC and Snappy.
const (
TransformNone THeaderTransformID = iota // 0, no special handling
TransformZlib // 1, zlib
)
var supportedTransformIDs = map[THeaderTransformID]bool{
TransformNone: true,
TransformZlib: true,
}
// TransformReader is an io.ReadCloser that handles transforms reading.
type TransformReader struct {
io.Reader
closers []io.Closer
}
var _ io.ReadCloser = (*TransformReader)(nil)
// NewTransformReaderWithCapacity initializes a TransformReader with expected
// closers capacity.
//
// If you don't know the closers capacity beforehand, just using
//
// &TransformReader{Reader: baseReader}
//
// instead would be sufficient.
func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader {
return &TransformReader{
Reader: baseReader,
closers: make([]io.Closer, 0, capacity),
}
}
// Close calls the underlying closers in appropriate order,
// stops at and returns the first error encountered.
func (tr *TransformReader) Close() error {
// Call closers in reversed order
for i := len(tr.closers) - 1; i >= 0; i-- {
if err := tr.closers[i].Close(); err != nil {
return err
}
}
return nil
}
// AddTransform adds a transform.
func (tr *TransformReader) AddTransform(id THeaderTransformID) error {
switch id {
default:
return NewTApplicationException(
INVALID_TRANSFORM,
fmt.Sprintf("THeaderTransformID %d not supported", id),
)
case TransformNone:
// no-op
case TransformZlib:
readCloser, err := zlib.NewReader(tr.Reader)
if err != nil {
return err
}
tr.Reader = readCloser
tr.closers = append(tr.closers, readCloser)
}
return nil
}
// TransformWriter is an io.WriteCloser that handles transforms writing.
type TransformWriter struct {
io.Writer
closers []io.Closer
}
var _ io.WriteCloser = (*TransformWriter)(nil)
// NewTransformWriter creates a new TransformWriter with base writer and transforms.
func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) {
writer := &TransformWriter{
Writer: baseWriter,
closers: make([]io.Closer, 0, len(transforms)),
}
for _, id := range transforms {
if err := writer.AddTransform(id); err != nil {
return nil, err
}
}
return writer, nil
}
// Close calls the underlying closers in appropriate order,
// stops at and returns the first error encountered.
func (tw *TransformWriter) Close() error {
// Call closers in reversed order
for i := len(tw.closers) - 1; i >= 0; i-- {
if err := tw.closers[i].Close(); err != nil {
return err
}
}
return nil
}
// AddTransform adds a transform.
func (tw *TransformWriter) AddTransform(id THeaderTransformID) error {
switch id {
default:
return NewTApplicationException(
INVALID_TRANSFORM,
fmt.Sprintf("THeaderTransformID %d not supported", id),
)
case TransformNone:
// no-op
case TransformZlib:
writeCloser := zlib.NewWriter(tw.Writer)
tw.Writer = writeCloser
tw.closers = append(tw.closers, writeCloser)
}
return nil
}
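// Illustrative sketch, not part of the upstream file; the payload is
// arbitrary. It shows compressing data through TransformWriter and reading it
// back through TransformReader with the zlib transform defined above.
func exampleTransformRoundTrip() ([]byte, error) {
	var compressed bytes.Buffer
	w, err := NewTransformWriter(&compressed, []THeaderTransformID{TransformZlib})
	if err != nil {
		return nil, err
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	r := NewTransformReaderWithCapacity(&compressed, 1)
	if err := r.AddTransform(TransformZlib); err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}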
// THeaderInfoType is the type id of the info headers.
type THeaderInfoType int32
// Supported THeaderInfoType values.
const (
_ THeaderInfoType = iota // Skip 0
InfoKeyValue // 1
// Rest of the info types are not supported.
)
// THeaderTransport is a Transport mode that implements THeader.
//
// Note that THeaderTransport handles frame and zlib by itself,
// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
// instead of rich transports like TZlibTransport or TFramedTransport.
type THeaderTransport struct {
SequenceID int32
Flags uint32
transport TTransport
// THeaderMap for read and write
readHeaders THeaderMap
writeHeaders THeaderMap
// Reading related variables.
reader *bufio.Reader
// When frame is detected, we read the frame fully into frameBuffer.
frameBuffer bytes.Buffer
// When it's non-nil, Read should read from frameReader instead of
// reader, and an EOF error indicates the end of the frame rather than the
// end of the whole transport.
frameReader io.ReadCloser
// Writing related variables
writeBuffer bytes.Buffer
writeTransforms []THeaderTransformID
clientType clientType
protocolID THeaderProtocolID
cfg *TConfiguration
// buffer is used in the following scenarios to avoid repetitive
// allocations, while 4 is big enough for all those scenarios:
//
// * header padding (max size 4)
// * write the frame size (size 4)
buffer [4]byte
}
var _ TTransport = (*THeaderTransport)(nil)
// Deprecated: Use NewTHeaderTransportConf instead.
func NewTHeaderTransport(trans TTransport) *THeaderTransport {
return NewTHeaderTransportConf(trans, &TConfiguration{
noPropagation: true,
})
}
// NewTHeaderTransportConf creates THeaderTransport from the
// underlying transport, with given TConfiguration attached.
//
// If trans is already a *THeaderTransport, it will be returned as is,
// but with TConfiguration overridden by the value passed in.
//
// The protocol ID in TConfiguration is only useful for client transports.
// For servers,
// the protocol ID will be overridden again to the one set by the client,
// to ensure that servers always speak the same dialect as the client.
func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport {
if ht, ok := trans.(*THeaderTransport); ok {
ht.SetTConfiguration(conf)
return ht
}
PropagateTConfiguration(trans, conf)
return &THeaderTransport{
transport: trans,
reader: bufio.NewReader(trans),
writeHeaders: make(THeaderMap),
protocolID: conf.GetTHeaderProtocolID(),
cfg: conf,
}
}
// Open calls the underlying transport's Open function.
func (t *THeaderTransport) Open() error {
return t.transport.Open()
}
// IsOpen calls the underlying transport's IsOpen function.
func (t *THeaderTransport) IsOpen() bool {
return t.transport.IsOpen()
}
// ReadFrame tries to read the frame header, guess the client type, and handle
// unframed clients.
func (t *THeaderTransport) ReadFrame(ctx context.Context) error {
if !t.needReadFrame() {
// No need to read frame, skipping.
return nil
}
// Peek and handle the first 32 bits.
// They could either be the length field of a framed message,
// or the first bytes of an unframed message.
var buf []byte
var err error
// This is also usually the first read from a connection,
// so handle retries around socket timeouts.
_, deadlineSet := ctx.Deadline()
for {
buf, err = t.reader.Peek(size32)
if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
// This is an I/O timeout and we still have time,
// so keep trying.
continue
}
// For anything else, do not retry
break
}
if err != nil {
return err
}
frameSize := binary.BigEndian.Uint32(buf)
if frameSize&VERSION_MASK == VERSION_1 {
t.clientType = clientUnframedBinary
return nil
}
if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
t.clientType = clientUnframedCompact
return nil
}
// At this point it should be a framed message,
// sanity check on frameSize then discard the peeked part.
if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) {
return NewTProtocolExceptionWithType(
SIZE_LIMIT,
errors.New("frame too large"),
)
}
t.reader.Discard(size32)
// Read the frame fully into frameBuffer.
_, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize))
if err != nil {
return err
}
t.frameReader = ioutil.NopCloser(&t.frameBuffer)
// Peek and handle the next 32 bits.
buf = t.frameBuffer.Bytes()[:size32]
version := binary.BigEndian.Uint32(buf)
if version&THeaderHeaderMask == THeaderHeaderMagic {
t.clientType = clientHeaders
return t.parseHeaders(ctx, frameSize)
}
if version&VERSION_MASK == VERSION_1 {
t.clientType = clientFramedBinary
return nil
}
if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
t.clientType = clientFramedCompact
return nil
}
if err := t.endOfFrame(); err != nil {
return err
}
return NewTProtocolExceptionWithType(
NOT_IMPLEMENTED,
errors.New("unsupported client transport type"),
)
}
// endOfFrame does end of frame handling.
//
// It closes frameReader, and also resets frame related states.
func (t *THeaderTransport) endOfFrame() error {
defer func() {
t.frameBuffer.Reset()
t.frameReader = nil
}()
return t.frameReader.Close()
}
func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error {
if t.clientType != clientHeaders {
return nil
}
var err error
var meta headerMeta
if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil {
return err
}
frameSize -= headerMetaSize
t.Flags = meta.MagicFlags & THeaderFlagsMask
t.SequenceID = meta.SequenceID
headerLength := int64(meta.HeaderLength) * 4
if int64(frameSize) < headerLength {
return NewTProtocolExceptionWithType(
SIZE_LIMIT,
errors.New("header size is larger than the whole frame"),
)
}
headerBuf := NewTMemoryBuffer()
_, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength)
if err != nil {
return err
}
hp := NewTCompactProtocol(headerBuf)
hp.SetTConfiguration(t.cfg)
// At this point the header is already read into headerBuf,
// and t.frameBuffer starts from the actual payload.
protoID, err := hp.readVarint32()
if err != nil {
return err
}
t.protocolID = THeaderProtocolID(protoID)
var transformCount int32
transformCount, err = hp.readVarint32()
if err != nil {
return err
}
if transformCount > 0 {
reader := NewTransformReaderWithCapacity(
&t.frameBuffer,
int(transformCount),
)
t.frameReader = reader
transformIDs := make([]THeaderTransformID, transformCount)
for i := 0; i < int(transformCount); i++ {
id, err := hp.readVarint32()
if err != nil {
return err
}
transformIDs[i] = THeaderTransformID(id)
}
// The transform IDs on the wire were added based on the order of
// writing, so on the reading side we need to reverse the order.
for i := transformCount - 1; i >= 0; i-- {
id := transformIDs[i]
if err := reader.AddTransform(id); err != nil {
return err
}
}
}
// The info part does not use the transforms yet, so it's
// important to continue using headerBuf.
headers := make(THeaderMap)
for {
infoType, err := hp.readVarint32()
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return err
}
if THeaderInfoType(infoType) == InfoKeyValue {
count, err := hp.readVarint32()
if err != nil {
return err
}
for i := 0; i < int(count); i++ {
key, err := hp.ReadString(ctx)
if err != nil {
return err
}
value, err := hp.ReadString(ctx)
if err != nil {
return err
}
headers[key] = value
}
} else {
// Skip reading info section on the first
// unsupported info type.
break
}
}
t.readHeaders = headers
return nil
}
func (t *THeaderTransport) needReadFrame() bool {
if t.clientType == clientUnknown {
// This is a new connection that's never read before.
return true
}
if t.isFramed() && t.frameReader == nil {
// We just finished the last frame.
return true
}
return false
}
func (t *THeaderTransport) Read(p []byte) (read int, err error) {
// Here using context.Background instead of a passed-in context is safe.
// First, there's no way to pass a context into this function.
// Second, in the vast majority of calls the frame has already been read
// into frameReader, so the ReadFrame call here mostly guards against
// callers that forgot to call ReadFrame before calling Read.
err = t.ReadFrame(context.Background())
if err != nil {
return
}
if t.frameReader != nil {
read, err = t.frameReader.Read(p)
if err == nil && t.frameBuffer.Len() <= 0 {
// the last Read finished the frame, do endOfFrame
// handling here.
err = t.endOfFrame()
} else if err == io.EOF {
err = t.endOfFrame()
if err != nil {
return
}
if read == 0 {
// Try to read the next frame when we hit EOF
// (end of frame) immediately.
// When we got here, it means the last read
// finished the previous frame, but didn't
// do endOfFrame handling yet.
// We have to read the next frame here,
// as otherwise we would return 0 and nil,
// which is a case not handled well by most
// protocol implementations.
return t.Read(p)
}
}
return
}
return t.reader.Read(p)
}
// Write writes data to the write buffer.
//
// You need to call Flush to actually write them to the transport.
func (t *THeaderTransport) Write(p []byte) (int, error) {
return t.writeBuffer.Write(p)
}
// Flush writes the appropriate header and the write buffer to the underlying transport.
func (t *THeaderTransport) Flush(ctx context.Context) error {
if t.writeBuffer.Len() == 0 {
return nil
}
defer t.writeBuffer.Reset()
switch t.clientType {
default:
fallthrough
case clientUnknown:
t.clientType = clientHeaders
fallthrough
case clientHeaders:
headers := NewTMemoryBuffer()
hp := NewTCompactProtocol(headers)
hp.SetTConfiguration(t.cfg)
if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil {
return NewTTransportExceptionFromError(err)
}
if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil {
return NewTTransportExceptionFromError(err)
}
for _, transform := range t.writeTransforms {
if _, err := hp.writeVarint32(int32(transform)); err != nil {
return NewTTransportExceptionFromError(err)
}
}
if len(t.writeHeaders) > 0 {
if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil {
return NewTTransportExceptionFromError(err)
}
if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil {
return NewTTransportExceptionFromError(err)
}
for key, value := range t.writeHeaders {
if err := hp.WriteString(ctx, key); err != nil {
return NewTTransportExceptionFromError(err)
}
if err := hp.WriteString(ctx, value); err != nil {
return NewTTransportExceptionFromError(err)
}
}
}
padding := 4 - headers.Len()%4
if padding < 4 {
buf := t.buffer[:padding]
for i := range buf {
buf[i] = 0
}
if _, err := headers.Write(buf); err != nil {
return NewTTransportExceptionFromError(err)
}
}
var payload bytes.Buffer
meta := headerMeta{
MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask,
SequenceID: t.SequenceID,
HeaderLength: uint16(headers.Len() / 4),
}
if err := binary.Write(&payload, binary.BigEndian, meta); err != nil {
return NewTTransportExceptionFromError(err)
}
if _, err := io.Copy(&payload, headers); err != nil {
return NewTTransportExceptionFromError(err)
}
writer, err := NewTransformWriter(&payload, t.writeTransforms)
if err != nil {
return NewTTransportExceptionFromError(err)
}
if _, err := io.Copy(writer, &t.writeBuffer); err != nil {
return NewTTransportExceptionFromError(err)
}
if err := writer.Close(); err != nil {
return NewTTransportExceptionFromError(err)
}
// First write frame length
buf := t.buffer[:size32]
binary.BigEndian.PutUint32(buf, uint32(payload.Len()))
if _, err := t.transport.Write(buf); err != nil {
return NewTTransportExceptionFromError(err)
}
// Then write the payload
if _, err := io.Copy(t.transport, &payload); err != nil {
return NewTTransportExceptionFromError(err)
}
case clientFramedBinary, clientFramedCompact:
buf := t.buffer[:size32]
binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len()))
if _, err := t.transport.Write(buf); err != nil {
return NewTTransportExceptionFromError(err)
}
fallthrough
case clientUnframedBinary, clientUnframedCompact:
if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil {
return NewTTransportExceptionFromError(err)
}
}
select {
default:
case <-ctx.Done():
return NewTTransportExceptionFromError(ctx.Err())
}
return t.transport.Flush(ctx)
}
// Close closes the transport, along with its underlying transport.
func (t *THeaderTransport) Close() error {
if err := t.Flush(context.Background()); err != nil {
return err
}
return t.transport.Close()
}
// RemainingBytes calls underlying transport's RemainingBytes.
//
// Even in framed cases, because of all the possible compression transforms
// involved, the remaining frame size is likely to be different from the actual
// remaining readable bytes, so we don't bother to keep tracking the remaining
// frame size by ourselves and just use the underlying transport's
// RemainingBytes directly.
func (t *THeaderTransport) RemainingBytes() uint64 {
return t.transport.RemainingBytes()
}
// GetReadHeaders returns the THeaderMap read from transport.
func (t *THeaderTransport) GetReadHeaders() THeaderMap {
return t.readHeaders
}
// SetWriteHeader sets a header for write.
func (t *THeaderTransport) SetWriteHeader(key, value string) {
t.writeHeaders[key] = value
}
// ClearWriteHeaders clears all write headers previously set.
func (t *THeaderTransport) ClearWriteHeaders() {
t.writeHeaders = make(THeaderMap)
}
// AddTransform adds a transform for writing.
func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error {
if !supportedTransformIDs[transform] {
return NewTProtocolExceptionWithType(
NOT_IMPLEMENTED,
fmt.Errorf("THeaderTransformID %d not supported", transform),
)
}
t.writeTransforms = append(t.writeTransforms, transform)
return nil
}
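// Illustrative sketch, not part of the upstream file; the header key/value
// and payload are hypothetical. It shows attaching a write header and a zlib
// transform to a THeaderTransport before flushing a payload.
func exampleHeaderTransportWrite(ctx context.Context, t *THeaderTransport) error {
	t.SetWriteHeader("client-id", "example-client")
	if err := t.AddTransform(TransformZlib); err != nil {
		return err
	}
	if _, err := t.Write([]byte("payload")); err != nil {
		return err
	}
	return t.Flush(ctx)
}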
// Protocol returns the wrapped protocol id used in this THeaderTransport.
func (t *THeaderTransport) Protocol() THeaderProtocolID {
switch t.clientType {
default:
return t.protocolID
case clientFramedBinary, clientUnframedBinary:
return THeaderProtocolBinary
case clientFramedCompact, clientUnframedCompact:
return THeaderProtocolCompact
}
}
func (t *THeaderTransport) isFramed() bool {
switch t.clientType {
default:
return false
case clientHeaders, clientFramedBinary, clientFramedCompact:
return true
}
}
// SetTConfiguration implements TConfigurationSetter.
func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(t.transport, cfg)
t.cfg = cfg
}
// THeaderTransportFactory is a TTransportFactory implementation to create
// THeaderTransport.
//
// It also implements TConfigurationSetter.
type THeaderTransportFactory struct {
// The underlying factory, could be nil.
Factory TTransportFactory
cfg *TConfiguration
}
// Deprecated: Use NewTHeaderTransportFactoryConf instead.
func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory {
return NewTHeaderTransportFactoryConf(factory, &TConfiguration{
noPropagation: true,
})
}
// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with
// the given *TConfiguration.
func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
return &THeaderTransportFactory{
Factory: factory,
cfg: conf,
}
}
// GetTransport implements TTransportFactory.
func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
if f.Factory != nil {
t, err := f.Factory.GetTransport(trans)
if err != nil {
return nil, err
}
return NewTHeaderTransportConf(t, f.cfg), nil
}
return NewTHeaderTransportConf(trans, f.cfg), nil
}
// SetTConfiguration implements TConfigurationSetter.
func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(f.Factory, f.cfg)
f.cfg = cfg
}
var (
_ TConfigurationSetter = (*THeaderTransportFactory)(nil)
_ TConfigurationSetter = (*THeaderTransport)(nil)
)

View File

@@ -0,0 +1,59 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"log"
"os"
)
// Logger is a simple wrapper of a logging function.
//
// In reality the users might actually use different logging libraries, and they
// are not always compatible with each other.
//
// Logger is meant to be a simple common ground that it's easy to wrap whatever
// logging library they use into.
//
// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
// discussion behind it.
type Logger func(msg string)
// NopLogger is a Logger implementation that does nothing.
func NopLogger(msg string) {}
// StdLogger wraps stdlib log package into a Logger.
//
// If the logger passed in is nil, it falls back to stderr with default flags.
func StdLogger(logger *log.Logger) Logger {
if logger == nil {
logger = log.New(os.Stderr, "", log.LstdFlags)
}
return func(msg string) {
logger.Print(msg)
}
}
func fallbackLogger(logger Logger) Logger {
if logger == nil {
return StdLogger(nil)
}
return logger
}
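// Illustrative sketch, not part of the upstream file; the prefix and messages
// are arbitrary. It shows adapting the standard library logger with StdLogger,
// next to the no-op NopLogger.
func exampleLoggers() {
	verbose := StdLogger(log.New(os.Stderr, "thrift: ", log.LstdFlags))
	quiet := Logger(NopLogger)
	verbose("transport opened")
	quiet("this message is dropped")
}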

View File

@@ -0,0 +1,80 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bytes"
"context"
)
// Memory buffer-based implementation of the TTransport interface.
type TMemoryBuffer struct {
*bytes.Buffer
size int
}
type TMemoryBufferTransportFactory struct {
size int
}
func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
if trans != nil {
t, ok := trans.(*TMemoryBuffer)
if ok && t.size > 0 {
return NewTMemoryBufferLen(t.size), nil
}
}
return NewTMemoryBufferLen(p.size), nil
}
func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
return &TMemoryBufferTransportFactory{size: size}
}
func NewTMemoryBuffer() *TMemoryBuffer {
return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
}
func NewTMemoryBufferLen(size int) *TMemoryBuffer {
buf := make([]byte, 0, size)
return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
}
func (p *TMemoryBuffer) IsOpen() bool {
return true
}
func (p *TMemoryBuffer) Open() error {
return nil
}
func (p *TMemoryBuffer) Close() error {
p.Buffer.Reset()
return nil
}
// Flushing a memory buffer is a no-op
func (p *TMemoryBuffer) Flush(ctx context.Context) error {
return nil
}
func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
return uint64(p.Buffer.Len())
}
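// Illustrative sketch, not part of the upstream file; the value is arbitrary.
// It shows using TMemoryBuffer as an in-memory transport for a
// compact-protocol round trip.
func exampleMemoryBufferRoundTrip(ctx context.Context) (int32, error) {
	buf := NewTMemoryBufferLen(64)
	proto := NewTCompactProtocol(buf)
	if err := proto.WriteI32(ctx, 42); err != nil {
		return 0, err
	}
	return proto.ReadI32(ctx)
}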

View File

@@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
// Message type constants in the Thrift protocol.
type TMessageType int32
const (
INVALID_TMESSAGE_TYPE TMessageType = 0
CALL TMessageType = 1
REPLY TMessageType = 2
EXCEPTION TMessageType = 3
ONEWAY TMessageType = 4
)

View File

@@ -0,0 +1,164 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"math"
"strconv"
)
type Numeric interface {
Int64() int64
Int32() int32
Int16() int16
Byte() byte
Int() int
Float64() float64
Float32() float32
String() string
isNull() bool
}
type numeric struct {
iValue int64
dValue float64
sValue string
isNil bool
}
var (
INFINITY Numeric
NEGATIVE_INFINITY Numeric
NAN Numeric
ZERO Numeric
NUMERIC_NULL Numeric
)
func NewNumericFromDouble(dValue float64) Numeric {
if math.IsInf(dValue, 1) {
return INFINITY
}
if math.IsInf(dValue, -1) {
return NEGATIVE_INFINITY
}
if math.IsNaN(dValue) {
return NAN
}
iValue := int64(dValue)
sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
isNil := false
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
}
func NewNumericFromI64(iValue int64) Numeric {
dValue := float64(iValue)
sValue := strconv.FormatInt(iValue, 10)
isNil := false
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
}
func NewNumericFromI32(iValue int32) Numeric {
dValue := float64(iValue)
sValue := strconv.FormatInt(int64(iValue), 10)
isNil := false
return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
}
func NewNumericFromString(sValue string) Numeric {
if sValue == INFINITY.String() {
return INFINITY
}
if sValue == NEGATIVE_INFINITY.String() {
return NEGATIVE_INFINITY
}
if sValue == NAN.String() {
return NAN
}
iValue, _ := strconv.ParseInt(sValue, 10, 64)
dValue, _ := strconv.ParseFloat(sValue, 64)
isNil := len(sValue) == 0
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
}
func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
if isNull {
return NewNullNumeric()
}
if sValue == JSON_INFINITY {
return INFINITY
}
if sValue == JSON_NEGATIVE_INFINITY {
return NEGATIVE_INFINITY
}
if sValue == JSON_NAN {
return NAN
}
iValue, _ := strconv.ParseInt(sValue, 10, 64)
dValue, _ := strconv.ParseFloat(sValue, 64)
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
}
func NewNullNumeric() Numeric {
return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
}
func (p *numeric) Int64() int64 {
return p.iValue
}
func (p *numeric) Int32() int32 {
return int32(p.iValue)
}
func (p *numeric) Int16() int16 {
return int16(p.iValue)
}
func (p *numeric) Byte() byte {
return byte(p.iValue)
}
func (p *numeric) Int() int {
return int(p.iValue)
}
func (p *numeric) Float64() float64 {
return p.dValue
}
func (p *numeric) Float32() float32 {
return float32(p.dValue)
}
func (p *numeric) String() string {
return p.sValue
}
func (p *numeric) isNull() bool {
return p.isNil
}
func init() {
INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
}
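// Illustrative sketch, not part of the upstream file: the string constructor
// recognizes the special values initialized above and falls back to
// integer/float parsing otherwise.
func exampleNumericParsing() {
	inf := NewNumericFromString("Infinity") // returns INFINITY
	n := NewNumericFromString("12")
	_ = inf.Float64() // +Inf
	_ = n.Int64()     // 12
	_ = n.Float64()   // 12.0
}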

View File

@@ -0,0 +1,80 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "context"
// A processor is a generic object which operates upon an input stream and
// writes to some output stream.
type TProcessor interface {
Process(ctx context.Context, in, out TProtocol) (bool, TException)
// ProcessorMap returns a map of thrift method names to TProcessorFunctions.
ProcessorMap() map[string]TProcessorFunction
// AddToProcessorMap adds the given TProcessorFunction to the internal
// processor map at the given key.
//
// If one is already set at the given key, it will be replaced with the new
// TProcessorFunction.
AddToProcessorMap(string, TProcessorFunction)
}
type TProcessorFunction interface {
Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
}
// The default processor factory just returns a singleton
// instance.
type TProcessorFactory interface {
GetProcessor(trans TTransport) TProcessor
}
type tProcessorFactory struct {
processor TProcessor
}
func NewTProcessorFactory(p TProcessor) TProcessorFactory {
return &tProcessorFactory{processor: p}
}
func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
return p.processor
}
// The default processor function factory just returns a singleton
// instance.
type TProcessorFunctionFactory interface {
GetProcessorFunction(trans TTransport) TProcessorFunction
}
type tProcessorFunctionFactory struct {
processor TProcessorFunction
}
func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
return &tProcessorFunctionFactory{processor: p}
}
func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
return p.processor
}
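
Both factory interfaces simply hand back the value they were constructed with; they exist so server code can ask for a processor per accepted transport without caring whether it is shared. A sketch with a hand-written TProcessorFunction, assuming the upstream import path; noopFunc is purely illustrative (real implementations come from Thrift-generated code):

package main

import (
    "context"
    "fmt"

    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

// noopFunc is a hand-written TProcessorFunction for illustration only: it
// consumes the call arguments and reports success without writing a reply.
type noopFunc struct{}

func (noopFunc) Process(ctx context.Context, seqId int32, in, out thrift.TProtocol) (bool, thrift.TException) {
    if err := thrift.SkipDefaultDepth(ctx, in, thrift.STRUCT); err != nil {
        return false, thrift.NewTProtocolException(err)
    }
    return true, nil
}

func main() {
    factory := thrift.NewTProcessorFunctionFactory(noopFunc{})
    f1 := factory.GetProcessorFunction(nil)
    f2 := factory.GetProcessorFunction(nil)
    fmt.Println(f1 == f2) // true: the default factory always returns the same instance
}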

View File

@@ -0,0 +1,177 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"errors"
"fmt"
)
const (
VERSION_MASK = 0xffff0000
VERSION_1 = 0x80010000
)
type TProtocol interface {
WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error
WriteMessageEnd(ctx context.Context) error
WriteStructBegin(ctx context.Context, name string) error
WriteStructEnd(ctx context.Context) error
WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error
WriteFieldEnd(ctx context.Context) error
WriteFieldStop(ctx context.Context) error
WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error
WriteMapEnd(ctx context.Context) error
WriteListBegin(ctx context.Context, elemType TType, size int) error
WriteListEnd(ctx context.Context) error
WriteSetBegin(ctx context.Context, elemType TType, size int) error
WriteSetEnd(ctx context.Context) error
WriteBool(ctx context.Context, value bool) error
WriteByte(ctx context.Context, value int8) error
WriteI16(ctx context.Context, value int16) error
WriteI32(ctx context.Context, value int32) error
WriteI64(ctx context.Context, value int64) error
WriteDouble(ctx context.Context, value float64) error
WriteString(ctx context.Context, value string) error
WriteBinary(ctx context.Context, value []byte) error
ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error)
ReadMessageEnd(ctx context.Context) error
ReadStructBegin(ctx context.Context) (name string, err error)
ReadStructEnd(ctx context.Context) error
ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error)
ReadFieldEnd(ctx context.Context) error
ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error)
ReadMapEnd(ctx context.Context) error
ReadListBegin(ctx context.Context) (elemType TType, size int, err error)
ReadListEnd(ctx context.Context) error
ReadSetBegin(ctx context.Context) (elemType TType, size int, err error)
ReadSetEnd(ctx context.Context) error
ReadBool(ctx context.Context) (value bool, err error)
ReadByte(ctx context.Context) (value int8, err error)
ReadI16(ctx context.Context) (value int16, err error)
ReadI32(ctx context.Context) (value int32, err error)
ReadI64(ctx context.Context) (value int64, err error)
ReadDouble(ctx context.Context) (value float64, err error)
ReadString(ctx context.Context) (value string, err error)
ReadBinary(ctx context.Context) (value []byte, err error)
Skip(ctx context.Context, fieldType TType) (err error)
Flush(ctx context.Context) (err error)
Transport() TTransport
}
// The maximum recursive depth the skip() function will traverse
const DEFAULT_RECURSION_DEPTH = 64
// Skips over the next data element from the provided input TProtocol object.
func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) {
return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH)
}
// Skips over the next data element from the provided input TProtocol object.
func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) {
if maxDepth <= 0 {
return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
}
switch fieldType {
case BOOL:
_, err = self.ReadBool(ctx)
return
case BYTE:
_, err = self.ReadByte(ctx)
return
case I16:
_, err = self.ReadI16(ctx)
return
case I32:
_, err = self.ReadI32(ctx)
return
case I64:
_, err = self.ReadI64(ctx)
return
case DOUBLE:
_, err = self.ReadDouble(ctx)
return
case STRING:
_, err = self.ReadString(ctx)
return
case STRUCT:
if _, err = self.ReadStructBegin(ctx); err != nil {
return err
}
for {
_, typeId, _, _ := self.ReadFieldBegin(ctx)
if typeId == STOP {
break
}
err := Skip(ctx, self, typeId, maxDepth-1)
if err != nil {
return err
}
self.ReadFieldEnd(ctx)
}
return self.ReadStructEnd(ctx)
case MAP:
keyType, valueType, size, err := self.ReadMapBegin(ctx)
if err != nil {
return err
}
for i := 0; i < size; i++ {
err := Skip(ctx, self, keyType, maxDepth-1)
if err != nil {
return err
}
if err := Skip(ctx, self, valueType, maxDepth-1); err != nil {
return err
}
}
return self.ReadMapEnd(ctx)
case SET:
elemType, size, err := self.ReadSetBegin(ctx)
if err != nil {
return err
}
for i := 0; i < size; i++ {
err := Skip(ctx, self, elemType, maxDepth-1)
if err != nil {
return err
}
}
return self.ReadSetEnd(ctx)
case LIST:
elemType, size, err := self.ReadListBegin(ctx)
if err != nil {
return err
}
for i := 0; i < size; i++ {
err := Skip(ctx, self, elemType, maxDepth-1)
if err != nil {
return err
}
}
return self.ReadListEnd(ctx)
default:
return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType))
}
return nil
}
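
A short round-trip sketch of the write/read halves of TProtocol, using SkipDefaultDepth to jump over a field the reader does not understand (the same mechanism generated code uses for unknown fields). Import path assumed as above; error handling is trimmed for brevity and the struct/field names are illustrative:

package main

import (
    "context"
    "fmt"

    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

func main() {
    ctx := context.Background()
    buf := thrift.NewTMemoryBuffer()
    proto := thrift.NewTBinaryProtocolTransport(buf)

    // Write a struct with an i32 field (id 1) and a string field (id 2).
    proto.WriteStructBegin(ctx, "Example")
    proto.WriteFieldBegin(ctx, "count", thrift.I32, 1)
    proto.WriteI32(ctx, 42)
    proto.WriteFieldEnd(ctx)
    proto.WriteFieldBegin(ctx, "note", thrift.STRING, 2)
    proto.WriteString(ctx, "hello")
    proto.WriteFieldEnd(ctx)
    proto.WriteFieldStop(ctx)
    proto.WriteStructEnd(ctx)
    proto.Flush(ctx)

    // Read it back, pretending we only know about field 1; everything else is skipped.
    proto.ReadStructBegin(ctx)
    for {
        _, typeId, id, _ := proto.ReadFieldBegin(ctx)
        if typeId == thrift.STOP {
            break
        }
        if id == 1 && typeId == thrift.I32 {
            v, _ := proto.ReadI32(ctx)
            fmt.Println("count =", v) // count = 42
        } else {
            thrift.SkipDefaultDepth(ctx, proto, typeId) // bounded-depth skip of unknown data
        }
        proto.ReadFieldEnd(ctx)
    }
    proto.ReadStructEnd(ctx)
}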

View File

@@ -0,0 +1,104 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"encoding/base64"
"errors"
)
// Thrift Protocol exception
type TProtocolException interface {
TException
TypeId() int
}
const (
UNKNOWN_PROTOCOL_EXCEPTION = 0
INVALID_DATA = 1
NEGATIVE_SIZE = 2
SIZE_LIMIT = 3
BAD_VERSION = 4
NOT_IMPLEMENTED = 5
DEPTH_LIMIT = 6
)
type tProtocolException struct {
typeId int
err error
msg string
}
var _ TProtocolException = (*tProtocolException)(nil)
func (tProtocolException) TExceptionType() TExceptionType {
return TExceptionTypeProtocol
}
func (p *tProtocolException) TypeId() int {
return p.typeId
}
func (p *tProtocolException) String() string {
return p.msg
}
func (p *tProtocolException) Error() string {
return p.msg
}
func (p *tProtocolException) Unwrap() error {
return p.err
}
func NewTProtocolException(err error) TProtocolException {
if err == nil {
return nil
}
if e, ok := err.(TProtocolException); ok {
return e
}
if errors.As(err, new(base64.CorruptInputError)) {
return NewTProtocolExceptionWithType(INVALID_DATA, err)
}
return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err)
}
func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
if err == nil {
return nil
}
return &tProtocolException{
typeId: errType,
err: err,
msg: err.Error(),
}
}
func prependTProtocolException(prepend string, err TProtocolException) TProtocolException {
return &tProtocolException{
typeId: err.TypeId(),
err: err,
msg: prepend + err.Error(),
}
}
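
A small sketch of how these constructors classify errors: NewTProtocolExceptionWithType pins an explicit type id, while errors.Unwrap still reaches the original error, and nil input stays nil. Import path assumed as above:

package main

import (
    "errors"
    "fmt"

    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

func main() {
    cause := errors.New("list size was negative")
    exc := thrift.NewTProtocolExceptionWithType(thrift.NEGATIVE_SIZE, cause)

    fmt.Println(exc.TypeId() == thrift.NEGATIVE_SIZE) // true
    fmt.Println(errors.Unwrap(exc) == cause)          // true: the original error is preserved
    fmt.Println(thrift.NewTProtocolException(nil))    // <nil>: nil errors stay nil
}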

View File

@@ -0,0 +1,25 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
// Factory interface for constructing protocol instances.
type TProtocolFactory interface {
GetProtocol(trans TTransport) TProtocol
}
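
The interface is intentionally tiny: anything that can turn a transport into a protocol qualifies. A sketch of a custom factory that always produces the JSON protocol; jsonProtocolFactory is an illustrative name and the import path is assumed (the library also ships ready-made factories such as NewTBinaryProtocolFactoryDefault, used elsewhere in this vendored code):

package main

import (
    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

// jsonProtocolFactory is an illustrative custom TProtocolFactory.
type jsonProtocolFactory struct{}

func (jsonProtocolFactory) GetProtocol(trans thrift.TTransport) thrift.TProtocol {
    return thrift.NewTJSONProtocol(trans)
}

func main() {
    var factory thrift.TProtocolFactory = jsonProtocolFactory{}
    proto := factory.GetProtocol(thrift.NewTMemoryBuffer())
    _ = proto // handed to a serializer, server, or client stub in real use
}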

View File

@@ -0,0 +1,94 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
)
// See https://pkg.go.dev/context#WithValue on why we need the unexported typedefs.
type responseHelperKey struct{}
// TResponseHelper defines an object with a set of helper functions that can be
// retrieved from the context object passed into server handler functions.
//
// Use GetResponseHelper to retrieve the injected TResponseHelper implementation
// from the context object.
//
// The zero value of TResponseHelper is valid with all helper functions being
// no-op.
type TResponseHelper struct {
// THeader related functions
*THeaderResponseHelper
}
// THeaderResponseHelper defines THeader related TResponseHelper functions.
//
// The zero value of *THeaderResponseHelper is valid with all helper functions
// being no-op.
type THeaderResponseHelper struct {
proto *THeaderProtocol
}
// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the
// underlying TProtocol.
func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper {
if hp, ok := proto.(*THeaderProtocol); ok {
return &THeaderResponseHelper{
proto: hp,
}
}
return nil
}
// SetHeader sets a response header.
//
// It's no-op if the underlying protocol/transport does not support THeader.
func (h *THeaderResponseHelper) SetHeader(key, value string) {
if h != nil && h.proto != nil {
h.proto.SetWriteHeader(key, value)
}
}
// ClearHeaders clears all the response headers previously set.
//
// It's no-op if the underlying protocol/transport does not support THeader.
func (h *THeaderResponseHelper) ClearHeaders() {
if h != nil && h.proto != nil {
h.proto.ClearWriteHeaders()
}
}
// GetResponseHelper retrieves the TResponseHelper implementation injected into
// the context object.
//
// If no helper was found in the context object, a nop helper with ok == false
// will be returned.
func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) {
if v := ctx.Value(responseHelperKey{}); v != nil {
helper, ok = v.(TResponseHelper)
}
return
}
// SetResponseHelper injects TResponseHelper into the context object.
func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context {
return context.WithValue(ctx, responseHelperKey{}, helper)
}
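
A sketch of how a handler body retrieves the injected helper. Because the zero value is a safe no-op, the handler does not need to branch on whether THeader is actually in use. The import path is assumed and handleEcho is an illustrative signature, not generated code:

package main

import (
    "context"

    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

// handleEcho is an illustrative handler body; in real code the context is the
// one passed into a generated service handler by the server.
func handleEcho(ctx context.Context, msg string) (string, error) {
    helper, _ := thrift.GetResponseHelper(ctx)
    // Safe even when no helper was injected: the zero value is a no-op.
    helper.SetHeader("request-id", "abc-123")
    return msg, nil
}

func main() {
    _, _ = handleEcho(context.Background(), "ping")
}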

View File

@@ -0,0 +1,71 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"errors"
"io"
)
type RichTransport struct {
TTransport
}
// NewTRichTransport wraps a TTransport to provide the TRichTransport interface.
func NewTRichTransport(trans TTransport) *RichTransport {
return &RichTransport{trans}
}
func (r *RichTransport) ReadByte() (c byte, err error) {
return readByte(r.TTransport)
}
func (r *RichTransport) WriteByte(c byte) error {
return writeByte(r.TTransport, c)
}
func (r *RichTransport) WriteString(s string) (n int, err error) {
return r.Write([]byte(s))
}
func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
return r.TTransport.RemainingBytes()
}
func readByte(r io.Reader) (c byte, err error) {
v := [1]byte{0}
n, err := r.Read(v[0:1])
if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
return v[0], nil
}
if n > 0 && err != nil {
return v[0], err
}
if err != nil {
return 0, err
}
return v[0], nil
}
func writeByte(w io.Writer, c byte) error {
v := [1]byte{c}
_, err := w.Write(v[0:1])
return err
}
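
A quick sketch of what the wrapper adds on top of a plain transport: the byte- and string-level helpers that protocols rely on. TMemoryBuffer stands in for a real transport; import path assumed as above:

package main

import (
    "fmt"

    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

func main() {
    buf := thrift.NewTMemoryBuffer()
    rich := thrift.NewTRichTransport(buf)

    rich.WriteByte(0x2a)   // io.ByteWriter, not part of plain TTransport
    rich.WriteString("hi") // stringWriter helper

    b, _ := rich.ReadByte() // io.ByteReader
    fmt.Printf("first byte %#x, %d bytes remaining\n", b, rich.RemainingBytes())
    // Output: first byte 0x2a, 2 bytes remaining
}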

View File

@@ -0,0 +1,136 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"sync"
)
type TSerializer struct {
Transport *TMemoryBuffer
Protocol TProtocol
}
type TStruct interface {
Write(ctx context.Context, p TProtocol) error
Read(ctx context.Context, p TProtocol) error
}
func NewTSerializer() *TSerializer {
transport := NewTMemoryBufferLen(1024)
protocol := NewTBinaryProtocolTransport(transport)
return &TSerializer{
Transport: transport,
Protocol: protocol,
}
}
func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
t.Transport.Reset()
if err = msg.Write(ctx, t.Protocol); err != nil {
return
}
if err = t.Protocol.Flush(ctx); err != nil {
return
}
if err = t.Transport.Flush(ctx); err != nil {
return
}
return t.Transport.String(), nil
}
func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
t.Transport.Reset()
if err = msg.Write(ctx, t.Protocol); err != nil {
return
}
if err = t.Protocol.Flush(ctx); err != nil {
return
}
if err = t.Transport.Flush(ctx); err != nil {
return
}
b = append(b, t.Transport.Bytes()...)
return
}
// TSerializerPool is the thread-safe version of TSerializer, it uses resource
// pool of TSerializer under the hood.
//
// It must be initialized with either NewTSerializerPool or
// NewTSerializerPoolSizeFactory.
type TSerializerPool struct {
pool sync.Pool
}
// NewTSerializerPool creates a new TSerializerPool.
//
// NewTSerializer can be used as the arg here.
func NewTSerializerPool(f func() *TSerializer) *TSerializerPool {
return &TSerializerPool{
pool: sync.Pool{
New: func() interface{} {
return f()
},
},
}
}
// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given
// size and protocol factory.
//
// Note that the size is not the limit. The TMemoryBuffer underneath can grow
// larger than that. It just dictates the initial size.
func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool {
return &TSerializerPool{
pool: sync.Pool{
New: func() interface{} {
transport := NewTMemoryBufferLen(size)
protocol := factory.GetProtocol(transport)
return &TSerializer{
Transport: transport,
Protocol: protocol,
}
},
},
}
}
func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) {
s := t.pool.Get().(*TSerializer)
defer t.pool.Put(s)
return s.WriteString(ctx, msg)
}
func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) {
s := t.pool.Get().(*TSerializer)
defer t.pool.Put(s)
return s.Write(ctx, msg)
}
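
A serialization sketch. The TStruct here is hand-written purely for illustration; in practice the structs come from the Thrift compiler. The pool variant is the one to reach for when many goroutines serialize concurrently. Import path assumed as above:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

// toy is a hand-written TStruct used only for this sketch; generated structs
// implement the same Write/Read pair.
type toy struct{ Message string }

func (t *toy) Write(ctx context.Context, p thrift.TProtocol) error {
    if err := p.WriteStructBegin(ctx, "Toy"); err != nil {
        return err
    }
    if err := p.WriteFieldBegin(ctx, "message", thrift.STRING, 1); err != nil {
        return err
    }
    if err := p.WriteString(ctx, t.Message); err != nil {
        return err
    }
    if err := p.WriteFieldEnd(ctx); err != nil {
        return err
    }
    if err := p.WriteFieldStop(ctx); err != nil {
        return err
    }
    return p.WriteStructEnd(ctx)
}

func (t *toy) Read(ctx context.Context, p thrift.TProtocol) error {
    // Not needed for this serialization-only sketch; just skip the payload.
    return thrift.SkipDefaultDepth(ctx, p, thrift.STRUCT)
}

func main() {
    pool := thrift.NewTSerializerPool(thrift.NewTSerializer) // thread-safe wrapper
    data, err := pool.Write(context.Background(), &toy{Message: "hello"})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("serialized %d bytes\n", len(data))
}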

View File

@@ -0,0 +1,34 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
// Server transport. Object which provides client transports.
type TServerTransport interface {
Listen() error
Accept() (TTransport, error)
Close() error
// Optional method implementation. This signals to the server transport
// that it should break out of any accept() or listen() that it is currently
// blocked on. This method, if implemented, MUST be thread safe, as it may
// be called from a different thread context than the other TServerTransport
// methods.
Interrupt() error
}
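
The Interrupt contract described above is what makes graceful shutdown possible: another goroutine can unblock a pending Accept. A sketch using the library's TCP server transport (NewTServerSocket); the import path, the loopback address, and the timing are illustrative assumptions:

package main

import (
    "log"
    "time"

    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

func main() {
    serverTransport, err := thrift.NewTServerSocket("127.0.0.1:0") // any free port, for the sketch
    if err != nil {
        log.Fatal(err)
    }
    if err := serverTransport.Listen(); err != nil {
        log.Fatal(err)
    }

    // Unblock the pending Accept from another goroutine; per the contract above,
    // Interrupt must be safe to call from a different goroutine.
    go func() {
        time.Sleep(100 * time.Millisecond)
        serverTransport.Interrupt()
    }()

    if _, err := serverTransport.Accept(); err != nil {
        log.Println("accept returned:", err) // interrupted, as expected
    }
}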

File diff suppressed because it is too large

View File

@@ -0,0 +1,332 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"errors"
"fmt"
"io"
"sync"
"sync/atomic"
"time"
)
// ErrAbandonRequest is a special error server handler implementations can
// return to indicate that the request has been abandoned.
//
// TSimpleServer will check for this error, and close the client connection
// instead of writing the response/error back to the client.
//
// It shall only be used when the server handler implementation knows that the
// client has already abandoned the request (by checking that the passed-in context
// is already canceled, for example).
var ErrAbandonRequest = errors.New("request abandoned")
// ServerConnectivityCheckInterval defines the ticker interval used by
// connectivity check in thrift compiled TProcessorFunc implementations.
//
// It's defined as a variable instead of constant, so that thrift server
// implementations can change its value to control the behavior.
//
// If it's changed to <=0, the feature will be disabled.
var ServerConnectivityCheckInterval = time.Millisecond * 5
/*
* This is not a typical TSimpleServer as it is not blocked after accepting a socket.
* It is more like a TThreadedServer that can handle different connections in different goroutines.
* This will work if the Go client implements a connection pool on its side.
*/
type TSimpleServer struct {
closed int32
wg sync.WaitGroup
mu sync.Mutex
processorFactory TProcessorFactory
serverTransport TServerTransport
inputTransportFactory TTransportFactory
outputTransportFactory TTransportFactory
inputProtocolFactory TProtocolFactory
outputProtocolFactory TProtocolFactory
// Headers to auto forward in THeaderProtocol
forwardHeaders []string
logger Logger
}
func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
}
func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
serverTransport,
transportFactory,
protocolFactory,
)
}
func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
serverTransport,
inputTransportFactory,
outputTransportFactory,
inputProtocolFactory,
outputProtocolFactory,
)
}
func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
return NewTSimpleServerFactory6(processorFactory,
serverTransport,
NewTTransportFactory(),
NewTTransportFactory(),
NewTBinaryProtocolFactoryDefault(),
NewTBinaryProtocolFactoryDefault(),
)
}
func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
return NewTSimpleServerFactory6(processorFactory,
serverTransport,
transportFactory,
transportFactory,
protocolFactory,
protocolFactory,
)
}
func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
return &TSimpleServer{
processorFactory: processorFactory,
serverTransport: serverTransport,
inputTransportFactory: inputTransportFactory,
outputTransportFactory: outputTransportFactory,
inputProtocolFactory: inputProtocolFactory,
outputProtocolFactory: outputProtocolFactory,
}
}
func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
return p.processorFactory
}
func (p *TSimpleServer) ServerTransport() TServerTransport {
return p.serverTransport
}
func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
return p.inputTransportFactory
}
func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
return p.outputTransportFactory
}
func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
return p.inputProtocolFactory
}
func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
return p.outputProtocolFactory
}
func (p *TSimpleServer) Listen() error {
return p.serverTransport.Listen()
}
// SetForwardHeaders sets the list of header keys that will be auto forwarded
// while using THeaderProtocol.
//
// "forward" means that when the server is also a client to other upstream
// thrift servers, the context object user gets in the processor functions will
// have both read and write headers set, with write headers being forwarded.
// Users can always override the write headers by calling SetWriteHeaderList
// before calling thrift client functions.
func (p *TSimpleServer) SetForwardHeaders(headers []string) {
size := len(headers)
if size == 0 {
p.forwardHeaders = nil
return
}
keys := make([]string, size)
copy(keys, headers)
p.forwardHeaders = keys
}
// SetLogger sets the logger used by this TSimpleServer.
//
// If no logger was set before Serve is called, a default logger using standard
// log library will be used.
func (p *TSimpleServer) SetLogger(logger Logger) {
p.logger = logger
}
func (p *TSimpleServer) innerAccept() (int32, error) {
client, err := p.serverTransport.Accept()
p.mu.Lock()
defer p.mu.Unlock()
closed := atomic.LoadInt32(&p.closed)
if closed != 0 {
return closed, nil
}
if err != nil {
return 0, err
}
if client != nil {
p.wg.Add(1)
go func() {
defer p.wg.Done()
if err := p.processRequests(client); err != nil {
p.logger(fmt.Sprintf("error processing request: %v", err))
}
}()
}
return 0, nil
}
func (p *TSimpleServer) AcceptLoop() error {
for {
closed, err := p.innerAccept()
if err != nil {
return err
}
if closed != 0 {
return nil
}
}
}
func (p *TSimpleServer) Serve() error {
p.logger = fallbackLogger(p.logger)
err := p.Listen()
if err != nil {
return err
}
p.AcceptLoop()
return nil
}
func (p *TSimpleServer) Stop() error {
p.mu.Lock()
defer p.mu.Unlock()
if atomic.LoadInt32(&p.closed) != 0 {
return nil
}
atomic.StoreInt32(&p.closed, 1)
p.serverTransport.Interrupt()
p.wg.Wait()
return nil
}
// If err is actually EOF, return nil, otherwise return err as-is.
func treatEOFErrorsAsNil(err error) error {
if err == nil {
return nil
}
if errors.Is(err, io.EOF) {
return nil
}
var te TTransportException
if errors.As(err, &te) && te.TypeId() == END_OF_FILE {
return nil
}
return err
}
func (p *TSimpleServer) processRequests(client TTransport) (err error) {
defer func() {
err = treatEOFErrorsAsNil(err)
}()
processor := p.processorFactory.GetProcessor(client)
inputTransport, err := p.inputTransportFactory.GetTransport(client)
if err != nil {
return err
}
inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
var outputTransport TTransport
var outputProtocol TProtocol
// for THeaderProtocol, we must use the same protocol instance for
// input and output so that the response is in the same dialect that
// the server detected the request was in.
headerProtocol, ok := inputProtocol.(*THeaderProtocol)
if ok {
outputProtocol = inputProtocol
} else {
oTrans, err := p.outputTransportFactory.GetTransport(client)
if err != nil {
return err
}
outputTransport = oTrans
outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport)
}
if inputTransport != nil {
defer inputTransport.Close()
}
if outputTransport != nil {
defer outputTransport.Close()
}
for {
if atomic.LoadInt32(&p.closed) != 0 {
return nil
}
ctx := SetResponseHelper(
defaultCtx,
TResponseHelper{
THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol),
},
)
if headerProtocol != nil {
// We need to call ReadFrame here, otherwise we won't
// get any headers on the AddReadTHeaderToContext call.
//
// ReadFrame is safe to be called multiple times so it
// won't break when it's called again later when we
// actually start to read the message.
if err := headerProtocol.ReadFrame(ctx); err != nil {
return err
}
ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders())
ctx = SetWriteHeaderList(ctx, p.forwardHeaders)
}
ok, err := processor.Process(ctx, inputProtocol, outputProtocol)
if errors.Is(err, ErrAbandonRequest) {
return client.Close()
}
if errors.As(err, new(TTransportException)) && err != nil {
return err
}
var tae TApplicationException
if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD {
continue
}
if !ok {
break
}
}
return nil
}
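
A wiring sketch for TSimpleServer. The processor is left as a nil placeholder because it normally comes from Thrift-generated code; the address, buffer size, and timing are illustrative assumptions, while the factories are the ones referenced elsewhere in this file. Import path assumed as above:

package main

import (
    "log"
    "time"

    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

func main() {
    // In real code this is a processor produced by the Thrift compiler for
    // your service; it is left nil here purely to keep the sketch compilable.
    var processor thrift.TProcessor

    serverTransport, err := thrift.NewTServerSocket("127.0.0.1:9090")
    if err != nil {
        log.Fatal(err)
    }

    server := thrift.NewTSimpleServer4(
        processor,
        serverTransport,
        thrift.NewTBufferedTransportFactory(8192),
        thrift.NewTBinaryProtocolFactoryDefault(),
    )

    go func() {
        if err := server.Serve(); err != nil { // Listen + AcceptLoop
            log.Println("server stopped:", err)
        }
    }()

    time.Sleep(100 * time.Millisecond) // give Serve a moment to start (sketch only)

    // On shutdown: interrupt the accept loop and wait for in-flight requests.
    if err := server.Stop(); err != nil {
        log.Println("stop:", err)
    }
}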

View File

@@ -0,0 +1,70 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"errors"
"io"
)
var errTransportInterrupted = errors.New("Transport Interrupted")
type Flusher interface {
Flush() (err error)
}
type ContextFlusher interface {
Flush(ctx context.Context) (err error)
}
type ReadSizeProvider interface {
RemainingBytes() (num_bytes uint64)
}
// Encapsulates the I/O layer
type TTransport interface {
io.ReadWriteCloser
ContextFlusher
ReadSizeProvider
// Opens the transport for communication
Open() error
// Returns true if the transport is open
IsOpen() bool
}
type stringWriter interface {
WriteString(s string) (n int, err error)
}
// This is an "enhanced" transport with extra capabilities. You need to use one of these
// to construct a protocol.
// Notably, TSocket does not implement this interface, and it is always a mistake to use
// TSocket directly in a protocol.
type TRichTransport interface {
io.ReadWriter
io.ByteReader
io.ByteWriter
stringWriter
ContextFlusher
ReadSizeProvider
}
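
Because everything I/O-related goes through this one interface, helpers can be written once and reused with sockets, framed transports, or in-memory buffers. A minimal sketch exercised against TMemoryBuffer; writeAll is an illustrative name and the import path is assumed:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

// writeAll works against any TTransport: open if needed, write, then flush.
func writeAll(ctx context.Context, trans thrift.TTransport, payload []byte) error {
    if !trans.IsOpen() {
        if err := trans.Open(); err != nil {
            return err
        }
    }
    if _, err := trans.Write(payload); err != nil {
        return err
    }
    return trans.Flush(ctx)
}

func main() {
    buf := thrift.NewTMemoryBuffer()
    if err := writeAll(context.Background(), buf, []byte("ping")); err != nil {
        log.Fatal(err)
    }
    fmt.Println(buf.RemainingBytes()) // 4
}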

View File

@@ -0,0 +1,131 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"errors"
"io"
)
type timeoutable interface {
Timeout() bool
}
// Thrift Transport exception
type TTransportException interface {
TException
TypeId() int
Err() error
}
const (
UNKNOWN_TRANSPORT_EXCEPTION = 0
NOT_OPEN = 1
ALREADY_OPEN = 2
TIMED_OUT = 3
END_OF_FILE = 4
)
type tTransportException struct {
typeId int
err error
msg string
}
var _ TTransportException = (*tTransportException)(nil)
func (tTransportException) TExceptionType() TExceptionType {
return TExceptionTypeTransport
}
func (p *tTransportException) TypeId() int {
return p.typeId
}
func (p *tTransportException) Error() string {
return p.msg
}
func (p *tTransportException) Err() error {
return p.err
}
func (p *tTransportException) Unwrap() error {
return p.err
}
func (p *tTransportException) Timeout() bool {
return p.typeId == TIMED_OUT
}
func NewTTransportException(t int, e string) TTransportException {
return &tTransportException{
typeId: t,
err: errors.New(e),
msg: e,
}
}
func NewTTransportExceptionFromError(e error) TTransportException {
if e == nil {
return nil
}
if t, ok := e.(TTransportException); ok {
return t
}
te := &tTransportException{
typeId: UNKNOWN_TRANSPORT_EXCEPTION,
err: e,
msg: e.Error(),
}
if isTimeoutError(e) {
te.typeId = TIMED_OUT
return te
}
if errors.Is(e, io.EOF) {
te.typeId = END_OF_FILE
return te
}
return te
}
func prependTTransportException(prepend string, e TTransportException) TTransportException {
return &tTransportException{
typeId: e.TypeId(),
err: e,
msg: prepend + e.Error(),
}
}
// isTimeoutError returns true when err is an error caused by timeout.
//
// Note that this also includes TTransportException wrapped timeout errors.
func isTimeoutError(err error) bool {
var t timeoutable
if errors.As(err, &t) {
return t.Timeout()
}
return false
}
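
A sketch of the classification behavior: plain io.EOF becomes END_OF_FILE, timeouts become TIMED_OUT, and nil stays nil. The describe helper is illustrative and the import path is assumed:

package main

import (
    "fmt"
    "io"

    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

func describe(err error) string {
    exc := thrift.NewTTransportExceptionFromError(err)
    if exc == nil {
        return "no error"
    }
    switch exc.TypeId() {
    case thrift.END_OF_FILE:
        return "peer closed the connection"
    case thrift.TIMED_OUT:
        return "timed out; a retry may be reasonable"
    default:
        return "transport error: " + exc.Error()
    }
}

func main() {
    fmt.Println(describe(nil))    // no error
    fmt.Println(describe(io.EOF)) // peer closed the connection
}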

View File

@@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
// Factory interface used to create wrapped instances of Transports.
// This is used primarily in servers, which get Transports from
// a ServerTransport and then may want to mutate them (e.g. create
// a BufferedTransport from the underlying base transport).
type TTransportFactory interface {
GetTransport(trans TTransport) (TTransport, error)
}
type tTransportFactory struct{}
// Return a wrapped instance of the base Transport.
func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
return trans, nil
}
func NewTTransportFactory() TTransportFactory {
return &tTransportFactory{}
}
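
The factory's purpose is exactly what the comment above describes: letting a server wrap each accepted connection. A sketch of a custom factory that adds buffering; bufferingFactory is an illustrative name, the import path is assumed, and the library already ships NewTBufferedTransportFactory which does the same thing:

package main

import (
    "github.com/apache/thrift/lib/go/thrift" // import path assumed
)

// bufferingFactory is an illustrative TTransportFactory that wraps every
// transport handed to it in a buffered transport.
type bufferingFactory struct{ size int }

func (f bufferingFactory) GetTransport(trans thrift.TTransport) (thrift.TTransport, error) {
    return thrift.NewTBufferedTransport(trans, f.size), nil
}

func main() {
    var factory thrift.TTransportFactory = bufferingFactory{size: 8192}
    wrapped, _ := factory.GetTransport(thrift.NewTMemoryBuffer())
    _ = wrapped // a server would now hand this to a protocol factory
}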

Some files were not shown because too many files have changed in this diff.