Compare commits


15 Commits

Author SHA1 Message Date
jillianwilson  3798d08f49  addressing pr comments  2021-11-11 19:13:16 -04:00
jillianwilson  5ad29d60e3  Addressing pr comments  2021-11-04 17:08:12 -03:00
jillianwilson  e9d2b63a58  Exiting out on certain startup errors  2021-11-04 17:04:15 -03:00
jillianwilson  0b4b9d117f  Addressing PR comments  2021-11-04 16:58:17 -03:00
jillianwilson  1e1e3490c6  correcting README  2021-11-04 16:36:14 -03:00
Jillian W  af00cb27c7  Delete mutatingwebhook-ca-bundle.yaml  2021-11-01 13:31:16 -03:00
Jillian W  2433890cfd  Delete golangci.yml  2021-11-01 13:30:40 -03:00
Jillian W  3542cafe88  Delete Makefile  2021-11-01 13:29:45 -03:00
jillianwilson  fe57e68769  Code cleanup and readme  2021-10-31 20:57:58 -03:00
jillianwilson  591b8949cd  Revert "Add option to cosume connect events rather than polling to restart deployments"  2021-10-27 11:54:04 -03:00
               This reverts commit a5f4a7a0c1.
jillianwilson  a8e6a4a4f1  Webhook that injects secrets into pods  2021-10-27 11:46:46 -03:00
jillianwilson  a5f4a7a0c1  Add option to cosume connect events rather than polling to restart deployments  2021-10-27 11:46:27 -03:00
Jillian W  b35c668959  Merge pull request #78 from 1Password/autorestart-webhook  2021-10-27 11:01:16 -03:00
               Preparing Operator to handle Deployments that use the Kubernetes Webhook
jillianwilson  2fa035022c  Adding better error handling when checking for op:// reference  2021-10-26 18:10:02 -03:00
jillianwilson  d715a6ed0e  Addressing pr comments  2021-10-21 17:21:10 -03:00
749 changed files with 26626 additions and 86890 deletions

View File

@@ -11,7 +11,9 @@ versionFile = $(CURDIR)/.VERSION
curVersion := $(shell cat $(versionFile) | sed 's/^v//')
OPERATOR_NAME := onepassword-connect-operator
DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
INJECTOR_NAME := onepassword-secrets-injector
OPERATOR_DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
INJECTOR_DOCKER_IMG_TAG ?= $(INJECTOR_NAME):v$(curVersion)
test: ## Run test suite
go test ./...
@@ -20,18 +22,31 @@ test/coverage: ## Run test suite with coverage report
go test -v ./... -cover
build/operator: ## Build operator Docker image
@docker build -f operator/Dockerfile --build-arg operator_version=$(curVersion) -t $(DOCKER_IMG_TAG) .
@docker build -f operator/Dockerfile --build-arg operator_version=$(curVersion) -t $(OPERATOR_DOCKER_IMG_TAG) .
@echo "Successfully built and tagged image."
@echo "Tag: $(DOCKER_IMG_TAG)"
@echo "Tag: $(OPERATOR_DOCKER_IMG_TAG)"
build/operator/local: ## Build local version of the operator Docker image
@docker build -f operator/Dockerfile -t local/$(DOCKER_IMG_TAG) .
@docker build -f operator/Dockerfile -t local/$(OPERATOR_DOCKER_IMG_TAG) .
build/operator/binary: clean ## Build operator binary
@mkdir -p dist
@go build -mod vendor -a -o manager ./operator/cmd/manager/main.go
@mv manager ./dist
build/secret-injector: ## Build secret-injector Docker image
@docker build -f secret-injector/Dockerfile --build-arg operator_version=$(curVersion) -t $(INJECTOR_DOCKER_IMG_TAG) .
@echo "Successfully built and tagged image."
@echo "Tag: $(INJECTOR_DOCKER_IMG_TAG)"
build/secret-injector/local: ## Build local version of the secret-injector Docker image
@docker build -f secret-injector/Dockerfile -t local/$(INJECTOR_DOCKER_IMG_TAG) .
build/secret-injector/binary: clean ## Build secret-injector binary
@mkdir -p dist
@go build -mod vendor -a -o manager ./secret-injector/cmd/manager/main.go
@mv manager ./dist
clean:
rm -rf ./dist
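For orientation, the Makefile changes above add `build/secret-injector*` targets that mirror the existing operator targets; a minimal usage sketch (run from the repository root, assuming Docker and Go are installed):
```
# build the operator Docker image (tagged onepassword-connect-operator:v<version>)
make build/operator

# build the new secret-injector Docker image (tagged onepassword-secrets-injector:v<version>)
make build/secret-injector

# or build the plain binaries into ./dist
make build/operator/binary
make build/secret-injector/binary
```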

View File

@@ -12,6 +12,12 @@ The 1Password Connect Kubernetes Operator will continually check for updates fro
[Click here for more details on the 1Password Kubernetes Operator](operator/README.md)
## 1Password Secrets Injector for Kubernetes
The 1Password Secrets Injector implements a mutating webhook to inject 1Password secrets as environment variables into a pod or deployment. Unlike the 1Password Kubernetes Operator, the Secrets Injector does not create a Kubernetes Secret when assigning secrets to your resource.
[Click here for more details on the 1Password Secrets Injector for Kubernetes](secret-injector/README.md)
# Security

3
go.mod
View File

@@ -4,12 +4,11 @@ go 1.13
require (
github.com/1Password/connect-sdk-go v1.0.1
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/operator-framework/operator-sdk v0.19.0
github.com/prometheus/common v0.14.0 // indirect
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.6.1
github.com/suborbital/grav v0.4.1
github.com/suborbital/vektor v0.5.0
k8s.io/api v0.18.2
k8s.io/apimachinery v0.18.2
k8s.io/client-go v12.0.0+incompatible

67
go.sum
View File

@@ -68,7 +68,6 @@ github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHS
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
@@ -159,11 +158,9 @@ github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqh
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb h1:nXPkFq8X1a9ycY3GYQpFNxHh3j2JgY7zDZfq2EXMIzk=
github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
@@ -223,19 +220,16 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG
github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce h1:KXS1Jg+ddGcWA8e1N7cupxaHHZhit5rB9tfDU+mfjyY=
github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
@@ -414,7 +408,6 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
@@ -437,8 +430,6 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -464,8 +455,6 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -566,17 +555,14 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -635,7 +621,6 @@ github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU=
github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -670,16 +655,10 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
github.com/nats-io/nats-server/v2 v2.3.2/go.mod h1:dUf7Cm5z5LbciFVwWx54owyCKm8x4/hL6p7rrljhLFY=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
@@ -714,14 +693,11 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU=
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
@@ -848,14 +824,9 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0
github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4=
github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/schollz/peerdiscovery v1.6.1 h1:2V/Dw+GZY18W6e3yAeUzJouHmIcr9UlWtqsQtpfObGw=
github.com/schollz/peerdiscovery v1.6.1/go.mod h1:bq5/NB9o9/jyEwiW4ubehfToBa2LwdQQMoNiy/vSdYg=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sethvargo/go-envconfig v0.3.0/go.mod h1:XZ2JRR7vhlBEO5zMmOpLgUhgYltqYqq4d4tKagtPUv0=
github.com/sethvargo/go-envconfig v0.3.2 h1:277Lb2iTpUZjUZu1qLoLa/aetwvtZbKh8wNWXmc6dSk=
github.com/sethvargo/go-envconfig v0.3.2/go.mod h1:XZ2JRR7vhlBEO5zMmOpLgUhgYltqYqq4d4tKagtPUv0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
@@ -868,7 +839,6 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
@@ -916,11 +886,6 @@ github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/suborbital/grav v0.4.1 h1:g70gG4EVqNcy5LMII05ayLtxMD8v5M9kBW1BcJFYsC0=
github.com/suborbital/grav v0.4.1/go.mod h1:jN+zB9O6ztW2GqruEU46EMOFjvc8K2UDLyofFJWdI8o=
github.com/suborbital/vektor v0.2.2/go.mod h1:6YQE7r6t1JcVs3twpqjXDftsLUaTNUk5YorRKHcDamI=
github.com/suborbital/vektor v0.5.0 h1:E5PPiBYboarFoprUmjjG/ieVCeIUpD/1F2MnVa/iaDs=
github.com/suborbital/vektor v0.5.0/go.mod h1:iNMR6/alEK1D7fbupwIlGlK5LajngEvq/N+RGVWaqNw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc=
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@@ -1022,16 +987,10 @@ golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8=
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1099,11 +1058,6 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3ob
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1127,7 +1081,6 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1174,14 +1127,6 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1189,19 +1134,12 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1292,7 +1230,6 @@ google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBr
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24 h1:wDju+RU97qa0FZT0QnZDg9Uc2dH0Ql513kFvHocz+WM=
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@@ -1361,7 +1298,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
helm.sh/helm/v3 v3.2.4/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0=
@@ -1396,6 +1332,7 @@ k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftc
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
k8s.io/apiserver v0.0.0-20191122221311-9d521947b1e1/go.mod h1:RbsZY5zzBIWnz4KbctZsTVjwIuOpTp4Z8oCgFHN4kZQ=
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
k8s.io/apiserver v0.18.2 h1:fwKxdTWwwYhxvtjo0UUfX+/fsitsNtfErPNegH2x9ic=
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA=
k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
@@ -1408,6 +1345,7 @@ k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRV
k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA=
k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk21m21tcNUihnmp4OI7lXU7/xA+rYXkc=
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y=
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1432,6 +1370,7 @@ k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+
k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU=
k8s.io/kubectl v0.18.2 h1:9jnGSOC2DDVZmMUTMi0D1aed438mfQcgqa5TAzVjA1k=
k8s.io/kubectl v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4=
k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4=
k8s.io/metrics v0.18.2/go.mod h1:qga8E7QfYNR9Q89cSCAjinC9pTZ7yv1XSVGUB0vJypg=

View File

@@ -13,10 +13,6 @@ import (
"github.com/1Password/onepassword-operator/operator/pkg/controller"
op "github.com/1Password/onepassword-operator/operator/pkg/onepassword"
"github.com/1Password/onepassword-operator/operator/pkg/onepassword/message"
"github.com/suborbital/grav/discovery/local"
"github.com/suborbital/grav/grav"
"github.com/suborbital/grav/transport/websocket"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
@@ -44,8 +40,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)
const connectBusPortVariable = "OP_BUS_PORT"
const envHostVariable = "OP_CONNECT_HOST"
const envPollingIntervalVariable = "POLLING_INTERVAL"
const manageConnect = "MANAGE_CONNECT"
const restartDeploymentsEnvVariable = "AUTO_RESTART"
@@ -173,28 +167,21 @@ func main() {
// Add the Metrics Service
addMetrics(ctx, cfg)
_, connectSet := os.LookupEnv(envHostVariable)
done := make(chan bool)
updateSecretsHandler := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
// Setup update secrets task
if connectSet {
consumeConnectEvents(*updateSecretsHandler)
} else {
// If not using connect then we will use polling to get secret updates
// TODO implement 1Password events-api
ticker := time.NewTicker(getPollingIntervalForUpdatingSecrets())
go func() {
for {
select {
case <-done:
ticker.Stop()
return
case <-ticker.C:
updateSecretsHandler.UpdateKubernetesSecretsTask("", "")
}
updatedSecretsPoller := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
done := make(chan bool)
ticker := time.NewTicker(getPollingIntervalForUpdatingSecrets())
go func() {
for {
select {
case <-done:
ticker.Stop()
return
case <-ticker.C:
updatedSecretsPoller.UpdateKubernetesSecretsTask()
}
}()
}
}
}()
// Start the Cmd
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
@@ -313,40 +300,3 @@ func shouldAutoRestartDeployments() bool {
}
return false
}
func consumeConnectEvents(updateSecretsHandler op.SecretUpdateHandler) {
gwss := websocket.New()
locald := local.New()
port, found := os.LookupEnv(connectBusPortVariable)
if !found {
port = "42829"
}
g := grav.New(
grav.UseEndpoint(port, fmt.Sprintf("%s/meta/message", envHostVariable)),
grav.UseTransport(gwss),
grav.UseDiscovery(locald),
)
pod := g.Connect()
pod.OnType(message.TypeItemUpdate, ItemUpdate(updateSecretsHandler))
}
// ItemUpdateEvent Grav message handler for activity.event messages. Starts update
// process for updating Kubernetes Secrets and OnePasswordItems and triggers
// auto restarts for deployments
func ItemUpdate(updateSecretsHandler op.SecretUpdateHandler) grav.MsgFunc {
return func(msg grav.Message) error {
e := message.ItemUpdateEvent{}
if err := msg.UnmarshalData(&e); err != nil {
log.Error(err, "failed to UnmarshalData into Event")
return nil
}
log.Info(fmt.Sprintf("Detected update for item %s at vault %s", e.ItemUUID, e.VaultUUID))
updateSecretsHandler.UpdateKubernetesSecretsTask("", "")
return nil
}
}

View File

@@ -13,6 +13,14 @@ var logger = logf.Log.WithName("retrieve_item")
const secretReferencePrefix = "op://"
type InvalidOPFormatError struct {
Reference string
}
func (e *InvalidOPFormatError) Error() string {
return fmt.Sprintf("Invalid secret reference : %s. Secret references should start with op://", e.Reference)
}
func GetOnePasswordItemByPath(opConnectClient connect.Client, path string) (*onepassword.Item, error) {
vaultValue, itemValue, err := ParseVaultAndItemFromPath(path)
if err != nil {
@@ -37,7 +45,7 @@ func GetOnePasswordItemByPath(opConnectClient connect.Client, path string) (*one
func ParseReference(reference string) (string, string, error) {
if !strings.HasPrefix(reference, secretReferencePrefix) {
return "", "", fmt.Errorf("secret reference should start with `op://`")
return "", "", &InvalidOPFormatError{Reference: reference}
}
path := strings.TrimPrefix(reference, secretReferencePrefix)

View File

@@ -1,27 +0,0 @@
package message
import "encoding/json"
// TypeItemUpdate and others are sync message types
const (
TypeItemUpdate = "item.update"
)
// ItemUpdateEvent is the data for a sync status message
type ItemUpdateEvent struct {
VaultUUID string `json:"vault_uuid"`
ItemUUID string `json:"item_uuid"`
ItemVersion string `json:"item_version"`
}
// Type returns a the syns status data type
func (s *ItemUpdateEvent) Type() string {
return TypeItemUpdate
}
// Bytes returns Bytes
func (s *ItemUpdateEvent) Bytes() []byte {
bytes, _ := json.Marshal(s)
return bytes
}

View File

@@ -2,12 +2,13 @@ package onepassword
import (
"context"
"errors"
"fmt"
"github.com/1Password/connect-sdk-go/connect"
onepasswordv1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
"github.com/1Password/onepassword-operator/operator/pkg/utils"
"k8s.io/apimachinery/pkg/api/errors"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -41,6 +42,10 @@ func CreateOnePasswordCRSecretsFromContainer(opClient connect.Client, kubeClient
// if value is not of format op://<vault>/<item>/<field> then ignore
vault, item, err := ParseReference(env.Value)
if err != nil {
var ev *InvalidOPFormatError
if !errors.As(err, &ev) {
return err
}
continue
}
// create a one password item custom resource to track updates for injected secrets
@@ -64,7 +69,7 @@ func CreateOnePasswordCRSecretFromReference(opClient connect.Client, kubeClient
currentOnepassworditem := &onepasswordv1.OnePasswordItem{}
err = kubeClient.Get(context.Background(), types.NamespacedName{Name: onepassworditem.Name, Namespace: onepassworditem.Namespace}, currentOnepassworditem)
if err != nil && errors.IsNotFound(err) {
if k8sErrors.IsNotFound(err) {
log.Info(fmt.Sprintf("Creating OnePasswordItem CR %v at namespace '%v'", onepassworditem.Name, onepassworditem.Namespace))
return kubeClient.Create(context.Background(), onepassworditem)
} else if err != nil {

View File

@@ -0,0 +1,234 @@
package onepassword
import (
"context"
"fmt"
"strings"
"testing"
"github.com/1Password/onepassword-operator/operator/pkg/mocks"
"github.com/1Password/connect-sdk-go/onepassword"
onepasswordv1 "github.com/1Password/onepassword-operator/operator/pkg/apis/onepassword/v1"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
errors2 "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubectl/pkg/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
type onepassworditemInjections struct {
testName string
existingDeployment *appsv1.Deployment
existingNamespace *corev1.Namespace
expectedError error
expectedEvents []string
opItem map[string]string
expectedOPItem *onepasswordv1.OnePasswordItem
}
var onepassworditemTests = []onepassworditemInjections{
{
testName: "Try to Create OnePasswordItem with container with valid op reference",
existingNamespace: defaultNamespace,
existingDeployment: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
ContainerInjectAnnotation: "test-app",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "test-app",
Env: []corev1.EnvVar{
{
Name: name,
Value: fmt.Sprintf("op://%s/%s/test", vaultId, itemId),
},
},
},
},
},
},
},
},
expectedError: nil,
opItem: map[string]string{
userKey: username,
passKey: password,
},
expectedOPItem: &onepasswordv1.OnePasswordItem{
TypeMeta: metav1.TypeMeta{
Kind: "OnePasswordItem",
APIVersion: "onepassword.com/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: injectedOnePasswordItemName,
Namespace: namespace,
Annotations: map[string]string{
InjectedAnnotation: "true",
VersionAnnotation: "old",
},
},
Spec: onepasswordv1.OnePasswordItemSpec{
ItemPath: itemPath,
},
},
},
{
testName: "Container with no op:// reference does not create OnePasswordItem",
existingNamespace: defaultNamespace,
existingDeployment: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
ContainerInjectAnnotation: "test-app",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "test-app",
Env: []corev1.EnvVar{
{
Name: name,
Value: fmt.Sprintf("some value"),
},
},
},
},
},
},
},
},
expectedError: nil,
opItem: map[string]string{
userKey: username,
passKey: password,
},
expectedOPItem: nil,
},
{
testName: "Container with op:// reference missing vault and item does not create OnePasswordItem and returns error",
existingNamespace: defaultNamespace,
existingDeployment: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: deploymentKind,
APIVersion: deploymentAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
ContainerInjectAnnotation: "test-app",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "test-app",
Env: []corev1.EnvVar{
{
Name: name,
Value: fmt.Sprintf("op://"),
},
},
},
},
},
},
},
},
expectedError: fmt.Errorf("Invalid secret reference : %s. Secret references should match op://<vault>/<item>/<field>", "op://"),
opItem: map[string]string{
userKey: username,
passKey: password,
},
expectedOPItem: nil,
},
}
func TestOnePasswordItemSecretInjected(t *testing.T) {
for _, testData := range onepassworditemTests {
t.Run(testData.testName, func(t *testing.T) {
// Register operator types with the runtime scheme.
s := scheme.Scheme
s.AddKnownTypes(appsv1.SchemeGroupVersion, &onepasswordv1.OnePasswordItem{}, &onepasswordv1.OnePasswordItemList{}, &appsv1.Deployment{})
// Objects to track in the fake client.
objs := []runtime.Object{
testData.existingDeployment,
testData.existingNamespace,
}
// Create a fake client to mock API calls.
cl := fake.NewFakeClientWithScheme(s, objs...)
opConnectClient := &mocks.TestClient{}
mocks.GetGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
item := onepassword.Item{}
item.Fields = generateFields(testData.opItem["username"], testData.opItem["password"])
item.Version = itemVersion
item.Vault.ID = vaultUUID
item.ID = uuid
return &item, nil
}
injectedContainers := testData.existingDeployment.Spec.Template.ObjectMeta.Annotations[ContainerInjectAnnotation]
parsedInjectedContainers := strings.Split(injectedContainers, ",")
err := CreateOnePasswordItemResourceFromDeployment(opConnectClient, cl, testData.existingDeployment, parsedInjectedContainers)
assert.Equal(t, testData.expectedError, err)
// Check if Secret has been created and has the correct data
opItemCR := &onepasswordv1.OnePasswordItem{}
err = cl.Get(context.TODO(), types.NamespacedName{Name: injectedOnePasswordItemName, Namespace: namespace}, opItemCR)
if testData.expectedOPItem == nil {
assert.Error(t, err)
assert.True(t, errors2.IsNotFound(err))
} else {
assert.Equal(t, testData.expectedOPItem.Spec.ItemPath, opItemCR.Spec.ItemPath)
assert.Equal(t, testData.expectedOPItem.Name, opItemCR.Name)
assert.Equal(t, testData.expectedOPItem.Annotations[InjectedAnnotation], opItemCR.Annotations[InjectedAnnotation])
}
})
}
}

View File

@@ -36,13 +36,13 @@ type SecretUpdateHandler struct {
shouldAutoRestartDeploymentsGlobal bool
}
func (h *SecretUpdateHandler) UpdateKubernetesSecretsTask(vaultId, itemId string) error {
updatedKubernetesSecrets, err := h.updateKubernetesSecrets(vaultId, itemId)
func (h *SecretUpdateHandler) UpdateKubernetesSecretsTask() error {
updatedKubernetesSecrets, err := h.updateKubernetesSecrets()
if err != nil {
return err
}
updatedInjectedSecrets, err := h.updateInjectedSecrets(vaultId, itemId)
updatedInjectedSecrets, err := h.updateInjectedSecrets()
if err != nil {
return err
}
@@ -113,7 +113,7 @@ func (h *SecretUpdateHandler) restartDeployment(deployment *appsv1.Deployment) {
}
}
func (h *SecretUpdateHandler) updateKubernetesSecrets(vaultId, itemId string) (map[string]map[string]*corev1.Secret, error) {
func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*corev1.Secret, error) {
secrets := &corev1.SecretList{}
err := h.client.List(context.Background(), secrets)
if err != nil {
@@ -126,16 +126,11 @@ func (h *SecretUpdateHandler) updateKubernetesSecrets(vaultId, itemId string) (m
secret := secrets.Items[i]
itemPath := secret.Annotations[ItemPathAnnotation]
currentVersion := secret.Annotations[VersionAnnotation]
if len(itemPath) == 0 || len(currentVersion) == 0 {
continue
}
if vaultId != "" && itemId != "" && itemPath != fmt.Sprintf("vaults/%s/items%s", vaultId, itemId) {
continue
}
item, err := GetOnePasswordItemByPath(h.opConnectClient, secret.Annotations[ItemPathAnnotation])
if err != nil {
return nil, fmt.Errorf("Failed to retrieve item: %v", err)
@@ -162,19 +157,19 @@ func (h *SecretUpdateHandler) updateKubernetesSecrets(vaultId, itemId string) (m
return updatedSecrets, nil
}
func (h *SecretUpdateHandler) updateInjectedSecrets(vaultId, itemId string) (map[string]map[string]*onepasswordv1.OnePasswordItem, error) {
func (h *SecretUpdateHandler) updateInjectedSecrets() (map[string]map[string]*onepasswordv1.OnePasswordItem, error) {
// fetch all onepassworditems
onepasswordItems := &onepasswordv1.OnePasswordItemList{}
err := h.client.List(context.Background(), onepasswordItems)
if err != nil {
log.Error(err, "Failed to list OneOasswordItems")
log.Error(err, "Failed to list OnePasswordItems")
return nil, err
}
updatedItems := map[string]map[string]*onepasswordv1.OnePasswordItem{}
for _, item := range onepasswordItems.Items {
// if onepassworditem was generated by injecting a secret into a deployment then ignore
// if onepassworditem was not generated by injecting a secret into a deployment then ignore
_, injected := item.Annotations[InjectedAnnotation]
if !injected {
continue
@@ -185,9 +180,6 @@ func (h *SecretUpdateHandler) updateInjectedSecrets(vaultId, itemId string) (map
if len(itemPath) == 0 || len(currentVersion) == 0 {
continue
}
if vaultId != "" && itemId != "" && itemPath != fmt.Sprintf("vaults/%s/items%s", vaultId, itemId) {
continue
}
storedItem, err := GetOnePasswordItemByPath(h.opConnectClient, itemPath)
if err != nil {

View File

@@ -918,7 +918,7 @@ func TestUpdateSecretHandler(t *testing.T) {
shouldAutoRestartDeploymentsGlobal: testData.globalAutoRestartEnabled,
}
err := h.UpdateKubernetesSecretsTask("", "")
err := h.UpdateKubernetesSecretsTask()
assert.Equal(t, testData.expectedError, err)

View File

@@ -0,0 +1,30 @@
# Build the manager binary
FROM golang:1.17 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# Copy the go source
COPY secret-injector/cmd/main.go secret-injector/main.go
COPY secret-injector/pkg/ secret-injector/pkg/
COPY vendor/ vendor/
# Build
ARG secret_injector_version=dev
RUN CGO_ENABLED=0 \
GO111MODULE=on \
go build \
-ldflags "-X \"github.com/1Password/onepassword-operator/secret-injector/version.Version=$secret_injector_version\"" \
-mod vendor \
-a -o injector secret-injector/main.go
# Use distroless as minimal base image to package the secret-injector binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/injector .
USER nonroot:nonroot
ENTRYPOINT ["/injector"]

123
secret-injector/README.md Normal file
View File

@@ -0,0 +1,123 @@
# 1Password Secrets Injector for Kubernetes
The 1Password Secrets Injector implements a mutating webhook to inject 1Password secrets as environment variables into a pod or deployment. Unlike the 1Password Kubernetes Operator, the Secrets Injector does not create a Kubernetes Secret when assigning secrets to your resource.
## Use with the 1Password Kubernetes Operator
The 1Password Secrets Injector for Kubernetes can be used in conjunction with the 1Password Kubernetes Operator to provide automatic deployment restarts when a 1Password item used by your deployment is updated.
[Click here for more details on the 1Password Kubernetes Operator](../operator/README.md)
## Setup and Deployment
The 1Password Secrets Injector for Kubernetes uses a webhook server to inject secrets into pods and deployments. Admission to the webhook server must be secure, so communication with it requires a TLS certificate signed by a Kubernetes CA.
For managing TLS certificates for your cluster, please see the [official documentation](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/). The certificate and key generated by following that documentation must be set in the [deployment](deploy/deployment.yaml) arguments for the Secrets Injector (`tlsCertFile` and `tlsKeyFile`, respectively).
In addition to setting the TLS certificate and key for the Secrets Injector service, we must also create a webhook configuration for the service. An example of the configuration can be found [here](deploy/mutatingwebhook.yaml). In the provided example the caBundle is not set; replace this value with your own CA bundle. It can be generated from the Kubernetes apiserver's default CA bundle with the following command:
```
export CA_BUNDLE=$(kubectl get configmap -n kube-system extension-apiserver-authentication -o=jsonpath='{.data.client-ca-file}' | base64 | tr -d '\n')
```
```
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
name: op-secret-injector-webhook-config
labels:
app: op-secret-injector
webhooks:
- name: op-secret-injector.1password
failurePolicy: Fail
clientConfig:
service:
name: op-secret-injector-webhook-service
namespace: op-secret-injector
path: "/inject"
caBundle: ${CA_BUNDLE} # replace this with your own CA bundle
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: [""]
apiVersions: ["v1"]
resources: ["pods"]
namespaceSelector:
matchLabels:
op-secret-injection: enabled
```
You can automate this step using the script by [morvencao](https://github.com/morvencao/kube-mutating-webhook-tutorial).
```
cat deploy/mutatingwebhook.yaml | \
deploy/webhook-patch-ca-bundle.sh > \
deploy/mutatingwebhook-ca-bundle.yaml
```
Lastly, we must apply the deployment, service, and mutating webhook configuration to Kubernetes:
```
kubectl create -f deploy/deployment.yaml
kubectl create -f deploy/service.yaml
kubectl create -f deploy/mutatingwebhook-ca-bundle.yaml
```
## Usage
For every namespace you want the 1Password Secrets Injector to inject secrets into, you must add the `op-secret-injection=enabled` label to the namespace:
```
kubectl label namespace <namespace> op-secret-injection=enabled
```
To inject a 1Password secret as an environment variable, add an environment variable to your pod or deployment whose value references your 1Password item in the format `op://<vault>/<item>[/section]/<field>`. You must also annotate your pod/deployment spec with `operator.1password.io/inject`, which expects a comma-separated list of the names of the containers that will be mutated and have secrets injected.
Note: You must also include the command needed to run the container, as the secret injector prepends a script to this command to enable secret injection.
```
#example
apiVersion: apps/v1
kind: Deployment
metadata:
name: app-example
spec:
selector:
matchLabels:
app: app-example
template:
metadata:
annotations:
operator.1password.io/inject: "app-example,another-example"
labels:
app: app-example
spec:
containers:
- name: app-example
image: my-image
command: ["./example"]
env:
- name: DB_USERNAME
value: op://my-vault/my-item/sql/username
- name: DB_PASSWORD
value: op://my-vault/my-item/sql/password
- name: another-example
image: my-image
env:
- name: DB_USERNAME
value: op://my-vault/my-item/sql/username
- name: DB_PASSWORD
value: op://my-vault/my-item/sql/password
- name: my-app # because my-app is not listed in the inject annotation above, this container will not have secrets injected
image: my-image
env:
- name: DB_USERNAME
value: op://my-vault/my-item/sql/username
- name: DB_PASSWORD
value: op://my-vault/my-item/sql/password
```
## Troubleshooting
If you are having trouble getting secrets injected into your pod, check the following (a few example commands are sketched after this list):
1. Check that the namespace of your pod has the `op-secret-injection=enabled` label
2. Check that the `caBundle` in `mutatingwebhookconfiguration.yaml` is set with a correct value
3. Ensure that the 1Password Secret Injector webhook is running (`op-secret-injector` by default).
4. Check that your container has a `command` field specifying the command to run the app in your container
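Example commands for working through these checks (they assume the default names from the manifests in this README, such as the `op-secret-injector` namespace and webhook deployment):
```
# 1. confirm the namespace label
kubectl get namespace <namespace> --show-labels

# 2. inspect the registered webhook configuration, including its caBundle
kubectl get mutatingwebhookconfiguration op-secret-injector-webhook-config -o yaml

# 3. verify the injector is running and inspect its logs
kubectl get pods -n op-secret-injector
kubectl logs -n op-secret-injector deployment/op-secret-injector-webhook-deployment
```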

View File

@@ -0,0 +1,87 @@
package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/1Password/onepassword-operator/secret-injector/pkg/webhook"
"github.com/golang/glog"
)
const (
connectTokenSecretKeyEnv = "OP_CONNECT_TOKEN_KEY"
connectTokenSecretNameEnv = "OP_CONNECT_TOKEN_NAME"
connectHostEnv = "OP_CONNECT_HOST"
)
func main() {
var parameters webhook.SecretInjectorParameters
glog.Info("Starting webhook")
// get command line parameters
flag.IntVar(&parameters.Port, "port", 8443, "Webhook server port.")
flag.StringVar(&parameters.CertFile, "tlsCertFile", "/etc/webhook/certs/cert.pem", "File containing the x509 Certificate for HTTPS.")
flag.StringVar(&parameters.KeyFile, "tlsKeyFile", "/etc/webhook/certs/key.pem", "File containing the x509 private key to --tlsCertFile.")
flag.Parse()
pair, err := tls.LoadX509KeyPair(parameters.CertFile, parameters.KeyFile)
if err != nil {
glog.Errorf("Failed to load key pair: %v", err)
os.Exit(1)
}
connectHost, present := os.LookupEnv(connectHostEnv)
if !present || connectHost == "" {
glog.Error("Connect host not set")
}
connectTokenName, present := os.LookupEnv(connectTokenSecretNameEnv)
if !present || connectTokenName == "" {
glog.Error("Connect token name not set")
}
connectTokenKey, present := os.LookupEnv(connectTokenSecretKeyEnv)
if !present || connectTokenKey == "" {
glog.Error("Connect token key not set")
}
webhookConfig := webhook.Config{
ConnectHost: connectHost,
ConnectTokenName: connectTokenName,
ConnectTokenKey: connectTokenKey,
}
secretInjector := &webhook.SecretInjector{
Config: webhookConfig,
Server: &http.Server{
Addr: fmt.Sprintf(":%v", parameters.Port),
TLSConfig: &tls.Config{Certificates: []tls.Certificate{pair}},
},
}
// define http server and server handler
mux := http.NewServeMux()
mux.HandleFunc("/inject", secretInjector.Serve)
secretInjector.Server.Handler = mux
// start the webhook server in a new goroutine
go func() {
if err := secretInjector.Server.ListenAndServeTLS("", ""); err != nil {
glog.Errorf("Failed to listen and serve webhook server: %v", err)
os.Exit(1)
}
}()
// listen for the OS shutdown signal
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
<-signalChan
glog.Infof("Got OS shutdown signal, shutting down webhook server gracefully...")
secretInjector.Server.Shutdown(context.Background())
}
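For reference, a sketch of running the resulting `injector` binary outside the cluster with the flags and environment variables this `main.go` reads (the values shown are placeholders taken from the example manifests, not required defaults):
```
export OP_CONNECT_HOST=http://onepassword-connect:8080/
export OP_CONNECT_TOKEN_NAME=onepassword-token
export OP_CONNECT_TOKEN_KEY=token

./injector \
  -port=8443 \
  -tlsCertFile=/etc/webhook/certs/cert.pem \
  -tlsKeyFile=/etc/webhook/certs/key.pem
```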

View File

@@ -0,0 +1,42 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: op-secret-injector-webhook-deployment
namespace: op-secret-injector
labels:
app: op-secret-injector
spec:
replicas: 1
selector:
matchLabels:
app: op-secret-injector
template:
metadata:
labels:
app: op-secret-injector
spec:
containers:
- name: op-secret-injector
image: local/onepassword-secrets-injector:v1.1.0
imagePullPolicy: Never
args:
- -tlsCertFile=/etc/webhook/certs/cert.pem
- -tlsKeyFile=/etc/webhook/certs/key.pem
- -alsologtostderr
- -v=4
- 2>&1
env:
- name: OP_CONNECT_HOST
value: http://onepassword-connect:8080/
- name: OP_CONNECT_TOKEN_NAME
value: onepassword-token
- name: OP_CONNECT_TOKEN_KEY
value: token
volumeMounts:
- name: webhook-certs
mountPath: /etc/webhook/certs
readOnly: true
volumes:
- name: webhook-certs
secret:
secretName: op-secret-injector-webhook-certs

View File

@@ -0,0 +1,23 @@
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
name: op-secret-injector-webhook-config
labels:
app: op-secret-injector
webhooks:
- name: op-secret-injector.1password
failurePolicy: Fail
clientConfig:
service:
name: op-secret-injector-webhook-service
namespace: op-secret-injector
path: "/inject"
caBundle: ${CA_BUNDLE}
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: [""]
apiVersions: ["v1"]
resources: ["pods"]
namespaceSelector:
matchLabels:
op-secret-injection: enabled

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: op-secret-injector-webhook-service
namespace: op-secret-injector
labels:
app: op-secret-injector
spec:
ports:
- port: 443
targetPort: 8443
selector:
app: op-secret-injector

View File

@@ -0,0 +1,494 @@
package webhook
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/golang/glog"
"k8s.io/api/admission/v1beta1"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
)
const (
// binVolumeName is the name of the volume where the OP CLI binary is stored.
binVolumeName = "op-bin"
// binVolumeMountPath is the mount path where the OP CLI binary can be found.
binVolumeMountPath = "/op/bin/"
connectTokenEnv = "OP_CONNECT_TOKEN"
connectHostEnv = "OP_CONNECT_HOST"
)
// binVolume is the shared, in-memory volume where the OP CLI binary lives.
var binVolume = corev1.Volume{
Name: binVolumeName,
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
},
},
}
// binVolumeMount is the shared volume mount where the OP CLI binary lives.
var binVolumeMount = corev1.VolumeMount{
Name: binVolumeName,
MountPath: binVolumeMountPath,
ReadOnly: true,
}
var (
runtimeScheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(runtimeScheme)
deserializer = codecs.UniversalDeserializer()
// defaulter works around https://github.com/kubernetes/kubernetes/issues/57982
defaulter = runtime.ObjectDefaulter(runtimeScheme)
)
const (
injectionStatus = "operator.1password.io/status"
injectAnnotation = "operator.1password.io/inject"
versionAnnotation = "operator.1password.io/version"
)
type SecretInjector struct {
Config Config
Server *http.Server
}
// the command line parameters for configuring the webhook
type SecretInjectorParameters struct {
Port int // webhook server port
CertFile string // path to the x509 certificate for https
KeyFile string // path to the x509 private key matching `CertFile`
}
type Config struct {
ConnectHost string // the host on which a Connect server is running
ConnectTokenName string // the name of the Kubernetes secret that stores the Connect token
ConnectTokenKey string // the name of the data field in the secret that stores the Connect token
}
type patchOperation struct {
Op string `json:"op"`
Path string `json:"path"`
Value interface{} `json:"value,omitempty"`
}
func init() {
_ = corev1.AddToScheme(runtimeScheme)
_ = admissionregistrationv1beta1.AddToScheme(runtimeScheme)
_ = v1.AddToScheme(runtimeScheme)
}
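// applyDefaultsWorkaround runs the given containers and volumes through a throwaway pod so that API defaults are applied to them.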
func applyDefaultsWorkaround(containers []corev1.Container, volumes []corev1.Volume) {
defaulter.Default(&corev1.Pod{
Spec: corev1.PodSpec{
Containers: containers,
Volumes: volumes,
},
})
}
// Check if the pod should have secrets injected
func mutationRequired(metadata *metav1.ObjectMeta) bool {
annotations := metadata.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
status := annotations[injectionStatus]
_, enabled := annotations[injectAnnotation]
// if the pod has not already been injected and injection is enabled, mark it for injection
required := false
if strings.ToLower(status) != "injected" && enabled {
required = true
}
glog.Infof("Pod %v in namespace %v. Secret injection status: %v. Secret injection required: %v", metadata.Name, metadata.Namespace, status, required)
return required
}
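// addContainers returns JSON patch operations that append the given containers at basePath, creating the array when the target list is empty.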
func addContainers(target, added []corev1.Container, basePath string) (patch []patchOperation) {
first := len(target) == 0
var value interface{}
for _, add := range added {
value = add
path := basePath
if first {
first = false
value = []corev1.Container{add}
} else {
path = path + "/-"
}
patch = append(patch, patchOperation{
Op: "add",
Path: path,
Value: value,
})
}
return patch
}
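// addVolume returns JSON patch operations that append the given volumes at basePath, creating the array when the target list is empty.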
func addVolume(target, added []corev1.Volume, basePath string) (patch []patchOperation) {
first := len(target) == 0
var value interface{}
for _, add := range added {
value = add
path := basePath
if first {
first = false
value = []corev1.Volume{add}
} else {
path = path + "/-"
}
patch = append(patch, patchOperation{
Op: "add",
Path: path,
Value: value,
})
}
return patch
}
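// updateAnnotation returns JSON patch operations that add or replace the given annotations on the pod.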
func updateAnnotation(target map[string]string, added map[string]string) (patch []patchOperation) {
for key, value := range added {
if target == nil || target[key] == "" {
target = map[string]string{}
patch = append(patch, patchOperation{
Op: "add",
Path: "/metadata/annotations",
Value: map[string]string{
key: value,
},
})
} else {
patch = append(patch, patchOperation{
Op: "replace",
Path: "/metadata/annotations/" + key,
Value: value,
})
}
}
return patch
}
// mutation process for injecting secrets into pods
func (s *SecretInjector) mutate(ar *v1beta1.AdmissionReview) *v1beta1.AdmissionResponse {
ctx := context.Background()
req := ar.Request
var pod corev1.Pod
if err := json.Unmarshal(req.Object.Raw, &pod); err != nil {
glog.Errorf("Could not unmarshal raw object: %v", err)
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
glog.Infof("Checking if secret injection is needed for %v %s at namespace %v",
req.Kind, pod.Name, req.Namespace)
// determine whether to inject secrets
if !mutationRequired(&pod.ObjectMeta) {
glog.Infof("Secret injection not required for %s at namespace %s", pod.Name, pod.Namespace)
return &v1beta1.AdmissionResponse{
Allowed: true,
}
}
containersStr := pod.Annotations[injectAnnotation]
containers := map[string]struct{}{}
if containersStr == "" {
glog.Infof("No containers set for secret injection for %s/%s", pod.Namespace, pod.Name)
return &v1beta1.AdmissionResponse{
Allowed: true,
}
}
for _, container := range strings.Split(containersStr, ",") {
containers[container] = struct{}{}
}
version, ok := pod.Annotations[versionAnnotation]
if !ok {
version = "2.0.0-beta.4"
}
mutated := false
var patch []patchOperation
for i, c := range pod.Spec.InitContainers {
_, mutate := containers[c.Name]
if !mutate {
continue
}
c, didMutate, initContainerPatch, err := s.mutateContainer(ctx, &c, i)
if err != nil {
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
if didMutate {
mutated = true
pod.Spec.InitContainers[i] = *c
}
patch = append(patch, initContainerPatch...)
}
for i, c := range pod.Spec.Containers {
_, mutate := containers[c.Name]
if !mutate {
continue
}
c, didMutate, containerPatch, err := s.mutateContainer(ctx, &c, i)
if err != nil {
glog.Error("Error occurred mutating container for secret injection: ", err)
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
patch = append(patch, containerPatch...)
if didMutate {
mutated = true
pod.Spec.Containers[i] = *c
}
}
if !mutated {
glog.Infof("No matching containers mutated for secret injection for %s/%s", pod.Namespace, pod.Name)
return &v1beta1.AdmissionResponse{
Allowed: true,
}
}
// binInitContainer is the container that pulls the OP CLI
// into a shared volume mount.
var binInitContainer = corev1.Container{
Name: "copy-op-bin",
Image: "1password/op" + ":" + version,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"sh", "-c",
fmt.Sprintf("cp /usr/local/bin/op %s", binVolumeMountPath)},
VolumeMounts: []corev1.VolumeMount{
{
Name: binVolumeName,
MountPath: binVolumeMountPath,
},
},
}
patchBytes, err := createOPCLIPatch(&pod, []corev1.Container{binInitContainer}, patch)
if err != nil {
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
glog.Infof("AdmissionResponse: patch=%v\n", string(patchBytes))
return &v1beta1.AdmissionResponse{
Allowed: true,
Patch: patchBytes,
PatchType: func() *v1beta1.PatchType {
pt := v1beta1.PatchTypeJSONPatch
return &pt
}(),
}
}
// create mutation patch for resources
func createOPCLIPatch(pod *corev1.Pod, containers []corev1.Container, patch []patchOperation) ([]byte, error) {
annotations := map[string]string{injectionStatus: "injected"}
patch = append(patch, addVolume(pod.Spec.Volumes, []corev1.Volume{binVolume}, "/spec/volumes")...)
patch = append(patch, addContainers(pod.Spec.InitContainers, containers, "/spec/initContainers")...)
patch = append(patch, updateAnnotation(pod.Annotations, annotations)...)
return json.Marshal(patch)
}
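// createOPConnectPatch returns patch operations that add the OP_CONNECT_HOST and OP_CONNECT_TOKEN environment variables to the container, skipping any that are already set.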
func createOPConnectPatch(container *corev1.Container, containerIndex int, host, tokenSecretName, tokenSecretKey string) []patchOperation {
var patch []patchOperation
envs := []corev1.EnvVar{}
// if connect configuration is already set in the container do not overwrite it
hostConfig, tokenConfig := isConnectConfigurationSet(container)
if !hostConfig {
connectHostEnvVar := corev1.EnvVar{
Name: "OP_CONNECT_HOST",
Value: host,
}
envs = append(envs, connectHostEnvVar)
}
if !tokenConfig {
connectTokenEnvVar := corev1.EnvVar{
Name: "OP_CONNECT_TOKEN",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
Key: tokenSecretKey,
LocalObjectReference: corev1.LocalObjectReference{
Name: tokenSecretName,
},
},
},
}
envs = append(envs, connectTokenEnvVar)
}
patch = append(patch, setEnvironment(*container, containerIndex, envs, "/spec/containers")...)
return patch
}
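// isConnectConfigurationSet reports whether OP_CONNECT_HOST and OP_CONNECT_TOKEN are already defined in the container's environment.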
func isConnectConfigurationSet(container *corev1.Container) (bool, bool) {
hostConfig := false
tokenConfig := false
for _, env := range container.Env {
if env.Name == connectHostEnv {
hostConfig = true
}
if env.Name == connectTokenEnv {
tokenConfig = true
}
if tokenConfig && hostConfig {
break
}
}
return hostConfig, tokenConfig
}
// mutateContainer mutates the given container so that secrets can be injected via the op CLI before the main process starts
func (s *SecretInjector) mutateContainer(_ context.Context, container *corev1.Container, containerIndex int) (*corev1.Container, bool, []patchOperation, error) {
// prepending op run command to the container command so that secrets are injected before the main process is started
if len(container.Command) == 0 {
return container, false, nil, fmt.Errorf("not attaching OP to the container %s: the podspec does not define a command", container.Name)
}
// Prepend the command with op run --
container.Command = append([]string{binVolumeMountPath + "op", "run", "--"}, container.Command...)
var patch []patchOperation
// adding the cli to the container using a volume mount
path := fmt.Sprintf("%s/%d/volumeMounts", "/spec/containers", containerIndex)
patch = append(patch, patchOperation{
Op: "add",
Path: path,
Value: []corev1.VolumeMount{binVolumeMount},
})
// replacing the container command with a command prepended with op run
path = fmt.Sprintf("%s/%d/command", "/spec/containers", containerIndex)
patch = append(patch, patchOperation{
Op: "replace",
Path: path,
Value: container.Command,
})
// creating patch for adding Connect environment variables to the container; any that are already set in the container are skipped
patch = append(patch, createOPConnectPatch(container, containerIndex, s.Config.ConnectHost, s.Config.ConnectTokenName, s.Config.ConnectTokenKey)...)
return container, true, patch, nil
}
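// setEnvironment returns patch operations that append the given environment variables to the container at containerIndex, creating the env array when it is empty.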
func setEnvironment(container corev1.Container, containerIndex int, addedEnv []corev1.EnvVar, basePath string) (patch []patchOperation) {
first := len(container.Env) == 0
var value interface{}
for _, add := range addedEnv {
path := fmt.Sprintf("%s/%d/env", basePath, containerIndex)
value = add
if first {
first = false
value = []corev1.EnvVar{add}
} else {
path = path + "/-"
}
patch = append(patch, patchOperation{
Op: "add",
Path: path,
Value: value,
})
}
return patch
}
// Serve method for secrets injector webhook
func (s *SecretInjector) Serve(w http.ResponseWriter, r *http.Request) {
var body []byte
if r.Body != nil {
if data, err := ioutil.ReadAll(r.Body); err == nil {
body = data
}
}
if len(body) == 0 {
glog.Error("empty body")
http.Error(w, "empty body", http.StatusBadRequest)
return
}
// verify the content type is accurate
contentType := r.Header.Get("Content-Type")
if contentType != "application/json" {
glog.Errorf("Content-Type=%s, expect application/json", contentType)
http.Error(w, "invalid Content-Type, expect `application/json`", http.StatusUnsupportedMediaType)
return
}
var admissionResponse *v1beta1.AdmissionResponse
ar := v1beta1.AdmissionReview{}
if _, _, err := deserializer.Decode(body, nil, &ar); err != nil {
glog.Errorf("Can't decode body: %v", err)
admissionResponse = &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
} else {
admissionResponse = s.mutate(&ar)
}
admissionReview := v1beta1.AdmissionReview{}
if admissionResponse != nil {
admissionReview.Response = admissionResponse
if ar.Request != nil {
admissionReview.Response.UID = ar.Request.UID
}
}
resp, err := json.Marshal(admissionReview)
if err != nil {
glog.Errorf("Can't encode response: %v", err)
http.Error(w, fmt.Sprintf("could not encode response: %v", err), http.StatusInternalServerError)
return
}
glog.Infof("Ready to write response ...")
if _, err := w.Write(resp); err != nil {
glog.Errorf("Can't write response: %v", err)
http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
}
}
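
For reference, a minimal test-style sketch of what mutateContainer produces for a container that defines a command. It is not part of this change set: the test name, container name, and command are hypothetical, the Connect values are taken from the example deployment manifest above, and it would have to live inside the webhook package since it touches unexported identifiers.

package webhook

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
)

// TestMutateContainerSketch is illustrative only: it exercises mutateContainer
// for a hypothetical container and checks the shape of the resulting patch.
func TestMutateContainerSketch(t *testing.T) {
	s := &SecretInjector{Config: Config{
		ConnectHost:      "http://onepassword-connect:8080/",
		ConnectTokenName: "onepassword-token",
		ConnectTokenKey:  "token",
	}}
	c := corev1.Container{Name: "app", Command: []string{"/app/server"}}

	mutated, didMutate, patch, err := s.mutateContainer(nil, &c, 0)
	if err != nil || !didMutate {
		t.Fatalf("expected container to be mutated, got err=%v", err)
	}
	// The original command is now run through the op CLI from the shared volume.
	if mutated.Command[0] != binVolumeMountPath+"op" {
		t.Errorf("expected command to be prefixed with the op binary, got %v", mutated.Command)
	}
	// Four operations: the op-bin volume mount, the command replacement,
	// and the two Connect environment variables.
	if len(patch) != 4 {
		t.Errorf("expected 4 patch operations, got %d", len(patch))
	}
}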

View File

@@ -0,0 +1,5 @@
package version
var (
Version = "0.0.1"
)

191
vendor/github.com/golang/glog/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

44
vendor/github.com/golang/glog/README generated vendored Normal file
View File

@@ -0,0 +1,44 @@
glog
====
Leveled execution logs for Go.
This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
https://github.com/google/glog
By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.
Through the -vmodule flag, the package also provides fine-grained
control over logging at the file level.
The comment from glog.go introduces the ideas:
Package glog implements logging analogous to the Google-internal
C++ INFO/ERROR/V setup. It provides functions Info, Warning,
Error, Fatal, plus formatting variants such as Infof. It
also provides V-style logging controlled by the -v and
-vmodule=file=2 flags.
Basic examples:
glog.Info("Prepare to repel boarders")
glog.Fatalf("Initialization failed: %s", err)
See the documentation for the V function for an explanation
of these examples:
if glog.V(2) {
glog.Info("Starting transaction...")
}
glog.V(2).Infoln("Processed", nItems, "elements")
The repository contains an open source version of the log package
used inside Google. The master copy of the source lives inside
Google, not here. The code in this repo is for export only and is not itself
under development. Feature requests will be ignored.
Send bug reports to golang-nuts@googlegroups.com.

1180
vendor/github.com/golang/glog/glog.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

124
vendor/github.com/golang/glog/glog_file.go generated vendored Normal file
View File

@@ -0,0 +1,124 @@
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// File I/O for logs.
package glog
import (
"errors"
"flag"
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
"sync"
"time"
)
// MaxSize is the maximum size of a log file in bytes.
var MaxSize uint64 = 1024 * 1024 * 1800
// logDirs lists the candidate directories for new log files.
var logDirs []string
// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
func createLogDirs() {
if *logDir != "" {
logDirs = append(logDirs, *logDir)
}
logDirs = append(logDirs, os.TempDir())
}
var (
pid = os.Getpid()
program = filepath.Base(os.Args[0])
host = "unknownhost"
userName = "unknownuser"
)
func init() {
h, err := os.Hostname()
if err == nil {
host = shortHostname(h)
}
current, err := user.Current()
if err == nil {
userName = current.Username
}
// Sanitize userName since it may contain filepath separators on Windows.
userName = strings.Replace(userName, `\`, "_", -1)
}
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
if i := strings.Index(hostname, "."); i >= 0 {
return hostname[:i]
}
return hostname
}
// logName returns a new log file name containing tag, with start time t, and
// the name for the symlink for tag.
func logName(tag string, t time.Time) (name, link string) {
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
program,
host,
userName,
tag,
t.Year(),
t.Month(),
t.Day(),
t.Hour(),
t.Minute(),
t.Second(),
pid)
return name, program + "." + tag
}
var onceLogDirs sync.Once
// create creates a new log file and returns the file and its filename, which
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
onceLogDirs.Do(createLogDirs)
if len(logDirs) == 0 {
return nil, "", errors.New("log: no log dirs")
}
name, link := logName(tag, t)
var lastErr error
for _, dir := range logDirs {
fname := filepath.Join(dir, name)
f, err := os.Create(fname)
if err == nil {
symlink := filepath.Join(dir, link)
os.Remove(symlink) // ignore err
os.Symlink(name, symlink) // ignore err
return f, fname, nil
}
lastErr = err
}
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}

View File

@@ -26,8 +26,8 @@ var (
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:]) //nolint:errcheck
h.Write(data) //nolint:errcheck
h.Write(space[:])
h.Write(data)
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)

118
vendor/github.com/google/uuid/null.go generated vendored
View File

@@ -1,118 +0,0 @@
// Copyright 2021 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"database/sql/driver"
"encoding/json"
"fmt"
)
var jsonNull = []byte("null")
// NullUUID represents a UUID that may be null.
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
// var u uuid.NullUUID
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
// ...
// if u.Valid {
// // use u.UUID
// } else {
// // NULL value
// }
//
type NullUUID struct {
UUID UUID
Valid bool // Valid is true if UUID is not NULL
}
// Scan implements the SQL driver.Scanner interface.
func (nu *NullUUID) Scan(value interface{}) error {
if value == nil {
nu.UUID, nu.Valid = Nil, false
return nil
}
err := nu.UUID.Scan(value)
if err != nil {
nu.Valid = false
return err
}
nu.Valid = true
return nil
}
// Value implements the driver Valuer interface.
func (nu NullUUID) Value() (driver.Value, error) {
if !nu.Valid {
return nil, nil
}
// Delegate to UUID Value function
return nu.UUID.Value()
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (nu NullUUID) MarshalBinary() ([]byte, error) {
if nu.Valid {
return nu.UUID[:], nil
}
return []byte(nil), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(nu.UUID[:], data)
nu.Valid = true
return nil
}
// MarshalText implements encoding.TextMarshaler.
func (nu NullUUID) MarshalText() ([]byte, error) {
if nu.Valid {
return nu.UUID.MarshalText()
}
return jsonNull, nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (nu *NullUUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err != nil {
nu.Valid = false
return err
}
nu.UUID = id
nu.Valid = true
return nil
}
// MarshalJSON implements json.Marshaler.
func (nu NullUUID) MarshalJSON() ([]byte, error) {
if nu.Valid {
return json.Marshal(nu.UUID)
}
return jsonNull, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
if bytes.Equal(data, jsonNull) {
*nu = NullUUID{}
return nil // valid null UUID
}
err := json.Unmarshal(data, &nu.UUID)
nu.Valid = err == nil
return err
}

View File

@@ -9,7 +9,7 @@ import (
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {

View File

@@ -12,7 +12,6 @@ import (
"fmt"
"io"
"strings"
"sync"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
@@ -34,27 +33,7 @@ const (
Future // Reserved for future definition.
)
const randPoolSize = 16 * 16
var (
rander = rand.Reader // random function
poolEnabled = false
poolMu sync.Mutex
poolPos = randPoolSize // protected with poolMu
pool [randPoolSize]byte // protected with poolMu
)
type invalidLengthError struct{ len int }
func (err invalidLengthError) Error() string {
return fmt.Sprintf("invalid UUID length: %d", err.len)
}
// IsInvalidLengthError is matcher function for custom error invalidLengthError
func IsInvalidLengthError(err error) bool {
_, ok := err.(invalidLengthError)
return ok
}
var rander = rand.Reader // random function
// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
@@ -89,7 +68,7 @@ func Parse(s string) (UUID, error) {
}
return uuid, nil
default:
return uuid, invalidLengthError{len(s)}
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -133,7 +112,7 @@ func ParseBytes(b []byte) (UUID, error) {
}
return uuid, nil
default:
return uuid, invalidLengthError{len(b)}
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -264,31 +243,3 @@ func SetRand(r io.Reader) {
}
rander = r
}
// EnableRandPool enables internal randomness pool used for Random
// (Version 4) UUID generation. The pool contains random bytes read from
// the random number generator on demand in batches. Enabling the pool
// may improve the UUID generation throughput significantly.
//
// Since the pool is stored on the Go heap, this feature may be a bad fit
// for security sensitive applications.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func EnableRandPool() {
poolEnabled = true
}
// DisableRandPool disables the randomness pool if it was previously
// enabled with EnableRandPool.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func DisableRandPool() {
poolEnabled = false
defer poolMu.Unlock()
poolMu.Lock()
poolPos = randPoolSize
}

View File

@@ -14,21 +14,11 @@ func New() UUID {
return Must(NewRandom())
}
// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
// uuid.New().String()
func NewString() string {
return Must(NewRandom()).String()
}
// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// Uses the randomness pool if it was enabled with EnableRandPool.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
@@ -37,10 +27,7 @@ func NewString() string {
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
if !poolEnabled {
return NewRandomFromReader(rander)
}
return newRandomFromPool()
return NewRandomFromReader(rander)
}
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
@@ -54,23 +41,3 @@ func NewRandomFromReader(r io.Reader) (UUID, error) {
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
func newRandomFromPool() (UUID, error) {
var uuid UUID
poolMu.Lock()
if poolPos == randPoolSize {
_, err := io.ReadFull(rander, pool[:])
if err != nil {
poolMu.Unlock()
return Nil, err
}
poolPos = 0
}
copy(uuid[:], pool[poolPos:(poolPos+16)])
poolPos += 16
poolMu.Unlock()
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}

View File

@@ -1,25 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
.idea/
*.iml

View File

@@ -1,9 +0,0 @@
# This is the official list of Gorilla WebSocket authors for copyright
# purposes.
#
# Please keep the list sorted.
Gary Burd <gary@beagledreams.com>
Google LLC (https://opensource.google.com/)
Joachim Bauch <mail@joachim-bauch.de>

View File

@@ -1,22 +0,0 @@
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,64 +0,0 @@
# Gorilla WebSocket
[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket)
Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
### Documentation
* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
### Status
The Gorilla WebSocket package provides a complete and tested implementation of
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
package API is stable.
### Installation
go get github.com/gorilla/websocket
### Protocol Compliance
The Gorilla WebSocket package passes the server tests in the [Autobahn Test
Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
### Gorilla WebSocket compared with other packages
<table>
<tr>
<th></th>
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
</tr>
<tr>
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
<tr><td>Passes <a href="https://github.com/crossbario/autobahn-testsuite">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
<tr><td colspan="3">Other Features</tr></td>
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
</table>
Notes:
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
2. The application can get the type of a received data message by implementing
a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
function.
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
Read returns when the input buffer is full or a frame boundary is
encountered. Each call to Write sends a single frame message. The Gorilla
io.Reader and io.WriteCloser operate on a single WebSocket message.

View File

@@ -1,395 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bytes"
"context"
"crypto/tls"
"errors"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptrace"
"net/url"
"strings"
"time"
)
// ErrBadHandshake is returned when the server response to opening handshake is
// invalid.
var ErrBadHandshake = errors.New("websocket: bad handshake")
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
// NewClient creates a new client connection using the given net connection.
// The URL u specifies the host and request URI. Use requestHeader to specify
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
// (Cookie). Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etc.
//
// Deprecated: Use Dialer instead.
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
d := Dialer{
ReadBufferSize: readBufSize,
WriteBufferSize: writeBufSize,
NetDial: func(net, addr string) (net.Conn, error) {
return netConn, nil
},
}
return d.Dial(u.String(), requestHeader)
}
// A Dialer contains options for connecting to WebSocket server.
type Dialer struct {
// NetDial specifies the dial function for creating TCP connections. If
// NetDial is nil, net.Dial is used.
NetDial func(network, addr string) (net.Conn, error)
// NetDialContext specifies the dial function for creating TCP connections. If
// NetDialContext is nil, net.DialContext is used.
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
// Proxy specifies a function to return a proxy for a given
// Request. If the function returns a non-nil error, the
// request is aborted with the provided error.
// If Proxy is nil or returns a nil *URL, no proxy is used.
Proxy func(*http.Request) (*url.URL, error)
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
// If nil, the default configuration is used.
TLSClientConfig *tls.Config
// HandshakeTimeout specifies the duration for the handshake to complete.
HandshakeTimeout time.Duration
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
// size is zero, then a useful default size is used. The I/O buffer sizes
// do not limit the size of the messages that can be sent or received.
ReadBufferSize, WriteBufferSize int
// WriteBufferPool is a pool of buffers for write operations. If the value
// is not set, then write buffers are allocated to the connection for the
// lifetime of the connection.
//
// A pool is most useful when the application has a modest volume of writes
// across a large number of connections.
//
// Applications should use a single pool for each unique value of
// WriteBufferSize.
WriteBufferPool BufferPool
// Subprotocols specifies the client's requested subprotocols.
Subprotocols []string
// EnableCompression specifies if the client should attempt to negotiate
// per message compression (RFC 7692). Setting this value to true does not
// guarantee that compression will be supported. Currently only "no context
// takeover" modes are supported.
EnableCompression bool
// Jar specifies the cookie jar.
// If Jar is nil, cookies are not sent in requests and ignored
// in responses.
Jar http.CookieJar
}
// Dial creates a new client connection by calling DialContext with a background context.
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
return d.DialContext(context.Background(), urlStr, requestHeader)
}
var errMalformedURL = errors.New("malformed ws or wss URL")
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
hostPort = u.Host
hostNoPort = u.Host
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
hostNoPort = hostNoPort[:i]
} else {
switch u.Scheme {
case "wss":
hostPort += ":443"
case "https":
hostPort += ":443"
default:
hostPort += ":80"
}
}
return hostPort, hostNoPort
}
// DefaultDialer is a dialer with all fields set to the default values.
var DefaultDialer = &Dialer{
Proxy: http.ProxyFromEnvironment,
HandshakeTimeout: 45 * time.Second,
}
// nilDialer is dialer to use when receiver is nil.
var nilDialer = *DefaultDialer
// DialContext creates a new client connection. Use requestHeader to specify the
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
// Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// The context will be used in the request and in the Dialer.
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etcetera. The response body may not contain the entire response and does not
// need to be closed by the application.
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
if d == nil {
d = &nilDialer
}
challengeKey, err := generateChallengeKey()
if err != nil {
return nil, nil, err
}
u, err := url.Parse(urlStr)
if err != nil {
return nil, nil, err
}
switch u.Scheme {
case "ws":
u.Scheme = "http"
case "wss":
u.Scheme = "https"
default:
return nil, nil, errMalformedURL
}
if u.User != nil {
// User name and password are not allowed in websocket URIs.
return nil, nil, errMalformedURL
}
req := &http.Request{
Method: "GET",
URL: u,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: u.Host,
}
req = req.WithContext(ctx)
// Set the cookies present in the cookie jar of the dialer
if d.Jar != nil {
for _, cookie := range d.Jar.Cookies(u) {
req.AddCookie(cookie)
}
}
// Set the request headers using the capitalization for names and values in
// RFC examples. Although the capitalization shouldn't matter, there are
// servers that depend on it. The Header.Set method is not used because the
// method canonicalizes the header names.
req.Header["Upgrade"] = []string{"websocket"}
req.Header["Connection"] = []string{"Upgrade"}
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
req.Header["Sec-WebSocket-Version"] = []string{"13"}
if len(d.Subprotocols) > 0 {
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
}
for k, vs := range requestHeader {
switch {
case k == "Host":
if len(vs) > 0 {
req.Host = vs[0]
}
case k == "Upgrade" ||
k == "Connection" ||
k == "Sec-Websocket-Key" ||
k == "Sec-Websocket-Version" ||
k == "Sec-Websocket-Extensions" ||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
case k == "Sec-Websocket-Protocol":
req.Header["Sec-WebSocket-Protocol"] = vs
default:
req.Header[k] = vs
}
}
if d.EnableCompression {
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
}
if d.HandshakeTimeout != 0 {
var cancel func()
ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
defer cancel()
}
// Get network dial function.
var netDial func(network, add string) (net.Conn, error)
if d.NetDialContext != nil {
netDial = func(network, addr string) (net.Conn, error) {
return d.NetDialContext(ctx, network, addr)
}
} else if d.NetDial != nil {
netDial = d.NetDial
} else {
netDialer := &net.Dialer{}
netDial = func(network, addr string) (net.Conn, error) {
return netDialer.DialContext(ctx, network, addr)
}
}
// If needed, wrap the dial function to set the connection deadline.
if deadline, ok := ctx.Deadline(); ok {
forwardDial := netDial
netDial = func(network, addr string) (net.Conn, error) {
c, err := forwardDial(network, addr)
if err != nil {
return nil, err
}
err = c.SetDeadline(deadline)
if err != nil {
c.Close()
return nil, err
}
return c, nil
}
}
// If needed, wrap the dial function to connect through a proxy.
if d.Proxy != nil {
proxyURL, err := d.Proxy(req)
if err != nil {
return nil, nil, err
}
if proxyURL != nil {
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
if err != nil {
return nil, nil, err
}
netDial = dialer.Dial
}
}
hostPort, hostNoPort := hostPortNoPort(u)
trace := httptrace.ContextClientTrace(ctx)
if trace != nil && trace.GetConn != nil {
trace.GetConn(hostPort)
}
netConn, err := netDial("tcp", hostPort)
if trace != nil && trace.GotConn != nil {
trace.GotConn(httptrace.GotConnInfo{
Conn: netConn,
})
}
if err != nil {
return nil, nil, err
}
defer func() {
if netConn != nil {
netConn.Close()
}
}()
if u.Scheme == "https" {
cfg := cloneTLSConfig(d.TLSClientConfig)
if cfg.ServerName == "" {
cfg.ServerName = hostNoPort
}
tlsConn := tls.Client(netConn, cfg)
netConn = tlsConn
var err error
if trace != nil {
err = doHandshakeWithTrace(trace, tlsConn, cfg)
} else {
err = doHandshake(tlsConn, cfg)
}
if err != nil {
return nil, nil, err
}
}
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
if err := req.Write(netConn); err != nil {
return nil, nil, err
}
if trace != nil && trace.GotFirstResponseByte != nil {
if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
trace.GotFirstResponseByte()
}
}
resp, err := http.ReadResponse(conn.br, req)
if err != nil {
return nil, nil, err
}
if d.Jar != nil {
if rc := resp.Cookies(); len(rc) > 0 {
d.Jar.SetCookies(u, rc)
}
}
if resp.StatusCode != 101 ||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
// Before closing the network connection on return from this
// function, slurp up some of the response to aid application
// debugging.
buf := make([]byte, 1024)
n, _ := io.ReadFull(resp.Body, buf)
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
return nil, resp, ErrBadHandshake
}
for _, ext := range parseExtensions(resp.Header) {
if ext[""] != "permessage-deflate" {
continue
}
_, snct := ext["server_no_context_takeover"]
_, cnct := ext["client_no_context_takeover"]
if !snct || !cnct {
return nil, resp, errInvalidCompression
}
conn.newCompressionWriter = compressNoContextTakeover
conn.newDecompressionReader = decompressNoContextTakeover
break
}
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
netConn.SetDeadline(time.Time{})
netConn = nil // to avoid close in defer.
return conn, resp, nil
}
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
if err := tlsConn.Handshake(); err != nil {
return err
}
if !cfg.InsecureSkipVerify {
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
return err
}
}
return nil
}

View File

@@ -1,16 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.8
package websocket
import "crypto/tls"
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}

View File

@@ -1,38 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.8
package websocket
import "crypto/tls"
// cloneTLSConfig clones all public fields except the fields
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
// config in active use.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return &tls.Config{
Rand: cfg.Rand,
Time: cfg.Time,
Certificates: cfg.Certificates,
NameToCertificate: cfg.NameToCertificate,
GetCertificate: cfg.GetCertificate,
RootCAs: cfg.RootCAs,
NextProtos: cfg.NextProtos,
ServerName: cfg.ServerName,
ClientAuth: cfg.ClientAuth,
ClientCAs: cfg.ClientCAs,
InsecureSkipVerify: cfg.InsecureSkipVerify,
CipherSuites: cfg.CipherSuites,
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
ClientSessionCache: cfg.ClientSessionCache,
MinVersion: cfg.MinVersion,
MaxVersion: cfg.MaxVersion,
CurvePreferences: cfg.CurvePreferences,
}
}

View File

@@ -1,148 +0,0 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"compress/flate"
"errors"
"io"
"strings"
"sync"
)
const (
minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
maxCompressionLevel = flate.BestCompression
defaultCompressionLevel = 1
)
var (
flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
flateReaderPool = sync.Pool{New: func() interface{} {
return flate.NewReader(nil)
}}
)
func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
const tail =
// Add four bytes as specified in RFC
"\x00\x00\xff\xff" +
// Add final block to squelch unexpected EOF error from flate reader.
"\x01\x00\x00\xff\xff"
fr, _ := flateReaderPool.Get().(io.ReadCloser)
fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
return &flateReadWrapper{fr}
}
func isValidCompressionLevel(level int) bool {
return minCompressionLevel <= level && level <= maxCompressionLevel
}
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
p := &flateWriterPools[level-minCompressionLevel]
tw := &truncWriter{w: w}
fw, _ := p.Get().(*flate.Writer)
if fw == nil {
fw, _ = flate.NewWriter(tw, level)
} else {
fw.Reset(tw)
}
return &flateWriteWrapper{fw: fw, tw: tw, p: p}
}
// truncWriter is an io.Writer that writes all but the last four bytes of the
// stream to another io.Writer.
type truncWriter struct {
w io.WriteCloser
n int
p [4]byte
}
func (w *truncWriter) Write(p []byte) (int, error) {
n := 0
// fill buffer first for simplicity.
if w.n < len(w.p) {
n = copy(w.p[w.n:], p)
p = p[n:]
w.n += n
if len(p) == 0 {
return n, nil
}
}
m := len(p)
if m > len(w.p) {
m = len(w.p)
}
if nn, err := w.w.Write(w.p[:m]); err != nil {
return n + nn, err
}
copy(w.p[:], w.p[m:])
copy(w.p[len(w.p)-m:], p[len(p)-m:])
nn, err := w.w.Write(p[:len(p)-m])
return n + nn, err
}
type flateWriteWrapper struct {
fw *flate.Writer
tw *truncWriter
p *sync.Pool
}
func (w *flateWriteWrapper) Write(p []byte) (int, error) {
if w.fw == nil {
return 0, errWriteClosed
}
return w.fw.Write(p)
}
func (w *flateWriteWrapper) Close() error {
if w.fw == nil {
return errWriteClosed
}
err1 := w.fw.Flush()
w.p.Put(w.fw)
w.fw = nil
if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
}
err2 := w.tw.w.Close()
if err1 != nil {
return err1
}
return err2
}
type flateReadWrapper struct {
fr io.ReadCloser
}
func (r *flateReadWrapper) Read(p []byte) (int, error) {
if r.fr == nil {
return 0, io.ErrClosedPipe
}
n, err := r.fr.Read(p)
if err == io.EOF {
// Preemptively place the reader back in the pool. This helps with
// scenarios where the application does not call NextReader() soon after
// this final read.
r.Close()
}
return n, err
}
func (r *flateReadWrapper) Close() error {
if r.fr == nil {
return io.ErrClosedPipe
}
err := r.fr.Close()
flateReaderPool.Put(r.fr)
r.fr = nil
return err
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,15 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.8
package websocket
import "net"
func (c *Conn) writeBufs(bufs ...[]byte) error {
b := net.Buffers(bufs)
_, err := b.WriteTo(c.conn)
return err
}

View File

@@ -1,18 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.8
package websocket
func (c *Conn) writeBufs(bufs ...[]byte) error {
for _, buf := range bufs {
if len(buf) > 0 {
if _, err := c.conn.Write(buf); err != nil {
return err
}
}
}
return nil
}

View File

@@ -1,227 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package websocket implements the WebSocket protocol defined in RFC 6455.
//
// Overview
//
// The Conn type represents a WebSocket connection. A server application calls
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
//
// var upgrader = websocket.Upgrader{
// ReadBufferSize: 1024,
// WriteBufferSize: 1024,
// }
//
// func handler(w http.ResponseWriter, r *http.Request) {
// conn, err := upgrader.Upgrade(w, r, nil)
// if err != nil {
// log.Println(err)
// return
// }
// ... Use conn to send and receive messages.
// }
//
// Call the connection's WriteMessage and ReadMessage methods to send and
// receive messages as a slice of bytes. This snippet of code shows how to echo
// messages using these methods:
//
// for {
// messageType, p, err := conn.ReadMessage()
// if err != nil {
// log.Println(err)
// return
// }
// if err := conn.WriteMessage(messageType, p); err != nil {
// log.Println(err)
// return
// }
// }
//
// In the above snippet of code, p is a []byte and messageType is an int with value
// websocket.BinaryMessage or websocket.TextMessage.
//
// An application can also send and receive messages using the io.WriteCloser
// and io.Reader interfaces. To send a message, call the connection NextWriter
// method to get an io.WriteCloser, write the message to the writer and close
// the writer when done. To receive a message, call the connection NextReader
// method to get an io.Reader and read until io.EOF is returned. This snippet
// shows how to echo messages using the NextWriter and NextReader methods:
//
// for {
// messageType, r, err := conn.NextReader()
// if err != nil {
// return
// }
// w, err := conn.NextWriter(messageType)
// if err != nil {
// return err
// }
// if _, err := io.Copy(w, r); err != nil {
// return err
// }
// if err := w.Close(); err != nil {
// return err
// }
// }
//
// Data Messages
//
// The WebSocket protocol distinguishes between text and binary data messages.
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
// binary messages is left to the application.
//
// This package uses the TextMessage and BinaryMessage integer constants to
// identify the two data message types. The ReadMessage and NextReader methods
// return the type of the received message. The messageType argument to the
// WriteMessage and NextWriter methods specifies the type of a sent message.
//
// It is the application's responsibility to ensure that text messages are
// valid UTF-8 encoded text.
//
// Control Messages
//
// The WebSocket protocol defines three types of control messages: close, ping
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
// methods to send a control message to the peer.
//
// Connections handle received close messages by calling the handler function
// set with the SetCloseHandler method and by returning a *CloseError from the
// NextReader, ReadMessage or the message Read method. The default close
// handler sends a close message to the peer.
//
// Connections handle received ping messages by calling the handler function
// set with the SetPingHandler method. The default ping handler sends a pong
// message to the peer.
//
// Connections handle received pong messages by calling the handler function
// set with the SetPongHandler method. The default pong handler does nothing.
// If an application sends ping messages, then the application should set a
// pong handler to receive the corresponding pong.
//
// The control message handler functions are called from the NextReader,
// ReadMessage and message reader Read methods. The default close and ping
// handlers can block these methods for a short time when the handler writes to
// the connection.
//
// The application must read the connection to process close, ping and pong
// messages sent from the peer. If the application is not otherwise interested
// in messages from the peer, then the application should start a goroutine to
// read and discard messages from the peer. A simple example is:
//
// func readLoop(c *websocket.Conn) {
// for {
// if _, _, err := c.NextReader(); err != nil {
// c.Close()
// break
// }
// }
// }
//
// Concurrency
//
// Connections support one concurrent reader and one concurrent writer.
//
// Applications are responsible for ensuring that no more than one goroutine
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
// that no more than one goroutine calls the read methods (NextReader,
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
// concurrently.
//
// The Close and WriteControl methods can be called concurrently with all other
// methods.
//
// Origin Considerations
//
// Web browsers allow Javascript applications to open a WebSocket connection to
// any host. It's up to the server to enforce an origin policy using the Origin
// request header sent by the browser.
//
// The Upgrader calls the function specified in the CheckOrigin field to check
// the origin. If the CheckOrigin function returns false, then the Upgrade
// method fails the WebSocket handshake with HTTP status 403.
//
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
// the handshake if the Origin request header is present and the Origin host is
// not equal to the Host request header.
//
// The deprecated package-level Upgrade function does not perform origin
// checking. The application is responsible for checking the Origin header
// before calling the Upgrade function.
//
// Buffers
//
// Connections buffer network input and output to reduce the number
// of system calls when reading or writing messages.
//
// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
// Section 5 for a discussion of message framing. A WebSocket frame header is
// written to the network each time a write buffer is flushed to the network.
// Decreasing the size of the write buffer can increase the amount of framing
// overhead on the connection.
//
// The buffer sizes in bytes are specified by the ReadBufferSize and
// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
// buffers created by the HTTP server when a buffer size field is set to zero.
// The HTTP server buffers have a size of 4096 at the time of this writing.
//
// The buffer sizes do not limit the size of a message that can be read or
// written by a connection.
//
// Buffers are held for the lifetime of the connection by default. If the
// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
// write buffer only when writing a message.
//
// Applications should tune the buffer sizes to balance memory use and
// performance. Increasing the buffer size uses more memory, but can reduce the
// number of system calls to read or write the network. In the case of writing,
// increasing the buffer size can reduce the number of frame headers written to
// the network.
//
// Some guidelines for setting buffer parameters are:
//
// Limit the buffer sizes to the maximum expected message size. Buffers larger
// than the largest message do not provide any benefit.
//
// Depending on the distribution of message sizes, setting the buffer size to
// a value less than the maximum expected message size can greatly reduce memory
// use with a small impact on performance. Here's an example: If 99% of the
// messages are smaller than 256 bytes and the maximum message size is 512
// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
// than a buffer size of 512 bytes. The memory savings is 50%.
//
// A write buffer pool is useful when the application has a modest number of
// writes over a large number of connections. When buffers are pooled, a larger
// buffer size has a reduced impact on total memory use and has the benefit of
// reducing system calls and frame overhead.
//
// Compression EXPERIMENTAL
//
// Per message compression extensions (RFC 7692) are experimentally supported
// by this package in a limited capacity. Setting the EnableCompression option
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
// support.
//
// var upgrader = websocket.Upgrader{
// EnableCompression: true,
// }
//
// If compression was successfully negotiated with the connection's peer, any
// message received in compressed form will be automatically decompressed.
// All Read methods will return uncompressed bytes.
//
// Per message compression of messages written to a connection can be enabled
// or disabled by calling the corresponding Conn method:
//
// conn.EnableWriteCompression(false)
//
// Currently this package does not support compression with "context takeover".
// This means that messages must be compressed and decompressed in isolation,
// without retaining sliding window or dictionary state across messages. For
// more details refer to RFC 7692.
//
// Use of compression is experimental and may result in decreased performance.
package websocket
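
The package overview above shows server-side snippets; a hedged, self-contained client sketch (not from the deleted documentation; the endpoint URL is a placeholder) covering the Dial / WriteMessage / ReadMessage flow:

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Dial a WebSocket endpoint (placeholder address).
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Send a text message.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("ping")); err != nil {
		log.Fatalf("write: %v", err)
	}

	// Read the reply.
	msgType, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatalf("read: %v", err)
	}
	log.Printf("got type=%d payload=%s", msgType, msg)
}
```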

View File

@@ -1,3 +0,0 @@
module github.com/gorilla/websocket
go 1.12

View File

View File

@@ -1,42 +0,0 @@
// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"io"
"strings"
)
// JoinMessages concatenates received messages to create a single io.Reader.
// The string term is appended to each message. The returned reader does not
// support concurrent calls to the Read method.
func JoinMessages(c *Conn, term string) io.Reader {
return &joinReader{c: c, term: term}
}
type joinReader struct {
c *Conn
term string
r io.Reader
}
func (r *joinReader) Read(p []byte) (int, error) {
if r.r == nil {
var err error
_, r.r, err = r.c.NextReader()
if err != nil {
return 0, err
}
if r.term != "" {
r.r = io.MultiReader(r.r, strings.NewReader(r.term))
}
}
n, err := r.r.Read(p)
if err == io.EOF {
err = nil
r.r = nil
}
return n, err
}
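
Illustrative usage only (not part of the deleted file; the endpoint URL is a placeholder): `JoinMessages` turns a stream of discrete messages into a single `io.Reader`, so line-oriented tools such as `bufio.Scanner` can consume it.

```go
package main

import (
	"bufio"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Placeholder endpoint; JoinMessages works with any established *Conn.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/stream", nil)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Concatenate received messages, separated by "\n", and scan line by line.
	sc := bufio.NewScanner(websocket.JoinMessages(conn, "\n"))
	for sc.Scan() {
		log.Printf("message: %s", sc.Text())
	}
	if err := sc.Err(); err != nil {
		log.Printf("scan: %v", err)
	}
}
```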

View File

@@ -1,60 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"encoding/json"
"io"
)
// WriteJSON writes the JSON encoding of v as a message.
//
// Deprecated: Use c.WriteJSON instead.
func WriteJSON(c *Conn, v interface{}) error {
return c.WriteJSON(v)
}
// WriteJSON writes the JSON encoding of v as a message.
//
// See the documentation for encoding/json Marshal for details about the
// conversion of Go values to JSON.
func (c *Conn) WriteJSON(v interface{}) error {
w, err := c.NextWriter(TextMessage)
if err != nil {
return err
}
err1 := json.NewEncoder(w).Encode(v)
err2 := w.Close()
if err1 != nil {
return err1
}
return err2
}
// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// Deprecated: Use c.ReadJSON instead.
func ReadJSON(c *Conn, v interface{}) error {
return c.ReadJSON(v)
}
// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// See the documentation for the encoding/json Unmarshal function for details
// about the conversion of JSON to a Go value.
func (c *Conn) ReadJSON(v interface{}) error {
_, r, err := c.NextReader()
if err != nil {
return err
}
err = json.NewDecoder(r).Decode(v)
if err == io.EOF {
// One value is expected in the message.
err = io.ErrUnexpectedEOF
}
return err
}
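
A hedged usage sketch (not part of the deleted file; the `Event` type and endpoint URL are placeholders) for the JSON helpers above:

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

// Event is an illustrative payload type, not something defined by the package.
type Event struct {
	Kind string `json:"kind"`
	Item string `json:"item"`
}

func main() {
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/events", nil)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// WriteJSON encodes the value as a single text message.
	if err := conn.WriteJSON(Event{Kind: "subscribe", Item: "all"}); err != nil {
		log.Fatalf("write: %v", err)
	}

	// ReadJSON decodes the next message into the value pointed to by the argument.
	var ev Event
	if err := conn.ReadJSON(&ev); err != nil {
		log.Fatalf("read: %v", err)
	}
	log.Printf("event: %+v", ev)
}
```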

View File

@@ -1,54 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.
// +build !appengine
package websocket
import "unsafe"
const wordSize = int(unsafe.Sizeof(uintptr(0)))
func maskBytes(key [4]byte, pos int, b []byte) int {
// Mask one byte at a time for small buffers.
if len(b) < 2*wordSize {
for i := range b {
b[i] ^= key[pos&3]
pos++
}
return pos & 3
}
// Mask one byte at a time to word boundary.
if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
n = wordSize - n
for i := range b[:n] {
b[i] ^= key[pos&3]
pos++
}
b = b[n:]
}
// Create aligned word size key.
var k [wordSize]byte
for i := range k {
k[i] = key[(pos+i)&3]
}
kw := *(*uintptr)(unsafe.Pointer(&k))
// Mask one word at a time.
n := (len(b) / wordSize) * wordSize
for i := 0; i < n; i += wordSize {
*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
}
// Mask one byte at a time for remaining bytes.
b = b[n:]
for i := range b {
b[i] ^= key[pos&3]
pos++
}
return pos & 3
}

View File

@@ -1,15 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.
// +build appengine
package websocket
func maskBytes(key [4]byte, pos int, b []byte) int {
for i := range b {
b[i] ^= key[pos&3]
pos++
}
return pos & 3
}
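
A standalone copy of the simple masking loop (illustrative only, not part of the diff) showing that XOR masking is its own inverse; the key bytes follow the example frame in RFC 6455 §5.7:

```go
package main

import (
	"bytes"
	"fmt"
)

// maskBytes mirrors the simple variant above: XOR each byte with the rotating
// 4-byte key and return the updated key position.
func maskBytes(key [4]byte, pos int, b []byte) int {
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}

func main() {
	key := [4]byte{0x37, 0xfa, 0x21, 0x3d} // masking key from the RFC 6455 §5.7 example
	payload := []byte("Hello")
	original := append([]byte(nil), payload...)

	maskBytes(key, 0, payload)                  // mask in place
	maskBytes(key, 0, payload)                  // masking again with the same key unmasks
	fmt.Println(bytes.Equal(payload, original)) // true
}
```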

View File

@@ -1,102 +0,0 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bytes"
"net"
"sync"
"time"
)
// PreparedMessage caches on the wire representations of a message payload.
// Use PreparedMessage to efficiently send a message payload to multiple
// connections. PreparedMessage is especially useful when compression is used
// because the CPU and memory expensive compression operation can be executed
// once for a given set of compression options.
type PreparedMessage struct {
messageType int
data []byte
mu sync.Mutex
frames map[prepareKey]*preparedFrame
}
// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
type prepareKey struct {
isServer bool
compress bool
compressionLevel int
}
// preparedFrame contains data in wire representation.
type preparedFrame struct {
once sync.Once
data []byte
}
// NewPreparedMessage returns an initialized PreparedMessage. You can then send
// it to connection using WritePreparedMessage method. Valid wire
// representation will be calculated lazily only once for a set of current
// connection options.
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
pm := &PreparedMessage{
messageType: messageType,
frames: make(map[prepareKey]*preparedFrame),
data: data,
}
// Prepare a plain server frame.
_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
if err != nil {
return nil, err
}
// To protect against caller modifying the data argument, remember the data
// copied to the plain server frame.
pm.data = frameData[len(frameData)-len(data):]
return pm, nil
}
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
pm.mu.Lock()
frame, ok := pm.frames[key]
if !ok {
frame = &preparedFrame{}
pm.frames[key] = frame
}
pm.mu.Unlock()
var err error
frame.once.Do(func() {
// Prepare a frame using a 'fake' connection.
// TODO: Refactor code in conn.go to allow more direct construction of
// the frame.
mu := make(chan struct{}, 1)
mu <- struct{}{}
var nc prepareConn
c := &Conn{
conn: &nc,
mu: mu,
isServer: key.isServer,
compressionLevel: key.compressionLevel,
enableWriteCompression: true,
writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
}
if key.compress {
c.newCompressionWriter = compressNoContextTakeover
}
err = c.WriteMessage(pm.messageType, pm.data)
frame.data = nc.buf.Bytes()
})
return pm.messageType, frame.data, err
}
type prepareConn struct {
buf bytes.Buffer
net.Conn
}
func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
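
A hedged sketch (not part of the deleted file; `broadcast` and `conns` are illustrative names) of the intended use: prepare a message once and write it to many connections with `WritePreparedMessage`:

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

// broadcast sends one payload to many connections, paying the framing (and,
// if negotiated, compression) cost only once per connection configuration.
func broadcast(conns []*websocket.Conn, payload []byte) {
	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
	if err != nil {
		log.Printf("prepare: %v", err)
		return
	}
	for _, c := range conns {
		if err := c.WritePreparedMessage(pm); err != nil {
			log.Printf("write: %v", err)
		}
	}
}

func main() {
	// Placeholder: in a real server these would come from accepted upgrades.
	var conns []*websocket.Conn
	broadcast(conns, []byte(`{"status":"ok"}`))
}
```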

View File

@@ -1,77 +0,0 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bufio"
"encoding/base64"
"errors"
"net"
"net/http"
"net/url"
"strings"
)
type netDialerFunc func(network, addr string) (net.Conn, error)
func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
return fn(network, addr)
}
func init() {
proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
})
}
type httpProxyDialer struct {
proxyURL *url.URL
forwardDial func(network, addr string) (net.Conn, error)
}
func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
hostPort, _ := hostPortNoPort(hpd.proxyURL)
conn, err := hpd.forwardDial(network, hostPort)
if err != nil {
return nil, err
}
connectHeader := make(http.Header)
if user := hpd.proxyURL.User; user != nil {
proxyUser := user.Username()
if proxyPassword, passwordSet := user.Password(); passwordSet {
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
}
}
connectReq := &http.Request{
Method: "CONNECT",
URL: &url.URL{Opaque: addr},
Host: addr,
Header: connectHeader,
}
if err := connectReq.Write(conn); err != nil {
conn.Close()
return nil, err
}
// Read response. It's OK to use and discard the buffered reader here because
// the remote server does not speak until spoken to.
br := bufio.NewReader(conn)
resp, err := http.ReadResponse(br, connectReq)
if err != nil {
conn.Close()
return nil, err
}
if resp.StatusCode != 200 {
conn.Close()
f := strings.SplitN(resp.Status, " ", 2)
return nil, errors.New(f[1])
}
return conn, nil
}
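
A minimal sketch (not part of the deleted file; the proxy is taken from the standard `HTTP_PROXY`/`HTTPS_PROXY` environment variables and the endpoint URL is a placeholder) of dialing through the HTTP proxy support above:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

func main() {
	// Route the handshake through the proxy named in the environment; for
	// http:// proxy URLs the connection goes through the CONNECT dialer above.
	dialer := websocket.Dialer{
		Proxy: http.ProxyFromEnvironment,
	}
	conn, _, err := dialer.Dial("ws://example.com/socket", nil) // placeholder URL
	if err != nil {
		log.Fatalf("dial via proxy: %v", err)
	}
	defer conn.Close()
}
```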

View File

@@ -1,363 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bufio"
"errors"
"io"
"net/http"
"net/url"
"strings"
"time"
)
// HandshakeError describes an error with the handshake from the peer.
type HandshakeError struct {
message string
}
func (e HandshakeError) Error() string { return e.message }
// Upgrader specifies parameters for upgrading an HTTP connection to a
// WebSocket connection.
type Upgrader struct {
// HandshakeTimeout specifies the duration for the handshake to complete.
HandshakeTimeout time.Duration
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
// size is zero, then buffers allocated by the HTTP server are used. The
// I/O buffer sizes do not limit the size of the messages that can be sent
// or received.
ReadBufferSize, WriteBufferSize int
// WriteBufferPool is a pool of buffers for write operations. If the value
// is not set, then write buffers are allocated to the connection for the
// lifetime of the connection.
//
// A pool is most useful when the application has a modest volume of writes
// across a large number of connections.
//
// Applications should use a single pool for each unique value of
// WriteBufferSize.
WriteBufferPool BufferPool
// Subprotocols specifies the server's supported protocols in order of
// preference. If this field is not nil, then the Upgrade method negotiates a
// subprotocol by selecting the first match in this list with a protocol
// requested by the client. If there's no match, then no protocol is
// negotiated (the Sec-Websocket-Protocol header is not included in the
// handshake response).
Subprotocols []string
// Error specifies the function for generating HTTP error responses. If Error
// is nil, then http.Error is used to generate the HTTP response.
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
// CheckOrigin returns true if the request Origin header is acceptable. If
// CheckOrigin is nil, then a safe default is used: return false if the
// Origin request header is present and the origin host is not equal to
// request Host header.
//
// A CheckOrigin function should carefully validate the request origin to
// prevent cross-site request forgery.
CheckOrigin func(r *http.Request) bool
// EnableCompression specify if the server should attempt to negotiate per
// message compression (RFC 7692). Setting this value to true does not
// guarantee that compression will be supported. Currently only "no context
// takeover" modes are supported.
EnableCompression bool
}
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
err := HandshakeError{reason}
if u.Error != nil {
u.Error(w, r, status, err)
} else {
w.Header().Set("Sec-Websocket-Version", "13")
http.Error(w, http.StatusText(status), status)
}
return nil, err
}
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
func checkSameOrigin(r *http.Request) bool {
origin := r.Header["Origin"]
if len(origin) == 0 {
return true
}
u, err := url.Parse(origin[0])
if err != nil {
return false
}
return equalASCIIFold(u.Host, r.Host)
}
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
if u.Subprotocols != nil {
clientProtocols := Subprotocols(r)
for _, serverProtocol := range u.Subprotocols {
for _, clientProtocol := range clientProtocols {
if clientProtocol == serverProtocol {
return clientProtocol
}
}
}
} else if responseHeader != nil {
return responseHeader.Get("Sec-Websocket-Protocol")
}
return ""
}
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
//
// The responseHeader is included in the response to the client's upgrade
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
// application negotiated subprotocol (Sec-WebSocket-Protocol).
//
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
// response.
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
const badHandshake = "websocket: the client is not using the websocket protocol: "
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
}
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
}
if r.Method != "GET" {
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
}
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
}
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
}
checkOrigin := u.CheckOrigin
if checkOrigin == nil {
checkOrigin = checkSameOrigin
}
if !checkOrigin(r) {
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
}
challengeKey := r.Header.Get("Sec-Websocket-Key")
if challengeKey == "" {
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
}
subprotocol := u.selectSubprotocol(r, responseHeader)
// Negotiate PMCE
var compress bool
if u.EnableCompression {
for _, ext := range parseExtensions(r.Header) {
if ext[""] != "permessage-deflate" {
continue
}
compress = true
break
}
}
h, ok := w.(http.Hijacker)
if !ok {
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
}
var brw *bufio.ReadWriter
netConn, brw, err := h.Hijack()
if err != nil {
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
}
if brw.Reader.Buffered() > 0 {
netConn.Close()
return nil, errors.New("websocket: client sent data before handshake is complete")
}
var br *bufio.Reader
if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
// Reuse hijacked buffered reader as connection reader.
br = brw.Reader
}
buf := bufioWriterBuffer(netConn, brw.Writer)
var writeBuf []byte
if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
// Reuse hijacked write buffer as connection buffer.
writeBuf = buf
}
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
c.subprotocol = subprotocol
if compress {
c.newCompressionWriter = compressNoContextTakeover
c.newDecompressionReader = decompressNoContextTakeover
}
// Use larger of hijacked buffer and connection write buffer for header.
p := buf
if len(c.writeBuf) > len(p) {
p = c.writeBuf
}
p = p[:0]
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
p = append(p, computeAcceptKey(challengeKey)...)
p = append(p, "\r\n"...)
if c.subprotocol != "" {
p = append(p, "Sec-WebSocket-Protocol: "...)
p = append(p, c.subprotocol...)
p = append(p, "\r\n"...)
}
if compress {
p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
}
for k, vs := range responseHeader {
if k == "Sec-Websocket-Protocol" {
continue
}
for _, v := range vs {
p = append(p, k...)
p = append(p, ": "...)
for i := 0; i < len(v); i++ {
b := v[i]
if b <= 31 {
// prevent response splitting.
b = ' '
}
p = append(p, b)
}
p = append(p, "\r\n"...)
}
}
p = append(p, "\r\n"...)
// Clear deadlines set by HTTP server.
netConn.SetDeadline(time.Time{})
if u.HandshakeTimeout > 0 {
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
}
if _, err = netConn.Write(p); err != nil {
netConn.Close()
return nil, err
}
if u.HandshakeTimeout > 0 {
netConn.SetWriteDeadline(time.Time{})
}
return c, nil
}
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
//
// Deprecated: Use websocket.Upgrader instead.
//
// Upgrade does not perform origin checking. The application is responsible for
// checking the Origin header before calling Upgrade. An example implementation
// of the same origin policy check is:
//
// if req.Header.Get("Origin") != "http://"+req.Host {
// http.Error(w, "Origin not allowed", http.StatusForbidden)
// return
// }
//
// If the endpoint supports subprotocols, then the application is responsible
// for negotiating the protocol used on the connection. Use the Subprotocols()
// function to get the subprotocols requested by the client. Use the
// Sec-Websocket-Protocol response header to specify the subprotocol selected
// by the application.
//
// The responseHeader is included in the response to the client's upgrade
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
// negotiated subprotocol (Sec-Websocket-Protocol).
//
// The connection buffers IO to the underlying network connection. The
// readBufSize and writeBufSize parameters specify the size of the buffers to
// use. Messages can be larger than the buffers.
//
// If the request is not a valid WebSocket handshake, then Upgrade returns an
// error of type HandshakeError. Applications should handle this error by
// replying to the client with an HTTP error response.
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
// don't return errors to maintain backwards compatibility
}
u.CheckOrigin = func(r *http.Request) bool {
// allow all connections by default
return true
}
return u.Upgrade(w, r, responseHeader)
}
// Subprotocols returns the subprotocols requested by the client in the
// Sec-Websocket-Protocol header.
func Subprotocols(r *http.Request) []string {
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
if h == "" {
return nil
}
protocols := strings.Split(h, ",")
for i := range protocols {
protocols[i] = strings.TrimSpace(protocols[i])
}
return protocols
}
// IsWebSocketUpgrade returns true if the client requested upgrade to the
// WebSocket protocol.
func IsWebSocketUpgrade(r *http.Request) bool {
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
tokenListContainsValue(r.Header, "Upgrade", "websocket")
}
// bufioReaderSize size returns the size of a bufio.Reader.
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
// This code assumes that peek on a reset reader returns
// bufio.Reader.buf[:0].
// TODO: Use bufio.Reader.Size() after Go 1.10
br.Reset(originalReader)
if p, err := br.Peek(0); err == nil {
return cap(p)
}
return 0
}
// writeHook is an io.Writer that records the last slice passed to it via
// io.Writer.Write.
type writeHook struct {
p []byte
}
func (wh *writeHook) Write(p []byte) (int, error) {
wh.p = p
return len(p), nil
}
// bufioWriterBuffer grabs the buffer from a bufio.Writer.
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
// This code assumes that bufio.Writer.buf[:1] is passed to the
// bufio.Writer's underlying writer.
var wh writeHook
bw.Reset(&wh)
bw.WriteByte(0)
bw.Flush()
bw.Reset(originalWriter)
return wh.p[:cap(wh.p)]
}
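
A hedged server-side sketch (not part of the deleted file; the allowed origin, path, and port are placeholders) that exercises `Upgrader.Upgrade` with a custom `CheckOrigin` and echoes messages back:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin: func(r *http.Request) bool {
		return r.Header.Get("Origin") == "https://app.example.com" // placeholder origin
	},
}

func echo(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade has already written an HTTP error response.
		log.Printf("upgrade: %v", err)
		return
	}
	defer conn.Close()
	for {
		mt, msg, err := conn.ReadMessage()
		if err != nil {
			return
		}
		if err := conn.WriteMessage(mt, msg); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/ws", echo)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```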

View File

@@ -1,19 +0,0 @@
// +build go1.8
package websocket
import (
"crypto/tls"
"net/http/httptrace"
)
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
if trace.TLSHandshakeStart != nil {
trace.TLSHandshakeStart()
}
err := doHandshake(tlsConn, cfg)
if trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
}
return err
}

View File

@@ -1,12 +0,0 @@
// +build !go1.8
package websocket
import (
"crypto/tls"
"net/http/httptrace"
)
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
return doHandshake(tlsConn, cfg)
}

View File

@@ -1,283 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"crypto/rand"
"crypto/sha1"
"encoding/base64"
"io"
"net/http"
"strings"
"unicode/utf8"
)
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
func computeAcceptKey(challengeKey string) string {
h := sha1.New()
h.Write([]byte(challengeKey))
h.Write(keyGUID)
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
func generateChallengeKey() (string, error) {
p := make([]byte, 16)
if _, err := io.ReadFull(rand.Reader, p); err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(p), nil
}
// Token octets per RFC 2616.
var isTokenOctet = [256]bool{
'!': true,
'#': true,
'$': true,
'%': true,
'&': true,
'\'': true,
'*': true,
'+': true,
'-': true,
'.': true,
'0': true,
'1': true,
'2': true,
'3': true,
'4': true,
'5': true,
'6': true,
'7': true,
'8': true,
'9': true,
'A': true,
'B': true,
'C': true,
'D': true,
'E': true,
'F': true,
'G': true,
'H': true,
'I': true,
'J': true,
'K': true,
'L': true,
'M': true,
'N': true,
'O': true,
'P': true,
'Q': true,
'R': true,
'S': true,
'T': true,
'U': true,
'W': true,
'V': true,
'X': true,
'Y': true,
'Z': true,
'^': true,
'_': true,
'`': true,
'a': true,
'b': true,
'c': true,
'd': true,
'e': true,
'f': true,
'g': true,
'h': true,
'i': true,
'j': true,
'k': true,
'l': true,
'm': true,
'n': true,
'o': true,
'p': true,
'q': true,
'r': true,
's': true,
't': true,
'u': true,
'v': true,
'w': true,
'x': true,
'y': true,
'z': true,
'|': true,
'~': true,
}
// skipSpace returns a slice of the string s with all leading RFC 2616 linear
// whitespace removed.
func skipSpace(s string) (rest string) {
i := 0
for ; i < len(s); i++ {
if b := s[i]; b != ' ' && b != '\t' {
break
}
}
return s[i:]
}
// nextToken returns the leading RFC 2616 token of s and the string following
// the token.
func nextToken(s string) (token, rest string) {
i := 0
for ; i < len(s); i++ {
if !isTokenOctet[s[i]] {
break
}
}
return s[:i], s[i:]
}
// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
// and the string following the token or quoted string.
func nextTokenOrQuoted(s string) (value string, rest string) {
if !strings.HasPrefix(s, "\"") {
return nextToken(s)
}
s = s[1:]
for i := 0; i < len(s); i++ {
switch s[i] {
case '"':
return s[:i], s[i+1:]
case '\\':
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i = i + 1; i < len(s); i++ {
b := s[i]
switch {
case escape:
escape = false
p[j] = b
j++
case b == '\\':
escape = true
case b == '"':
return string(p[:j]), s[i+1:]
default:
p[j] = b
j++
}
}
return "", ""
}
}
return "", ""
}
// equalASCIIFold returns true if s is equal to t with ASCII case folding as
// defined in RFC 4790.
func equalASCIIFold(s, t string) bool {
for s != "" && t != "" {
sr, size := utf8.DecodeRuneInString(s)
s = s[size:]
tr, size := utf8.DecodeRuneInString(t)
t = t[size:]
if sr == tr {
continue
}
if 'A' <= sr && sr <= 'Z' {
sr = sr + 'a' - 'A'
}
if 'A' <= tr && tr <= 'Z' {
tr = tr + 'a' - 'A'
}
if sr != tr {
return false
}
}
return s == t
}
// tokenListContainsValue returns true if the 1#token header with the given
// name contains a token equal to value with ASCII case folding.
func tokenListContainsValue(header http.Header, name string, value string) bool {
headers:
for _, s := range header[name] {
for {
var t string
t, s = nextToken(skipSpace(s))
if t == "" {
continue headers
}
s = skipSpace(s)
if s != "" && s[0] != ',' {
continue headers
}
if equalASCIIFold(t, value) {
return true
}
if s == "" {
continue headers
}
s = s[1:]
}
}
return false
}
// parseExtensions parses WebSocket extensions from a header.
func parseExtensions(header http.Header) []map[string]string {
// From RFC 6455:
//
// Sec-WebSocket-Extensions = extension-list
// extension-list = 1#extension
// extension = extension-token *( ";" extension-param )
// extension-token = registered-token
// registered-token = token
// extension-param = token [ "=" (token | quoted-string) ]
// ;When using the quoted-string syntax variant, the value
// ;after quoted-string unescaping MUST conform to the
// ;'token' ABNF.
var result []map[string]string
headers:
for _, s := range header["Sec-Websocket-Extensions"] {
for {
var t string
t, s = nextToken(skipSpace(s))
if t == "" {
continue headers
}
ext := map[string]string{"": t}
for {
s = skipSpace(s)
if !strings.HasPrefix(s, ";") {
break
}
var k string
k, s = nextToken(skipSpace(s[1:]))
if k == "" {
continue headers
}
s = skipSpace(s)
var v string
if strings.HasPrefix(s, "=") {
v, s = nextTokenOrQuoted(skipSpace(s[1:]))
s = skipSpace(s)
}
if s != "" && s[0] != ',' && s[0] != ';' {
continue headers
}
ext[k] = v
}
if s != "" && s[0] != ',' {
continue headers
}
result = append(result, ext)
if s == "" {
continue headers
}
s = s[1:]
}
}
return result
}
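
A standalone copy of `computeAcceptKey` (illustrative only, not part of the diff), applied to the sample `Sec-WebSocket-Key` from RFC 6455; the expected accept value is noted in the comment:

```go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// computeAcceptKey mirrors the helper above: SHA-1 of key plus the fixed GUID,
// base64-encoded.
func computeAcceptKey(challengeKey string) string {
	h := sha1.New()
	h.Write([]byte(challengeKey))
	h.Write([]byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11"))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

func main() {
	// Expected output per the RFC 6455 handshake example: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
	fmt.Println(computeAcceptKey("dGhlIHNhbXBsZSBub25jZQ=="))
}
```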

View File

@@ -1,473 +0,0 @@
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
// Package proxy provides support for a variety of protocols to proxy network
// data.
//
package websocket
import (
"errors"
"io"
"net"
"net/url"
"os"
"strconv"
"strings"
"sync"
)
type proxy_direct struct{}
// Direct is a direct proxy: one that makes network connections directly.
var proxy_Direct = proxy_direct{}
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
return net.Dial(network, addr)
}
// A PerHost directs connections to a default Dialer unless the host name
// requested matches one of a number of exceptions.
type proxy_PerHost struct {
def, bypass proxy_Dialer
bypassNetworks []*net.IPNet
bypassIPs []net.IP
bypassZones []string
bypassHosts []string
}
// NewPerHost returns a PerHost Dialer that directs connections to either
// defaultDialer or bypass, depending on whether the connection matches one of
// the configured rules.
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
return &proxy_PerHost{
def: defaultDialer,
bypass: bypass,
}
}
// Dial connects to the address addr on the given network through either
// defaultDialer or bypass.
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
return p.dialerForRequest(host).Dial(network, addr)
}
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
if ip := net.ParseIP(host); ip != nil {
for _, net := range p.bypassNetworks {
if net.Contains(ip) {
return p.bypass
}
}
for _, bypassIP := range p.bypassIPs {
if bypassIP.Equal(ip) {
return p.bypass
}
}
return p.def
}
for _, zone := range p.bypassZones {
if strings.HasSuffix(host, zone) {
return p.bypass
}
if host == zone[1:] {
// For a zone ".example.com", we match "example.com"
// too.
return p.bypass
}
}
for _, bypassHost := range p.bypassHosts {
if bypassHost == host {
return p.bypass
}
}
return p.def
}
// AddFromString parses a string that contains comma-separated values
// specifying hosts that should use the bypass proxy. Each value is either an
// IP address, a CIDR range, a zone (*.example.com) or a host name
// (localhost). A best effort is made to parse the string and errors are
// ignored.
func (p *proxy_PerHost) AddFromString(s string) {
hosts := strings.Split(s, ",")
for _, host := range hosts {
host = strings.TrimSpace(host)
if len(host) == 0 {
continue
}
if strings.Contains(host, "/") {
// We assume that it's a CIDR address like 127.0.0.0/8
if _, net, err := net.ParseCIDR(host); err == nil {
p.AddNetwork(net)
}
continue
}
if ip := net.ParseIP(host); ip != nil {
p.AddIP(ip)
continue
}
if strings.HasPrefix(host, "*.") {
p.AddZone(host[1:])
continue
}
p.AddHost(host)
}
}
// AddIP specifies an IP address that will use the bypass proxy. Note that
// this will only take effect if a literal IP address is dialed. A connection
// to a named host will never match an IP.
func (p *proxy_PerHost) AddIP(ip net.IP) {
p.bypassIPs = append(p.bypassIPs, ip)
}
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
// this will only take effect if a literal IP address is dialed. A connection
// to a named host will never match.
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
p.bypassNetworks = append(p.bypassNetworks, net)
}
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
// "example.com" matches "example.com" and all of its subdomains.
func (p *proxy_PerHost) AddZone(zone string) {
if strings.HasSuffix(zone, ".") {
zone = zone[:len(zone)-1]
}
if !strings.HasPrefix(zone, ".") {
zone = "." + zone
}
p.bypassZones = append(p.bypassZones, zone)
}
// AddHost specifies a host name that will use the bypass proxy.
func (p *proxy_PerHost) AddHost(host string) {
if strings.HasSuffix(host, ".") {
host = host[:len(host)-1]
}
p.bypassHosts = append(p.bypassHosts, host)
}
// A Dialer is a means to establish a connection.
type proxy_Dialer interface {
// Dial connects to the given address via the proxy.
Dial(network, addr string) (c net.Conn, err error)
}
// Auth contains authentication parameters that specific Dialers may require.
type proxy_Auth struct {
User, Password string
}
// FromEnvironment returns the dialer specified by the proxy related variables in
// the environment.
func proxy_FromEnvironment() proxy_Dialer {
allProxy := proxy_allProxyEnv.Get()
if len(allProxy) == 0 {
return proxy_Direct
}
proxyURL, err := url.Parse(allProxy)
if err != nil {
return proxy_Direct
}
proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
if err != nil {
return proxy_Direct
}
noProxy := proxy_noProxyEnv.Get()
if len(noProxy) == 0 {
return proxy
}
perHost := proxy_NewPerHost(proxy, proxy_Direct)
perHost.AddFromString(noProxy)
return perHost
}
// proxySchemes is a map from URL schemes to a function that creates a Dialer
// from a URL with such a scheme.
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
// by FromURL.
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
if proxy_proxySchemes == nil {
proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
}
proxy_proxySchemes[scheme] = f
}
// FromURL returns a Dialer given a URL specification and an underlying
// Dialer for it to make network requests.
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
var auth *proxy_Auth
if u.User != nil {
auth = new(proxy_Auth)
auth.User = u.User.Username()
if p, ok := u.User.Password(); ok {
auth.Password = p
}
}
switch u.Scheme {
case "socks5":
return proxy_SOCKS5("tcp", u.Host, auth, forward)
}
// If the scheme doesn't match any of the built-in schemes, see if it
// was registered by another package.
if proxy_proxySchemes != nil {
if f, ok := proxy_proxySchemes[u.Scheme]; ok {
return f(u, forward)
}
}
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
}
var (
proxy_allProxyEnv = &proxy_envOnce{
names: []string{"ALL_PROXY", "all_proxy"},
}
proxy_noProxyEnv = &proxy_envOnce{
names: []string{"NO_PROXY", "no_proxy"},
}
)
// envOnce looks up an environment variable (optionally by multiple
// names) once. It mitigates expensive lookups on some platforms
// (e.g. Windows).
// (Borrowed from net/http/transport.go)
type proxy_envOnce struct {
names []string
once sync.Once
val string
}
func (e *proxy_envOnce) Get() string {
e.once.Do(e.init)
return e.val
}
func (e *proxy_envOnce) init() {
for _, n := range e.names {
e.val = os.Getenv(n)
if e.val != "" {
return
}
}
}
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
// with an optional username and password. See RFC 1928 and RFC 1929.
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
s := &proxy_socks5{
network: network,
addr: addr,
forward: forward,
}
if auth != nil {
s.user = auth.User
s.password = auth.Password
}
return s, nil
}
type proxy_socks5 struct {
user, password string
network, addr string
forward proxy_Dialer
}
const proxy_socks5Version = 5
const (
proxy_socks5AuthNone = 0
proxy_socks5AuthPassword = 2
)
const proxy_socks5Connect = 1
const (
proxy_socks5IP4 = 1
proxy_socks5Domain = 3
proxy_socks5IP6 = 4
)
var proxy_socks5Errors = []string{
"",
"general failure",
"connection forbidden",
"network unreachable",
"host unreachable",
"connection refused",
"TTL expired",
"command not supported",
"address type not supported",
}
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
switch network {
case "tcp", "tcp6", "tcp4":
default:
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
}
conn, err := s.forward.Dial(s.network, s.addr)
if err != nil {
return nil, err
}
if err := s.connect(conn, addr); err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
// connect takes an existing connection to a socks5 proxy server,
// and commands the server to extend that connection to target,
// which must be a canonical address with a host and port.
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
host, portStr, err := net.SplitHostPort(target)
if err != nil {
return err
}
port, err := strconv.Atoi(portStr)
if err != nil {
return errors.New("proxy: failed to parse port number: " + portStr)
}
if port < 1 || port > 0xffff {
return errors.New("proxy: port number out of range: " + portStr)
}
// the size here is just an estimate
buf := make([]byte, 0, 6+len(host))
buf = append(buf, proxy_socks5Version)
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
} else {
buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
}
if _, err := conn.Write(buf); err != nil {
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
if buf[0] != 5 {
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
}
if buf[1] == 0xff {
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
}
// See RFC 1929
if buf[1] == proxy_socks5AuthPassword {
buf = buf[:0]
buf = append(buf, 1 /* password protocol version */)
buf = append(buf, uint8(len(s.user)))
buf = append(buf, s.user...)
buf = append(buf, uint8(len(s.password)))
buf = append(buf, s.password...)
if _, err := conn.Write(buf); err != nil {
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
if buf[1] != 0 {
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
}
}
buf = buf[:0]
buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
if ip := net.ParseIP(host); ip != nil {
if ip4 := ip.To4(); ip4 != nil {
buf = append(buf, proxy_socks5IP4)
ip = ip4
} else {
buf = append(buf, proxy_socks5IP6)
}
buf = append(buf, ip...)
} else {
if len(host) > 255 {
return errors.New("proxy: destination host name too long: " + host)
}
buf = append(buf, proxy_socks5Domain)
buf = append(buf, byte(len(host)))
buf = append(buf, host...)
}
buf = append(buf, byte(port>>8), byte(port))
if _, err := conn.Write(buf); err != nil {
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
failure := "unknown error"
if int(buf[1]) < len(proxy_socks5Errors) {
failure = proxy_socks5Errors[buf[1]]
}
if len(failure) > 0 {
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
}
bytesToDiscard := 0
switch buf[3] {
case proxy_socks5IP4:
bytesToDiscard = net.IPv4len
case proxy_socks5IP6:
bytesToDiscard = net.IPv6len
case proxy_socks5Domain:
_, err := io.ReadFull(conn, buf[:1])
if err != nil {
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
bytesToDiscard = int(buf[0])
default:
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
}
if cap(buf) < bytesToDiscard {
buf = make([]byte, bytesToDiscard)
} else {
buf = buf[:bytesToDiscard]
}
if _, err := io.ReadFull(conn, buf); err != nil {
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
// Also need to discard the port number
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
}
return nil
}
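
A hedged sketch (not part of the deleted file; the proxy address and endpoint URL are placeholders) showing the equivalent of the bundled SOCKS5 support using `golang.org/x/net/proxy` together with the websocket `Dialer`'s `NetDial` hook:

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
	"golang.org/x/net/proxy"
)

func main() {
	// Build a SOCKS5 dialer (placeholder proxy address) and hand its Dial
	// function to the websocket Dialer.
	socks, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatalf("socks5: %v", err)
	}
	d := websocket.Dialer{NetDial: socks.Dial}
	conn, _, err := d.Dial("ws://example.com/socket", nil) // placeholder URL
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```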

View File

@@ -1,18 +0,0 @@
sudo: false
language: go
go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x
- master
before_install:
- go get github.com/mattn/goveralls
script:
- go test -v -covermode=count -coverprofile=coverage.out
- go vet ./...
- test -z "$(gofmt -d -s . | tee /dev/stderr)"
- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci

View File

@@ -1,29 +0,0 @@
BSD 3-Clause License
Copyright (c) 2013, Julien Schmidt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,300 +0,0 @@
# HttpRouter [![Build Status](https://travis-ci.org/julienschmidt/httprouter.svg?branch=master)](https://travis-ci.org/julienschmidt/httprouter) [![Coverage Status](https://coveralls.io/repos/github/julienschmidt/httprouter/badge.svg?branch=master)](https://coveralls.io/github/julienschmidt/httprouter?branch=master) [![GoDoc](https://godoc.org/github.com/julienschmidt/httprouter?status.svg)](http://godoc.org/github.com/julienschmidt/httprouter)
HttpRouter is a lightweight high performance HTTP request router (also called *multiplexer* or just *mux* for short) for [Go](https://golang.org/).
In contrast to the [default mux](https://golang.org/pkg/net/http/#ServeMux) of Go's `net/http` package, this router supports variables in the routing pattern and matches against the request method. It also scales better.
The router is optimized for high performance and a small memory footprint. It scales well even with very long paths and a large number of routes. A compressing dynamic trie (radix tree) structure is used for efficient matching.
## Features
**Only explicit matches:** With other routers, like [`http.ServeMux`](https://golang.org/pkg/net/http/#ServeMux), a requested URL path could match multiple patterns. Therefore they have some awkward pattern priority rules, like *longest match* or *first registered, first matched*. By design of this router, a request can only match exactly one or no route. As a result, there are also no unintended matches, which makes it great for SEO and improves the user experience.
**Stop caring about trailing slashes:** Choose the URL style you like, the router automatically redirects the client if a trailing slash is missing or if there is one extra. Of course it only does so if the new path has a handler. If you don't like it, you can [turn off this behavior](https://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash).
**Path auto-correction:** Besides detecting the missing or additional trailing slash at no extra cost, the router can also fix wrong cases and remove superfluous path elements (like `../` or `//`). Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users? HttpRouter can help him by making a case-insensitive look-up and redirecting him to the correct URL.
**Parameters in your routing pattern:** Stop parsing the requested URL path, just give the path segment a name and the router delivers the dynamic value to you. Because of the design of the router, path parameters are very cheap.
**Zero Garbage:** The matching and dispatching process generates zero bytes of garbage. The only heap allocations that are made are building the slice of the key-value pairs for path parameters, and building new context and request objects (the latter only in the standard `Handler`/`HandlerFunc` API). In the 3-argument API, if the request path contains no parameters not a single heap allocation is necessary.
**Best Performance:** [Benchmarks speak for themselves](https://github.com/julienschmidt/go-http-routing-benchmark). See below for technical details of the implementation.
**No more server crashes:** You can set a [Panic handler](https://godoc.org/github.com/julienschmidt/httprouter#Router.PanicHandler) to deal with panics occurring during handling a HTTP request. The router then recovers and lets the `PanicHandler` log what happened and deliver a nice error page.
**Perfect for APIs:** The router design encourages building sensible, hierarchical RESTful APIs. Moreover it has built-in native support for [OPTIONS requests](http://zacstewart.com/2012/04/14/http-options-method.html) and `405 Method Not Allowed` replies.
Of course you can also set **custom [`NotFound`](https://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) and [`MethodNotAllowed`](https://godoc.org/github.com/julienschmidt/httprouter#Router.MethodNotAllowed) handlers** and [**serve static files**](https://godoc.org/github.com/julienschmidt/httprouter#Router.ServeFiles).
## Usage
This is just a quick introduction, view the [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter) for details.
Let's start with a trivial example:
```go
package main
import (
"fmt"
"net/http"
"log"
"github.com/julienschmidt/httprouter"
)
func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
fmt.Fprint(w, "Welcome!\n")
}
func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
}
func main() {
router := httprouter.New()
router.GET("/", Index)
router.GET("/hello/:name", Hello)
log.Fatal(http.ListenAndServe(":8080", router))
}
```
### Named parameters
As you can see, `:name` is a *named parameter*. The values are accessible via `httprouter.Params`, which is just a slice of `httprouter.Param`s. You can get the value of a parameter either by its index in the slice, or by using the `ByName(name)` method: `:name` can be retrieved by `ByName("name")`.
When using an `http.Handler` (via `router.Handler` or `http.HandlerFunc`) instead of HttpRouter's handle API with its 3rd function parameter, the named parameters are stored in the `request.Context`. See more below under [Why doesn't this work with http.Handler?](#why-doesnt-this-work-with-httphandler).
Named parameters only match a single path segment:
```
Pattern: /user/:user
/user/gordon match
/user/you match
/user/gordon/profile no match
/user/ no match
```
**Note:** Since this router has only explicit matches, you can not register static routes and parameters for the same path segment. For example you can not register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent from each other.
### Catch-All parameters
The second type are *catch-all* parameters and have the form `*name`. Like the name suggests, they match everything. Therefore they must always be at the **end** of the pattern:
```
Pattern: /src/*filepath
/src/ match
/src/somefile.go match
/src/subdir/somefile.go match
```
## How does it work?
The router relies on a tree structure which makes heavy use of *common prefixes*, it is basically a *compact* [*prefix tree*](https://en.wikipedia.org/wiki/Trie) (or just [*Radix tree*](https://en.wikipedia.org/wiki/Radix_tree)). Nodes with a common prefix also share a common parent. Here is a short example what the routing tree for the `GET` request method could look like:
```
Priority   Path             Handle
9          \                *<1>
3          ├s               nil
2          |├earch\         *<2>
1          |└upport\        *<3>
2          ├blog\           *<4>
1          |    └:post      nil
1          |         └\     *<5>
2          ├about-us\       *<6>
1          |        └team\  *<7>
1          └contact\        *<8>
```
Every `*<num>` represents the memory address of a handler function (a pointer). If you follow a path through the tree from the root to the leaf, you get the complete route path, e.g. `\blog\:post\`, where `:post` is just a placeholder ([*parameter*](#named-parameters)) for an actual post name. Unlike hash-maps, a tree structure also allows us to use dynamic parts like the `:post` parameter, since we actually match against the routing patterns instead of just comparing hashes. [As benchmarks show](https://github.com/julienschmidt/go-http-routing-benchmark), this works very well and efficiently.
Since URL paths have a hierarchical structure and use only a limited set of characters (byte values), it is very likely that there are a lot of common prefixes. This allows us to easily reduce the routing into ever smaller problems. Moreover, the router manages a separate tree for every request method. For one thing, this is more space efficient than holding a method->handle map in every single node; it also allows us to greatly reduce the routing problem before even starting the look-up in the prefix-tree.
For even better scalability, the child nodes on each tree level are ordered by priority, where the priority is just the number of handles registered in sub nodes (children, grandchildren, and so on..). This helps in two ways:
1. Nodes which are part of the most routing paths are evaluated first. This helps make as many routes as possible reachable as quickly as possible.
2. It is some sort of cost compensation. The longest reachable path (highest cost) can always be evaluated first. The following scheme visualizes the tree structure. Nodes are evaluated from top to bottom and from left to right.
```
├------------
├---------
├-----
├----
├--
├--
└-
```
## Why doesn't this work with `http.Handler`?
**It does!** The router itself implements the `http.Handler` interface. Moreover the router provides convenient [adapters for `http.Handler`](https://godoc.org/github.com/julienschmidt/httprouter#Router.Handler)s and [`http.HandlerFunc`](https://godoc.org/github.com/julienschmidt/httprouter#Router.HandlerFunc)s which allows them to be used as a [`httprouter.Handle`](https://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) when registering a route.
Named parameters can be accessed via `request.Context`:
```go
func Hello(w http.ResponseWriter, r *http.Request) {
params := httprouter.ParamsFromContext(r.Context())
fmt.Fprintf(w, "hello, %s!\n", params.ByName("name"))
}
```
Alternatively, one can also use `params := r.Context().Value(httprouter.ParamsKey)` instead of the helper function.
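For completeness, a short sketch of how such a context-based handler might be registered (route and handler names as in the examples above):
```go
// Register a plain http.HandlerFunc; the router stores the Params in the
// request context under httprouter.ParamsKey before calling it.
router.HandlerFunc(http.MethodGet, "/hello/:name", Hello)
```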
Just try it out for yourself; the usage of HttpRouter is very straightforward. The package is compact and minimalistic, but also probably one of the easiest routers to set up.
## Automatic OPTIONS responses and CORS
One might wish to modify automatic responses to OPTIONS requests, e.g. to support [CORS preflight requests](https://developer.mozilla.org/en-US/docs/Glossary/preflight_request) or to set other headers.
This can be achieved using the [`Router.GlobalOPTIONS`](https://godoc.org/github.com/julienschmidt/httprouter#Router.GlobalOPTIONS) handler:
```go
router.GlobalOPTIONS = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Access-Control-Request-Method") != "" {
// Set CORS headers
header := w.Header()
header.Set("Access-Control-Allow-Methods", r.Header.Get("Allow"))
header.Set("Access-Control-Allow-Origin", "*")
}
// Adjust status code to 204
w.WriteHeader(http.StatusNoContent)
})
```
## Where can I find Middleware *X*?
This package just provides a very efficient request router with a few extra features. The router is just a [`http.Handler`](https://golang.org/pkg/net/http/#Handler), you can chain any http.Handler compatible middleware before the router, for example the [Gorilla handlers](http://www.gorillatoolkit.org/pkg/handlers). Or you could [just write your own](https://justinas.org/writing-http-middleware-in-go/), it's very easy!
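As a rough sketch (the middleware here is hand-written and purely illustrative), any such middleware simply wraps the router like any other `http.Handler`:
```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/julienschmidt/httprouter"
)

func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	fmt.Fprint(w, "Welcome!\n")
}

// logRequests is ordinary net/http middleware: it wraps any http.Handler,
// including a *httprouter.Router.
func logRequests(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		log.Printf("%s %s took %s", r.Method, r.URL.Path, time.Since(start))
	})
}

func main() {
	router := httprouter.New()
	router.GET("/", Index)
	log.Fatal(http.ListenAndServe(":8080", logRequests(router)))
}
```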
Alternatively, you could try [a web framework based on HttpRouter](#web-frameworks-based-on-httprouter).
### Multi-domain / Sub-domains
Here is a quick example: Does your server serve multiple domains / hosts?
You want to use sub-domains?
Define a router per host!
```go
// We need an object that implements the http.Handler interface.
// Therefore we need a type for which we implement the ServeHTTP method.
// We just use a map here, in which we map host names (with port) to http.Handlers
type HostSwitch map[string]http.Handler
// Implement the ServeHTTP method on our new type
func (hs HostSwitch) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Check if a http.Handler is registered for the given host.
// If yes, use it to handle the request.
if handler := hs[r.Host]; handler != nil {
handler.ServeHTTP(w, r)
} else {
// Handle host names for which no handler is registered
http.Error(w, "Forbidden", 403) // Or Redirect?
}
}
func main() {
// Initialize a router as usual
router := httprouter.New()
router.GET("/", Index)
router.GET("/hello/:name", Hello)
// Make a new HostSwitch and insert the router (our http handler)
// for example.com and port 12345
hs := make(HostSwitch)
hs["example.com:12345"] = router
// Use the HostSwitch to listen and serve on port 12345
log.Fatal(http.ListenAndServe(":12345", hs))
}
```
### Basic Authentication
Another quick example: Basic Authentication (RFC 2617) for handles:
```go
package main
import (
"fmt"
"log"
"net/http"
"github.com/julienschmidt/httprouter"
)
func BasicAuth(h httprouter.Handle, requiredUser, requiredPassword string) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
// Get the Basic Authentication credentials
user, password, hasAuth := r.BasicAuth()
if hasAuth && user == requiredUser && password == requiredPassword {
// Delegate request to the given handle
h(w, r, ps)
} else {
// Request Basic Authentication otherwise
w.Header().Set("WWW-Authenticate", "Basic realm=Restricted")
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
}
}
}
func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
fmt.Fprint(w, "Not protected!\n")
}
func Protected(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
fmt.Fprint(w, "Protected!\n")
}
func main() {
user := "gordon"
pass := "secret!"
router := httprouter.New()
router.GET("/", Index)
router.GET("/protected/", BasicAuth(Protected, user, pass))
log.Fatal(http.ListenAndServe(":8080", router))
}
```
## Chaining with the NotFound handler
**NOTE: It might be required to set [`Router.HandleMethodNotAllowed`](https://godoc.org/github.com/julienschmidt/httprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.**
You can use another [`http.Handler`](https://golang.org/pkg/net/http/#Handler), for example another router, to handle requests which could not be matched by this router by using the [`Router.NotFound`](https://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) handler. This allows chaining.
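A minimal sketch (the fallback routes are purely illustrative) of chaining a standard `http.ServeMux` behind the router:
```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/julienschmidt/httprouter"
)

func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	fmt.Fprint(w, "Welcome!\n")
}

func main() {
	router := httprouter.New()
	router.HandleMethodNotAllowed = false // see the note above
	router.GET("/", Index)

	// Anything the router cannot match is delegated to the fallback mux.
	fallback := http.NewServeMux()
	fallback.HandleFunc("/legacy/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "served by the fallback mux\n")
	})
	router.NotFound = fallback

	log.Fatal(http.ListenAndServe(":8080", router))
}
```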
### Static files
The `NotFound` handler can for example be used to serve static files from the root path `/` (like an `index.html` file along with other assets):
```go
// Serve static files from the ./public directory
router.NotFound = http.FileServer(http.Dir("public"))
```
But this approach sidesteps the strict core rules of this router to avoid routing problems. A cleaner approach is to use a distinct sub-path for serving files, like `/static/*filepath` or `/files/*filepath`.
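For example (a sketch only, with an assumed `public` directory), using the router's own `ServeFiles` helper on a dedicated sub-path:
```go
// Serve the contents of ./public under /static/ instead of hooking NotFound
router.ServeFiles("/static/*filepath", http.Dir("public"))
```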
## Web Frameworks based on HttpRouter
If the HttpRouter is a bit too minimalistic for you, you might try one of the following more high-level 3rd-party web frameworks building upon the HttpRouter package:
* [Ace](https://github.com/plimble/ace): Blazing fast Go Web Framework
* [api2go](https://github.com/manyminds/api2go): A JSON API Implementation for Go
* [Gin](https://github.com/gin-gonic/gin): Features a martini-like API with much better performance
* [Goat](https://github.com/bahlo/goat): A minimalistic REST API server in Go
* [goMiddlewareChain](https://github.com/TobiEiss/goMiddlewareChain): An express.js-like-middleware-chain
* [Hikaru](https://github.com/najeira/hikaru): Supports standalone and Google AppEngine
* [Hitch](https://github.com/nbio/hitch): Hitch ties httprouter, [httpcontext](https://github.com/nbio/httpcontext), and middleware up in a bow
* [httpway](https://github.com/corneldamian/httpway): Simple middleware extension with context for httprouter and a server with gracefully shutdown support
* [kami](https://github.com/guregu/kami): A tiny web framework using x/net/context
* [Medeina](https://github.com/imdario/medeina): Inspired by Ruby's Roda and Cuba
* [Neko](https://github.com/rocwong/neko): A lightweight web application framework for Golang
* [pbgo](https://github.com/chai2010/pbgo): pbgo is a mini RPC/REST framework based on Protobuf
* [River](https://github.com/abiosoft/river): River is a simple and lightweight REST server
* [siesta](https://github.com/VividCortex/siesta): Composable HTTP handlers with contexts
* [xmux](https://github.com/rs/xmux): xmux is a httprouter fork on top of xhandler (net/context aware)

View File

@@ -1,3 +0,0 @@
module github.com/julienschmidt/httprouter
go 1.7

View File

@@ -1,123 +0,0 @@
// Copyright 2013 Julien Schmidt. All rights reserved.
// Based on the path package, Copyright 2009 The Go Authors.
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package httprouter
// CleanPath is the URL version of path.Clean, it returns a canonical URL path
// for p, eliminating . and .. elements.
//
// The following rules are applied iteratively until no further processing can
// be done:
// 1. Replace multiple slashes with a single slash.
// 2. Eliminate each . path name element (the current directory).
// 3. Eliminate each inner .. path name element (the parent directory)
// along with the non-.. element that precedes it.
// 4. Eliminate .. elements that begin a rooted path:
// that is, replace "/.." by "/" at the beginning of a path.
//
// If the result of this process is an empty string, "/" is returned
func CleanPath(p string) string {
// Turn empty string into "/"
if p == "" {
return "/"
}
n := len(p)
var buf []byte
// Invariants:
// reading from path; r is index of next byte to process.
// writing to buf; w is index of next byte to write.
// path must start with '/'
r := 1
w := 1
if p[0] != '/' {
r = 0
buf = make([]byte, n+1)
buf[0] = '/'
}
trailing := n > 1 && p[n-1] == '/'
// A bit more clunky without a 'lazybuf' like the path package, but the loop
// gets completely inlined (bufApp). So in contrast to the path package this
// loop has no expensive function calls (except 1x make)
for r < n {
switch {
case p[r] == '/':
// empty path element, trailing slash is added after the end
r++
case p[r] == '.' && r+1 == n:
trailing = true
r++
case p[r] == '.' && p[r+1] == '/':
// . element
r += 2
case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'):
// .. element: remove to last /
r += 3
if w > 1 {
// can backtrack
w--
if buf == nil {
for w > 1 && p[w] != '/' {
w--
}
} else {
for w > 1 && buf[w] != '/' {
w--
}
}
}
default:
// real path element.
// add slash if needed
if w > 1 {
bufApp(&buf, p, w, '/')
w++
}
// copy element
for r < n && p[r] != '/' {
bufApp(&buf, p, w, p[r])
w++
r++
}
}
}
// re-append trailing slash
if trailing && w > 1 {
bufApp(&buf, p, w, '/')
w++
}
if buf == nil {
return p[:w]
}
return string(buf[:w])
}
// internal helper to lazily create a buffer if necessary
func bufApp(buf *[]byte, s string, w int, c byte) {
if *buf == nil {
if s[w] == c {
return
}
*buf = make([]byte, len(s))
copy(*buf, s[:w])
}
(*buf)[w] = c
}

View File

@@ -1,452 +0,0 @@
// Copyright 2013 Julien Schmidt. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
// Package httprouter is a trie based high performance HTTP request router.
//
// A trivial example is:
//
// package main
//
// import (
// "fmt"
// "github.com/julienschmidt/httprouter"
// "net/http"
// "log"
// )
//
// func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
// fmt.Fprint(w, "Welcome!\n")
// }
//
// func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
// fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
// }
//
// func main() {
// router := httprouter.New()
// router.GET("/", Index)
// router.GET("/hello/:name", Hello)
//
// log.Fatal(http.ListenAndServe(":8080", router))
// }
//
// The router matches incoming requests by the request method and the path.
// If a handle is registered for this path and method, the router delegates the
// request to that function.
// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to
// register handles, for all other methods router.Handle can be used.
//
// The registered path, against which the router matches incoming requests, can
// contain two types of parameters:
// Syntax Type
// :name named parameter
// *name catch-all parameter
//
// Named parameters are dynamic path segments. They match anything until the
// next '/' or the path end:
// Path: /blog/:category/:post
//
// Requests:
// /blog/go/request-routers match: category="go", post="request-routers"
// /blog/go/request-routers/ no match, but the router would redirect
// /blog/go/ no match
// /blog/go/request-routers/comments no match
//
// Catch-all parameters match anything until the path end, including the
// directory index (the '/' before the catch-all). Since they match anything
// until the end, catch-all parameters must always be the final path element.
// Path: /files/*filepath
//
// Requests:
// /files/ match: filepath="/"
// /files/LICENSE match: filepath="/LICENSE"
// /files/templates/article.html match: filepath="/templates/article.html"
// /files no match, but the router would redirect
//
// The value of parameters is saved as a slice of the Param struct, with each
// entry consisting of a key and a value. The slice is passed to the Handle func as a third
// parameter.
// There are two ways to retrieve the value of a parameter:
// // by the name of the parameter
// user := ps.ByName("user") // defined by :user or *user
//
// // by the index of the parameter. This way you can also get the name (key)
// thirdKey := ps[2].Key // the name of the 3rd parameter
// thirdValue := ps[2].Value // the value of the 3rd parameter
package httprouter
import (
"context"
"net/http"
"strings"
)
// Handle is a function that can be registered to a route to handle HTTP
// requests. Like http.HandlerFunc, but has a third parameter for the values of
// wildcards (variables).
type Handle func(http.ResponseWriter, *http.Request, Params)
// Param is a single URL parameter, consisting of a key and a value.
type Param struct {
Key string
Value string
}
// Params is a Param-slice, as returned by the router.
// The slice is ordered, the first URL parameter is also the first slice value.
// It is therefore safe to read values by the index.
type Params []Param
// ByName returns the value of the first Param which key matches the given name.
// If no matching Param is found, an empty string is returned.
func (ps Params) ByName(name string) string {
for i := range ps {
if ps[i].Key == name {
return ps[i].Value
}
}
return ""
}
type paramsKey struct{}
// ParamsKey is the request context key under which URL params are stored.
var ParamsKey = paramsKey{}
// ParamsFromContext pulls the URL parameters from a request context,
// or returns nil if none are present.
func ParamsFromContext(ctx context.Context) Params {
p, _ := ctx.Value(ParamsKey).(Params)
return p
}
// Router is a http.Handler which can be used to dispatch requests to different
// handler functions via configurable routes
type Router struct {
trees map[string]*node
// Enables automatic redirection if the current route can't be matched but a
// handler for the path with (without) the trailing slash exists.
// For example if /foo/ is requested but a route only exists for /foo, the
// client is redirected to /foo with http status code 301 for GET requests
// and 307 for all other request methods.
RedirectTrailingSlash bool
// If enabled, the router tries to fix the current request path, if no
// handle is registered for it.
// First superfluous path elements like ../ or // are removed.
// Afterwards the router does a case-insensitive lookup of the cleaned path.
// If a handle can be found for this route, the router makes a redirection
// to the corrected path with status code 301 for GET requests and 307 for
// all other request methods.
// For example /FOO and /..//Foo could be redirected to /foo.
// RedirectTrailingSlash is independent of this option.
RedirectFixedPath bool
// If enabled, the router checks if another method is allowed for the
// current route, if the current request can not be routed.
// If this is the case, the request is answered with 'Method Not Allowed'
// and HTTP status code 405.
// If no other Method is allowed, the request is delegated to the NotFound
// handler.
HandleMethodNotAllowed bool
// If enabled, the router automatically replies to OPTIONS requests.
// Custom OPTIONS handlers take priority over automatic replies.
HandleOPTIONS bool
// An optional http.Handler that is called on automatic OPTIONS requests.
// The handler is only called if HandleOPTIONS is true and no OPTIONS
// handler for the specific path was set.
// The "Allowed" header is set before calling the handler.
GlobalOPTIONS http.Handler
// Cached value of global (*) allowed methods
globalAllowed string
// Configurable http.Handler which is called when no matching route is
// found. If it is not set, http.NotFound is used.
NotFound http.Handler
// Configurable http.Handler which is called when a request
// cannot be routed and HandleMethodNotAllowed is true.
// If it is not set, http.Error with http.StatusMethodNotAllowed is used.
// The "Allow" header with allowed request methods is set before the handler
// is called.
MethodNotAllowed http.Handler
// Function to handle panics recovered from http handlers.
// It should be used to generate a error page and return the http error code
// 500 (Internal Server Error).
// The handler can be used to keep your server from crashing because of
// unrecovered panics.
PanicHandler func(http.ResponseWriter, *http.Request, interface{})
}
// Make sure the Router conforms with the http.Handler interface
var _ http.Handler = New()
// New returns a new initialized Router.
// Path auto-correction, including trailing slashes, is enabled by default.
func New() *Router {
return &Router{
RedirectTrailingSlash: true,
RedirectFixedPath: true,
HandleMethodNotAllowed: true,
HandleOPTIONS: true,
}
}
// GET is a shortcut for router.Handle(http.MethodGet, path, handle)
func (r *Router) GET(path string, handle Handle) {
r.Handle(http.MethodGet, path, handle)
}
// HEAD is a shortcut for router.Handle(http.MethodHead, path, handle)
func (r *Router) HEAD(path string, handle Handle) {
r.Handle(http.MethodHead, path, handle)
}
// OPTIONS is a shortcut for router.Handle(http.MethodOptions, path, handle)
func (r *Router) OPTIONS(path string, handle Handle) {
r.Handle(http.MethodOptions, path, handle)
}
// POST is a shortcut for router.Handle(http.MethodPost, path, handle)
func (r *Router) POST(path string, handle Handle) {
r.Handle(http.MethodPost, path, handle)
}
// PUT is a shortcut for router.Handle(http.MethodPut, path, handle)
func (r *Router) PUT(path string, handle Handle) {
r.Handle(http.MethodPut, path, handle)
}
// PATCH is a shortcut for router.Handle(http.MethodPatch, path, handle)
func (r *Router) PATCH(path string, handle Handle) {
r.Handle(http.MethodPatch, path, handle)
}
// DELETE is a shortcut for router.Handle(http.MethodDelete, path, handle)
func (r *Router) DELETE(path string, handle Handle) {
r.Handle(http.MethodDelete, path, handle)
}
// Handle registers a new request handle with the given path and method.
//
// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut
// functions can be used.
//
// This function is intended for bulk loading and to allow the usage of less
// frequently used, non-standardized or custom methods (e.g. for internal
// communication with a proxy).
func (r *Router) Handle(method, path string, handle Handle) {
if len(path) < 1 || path[0] != '/' {
panic("path must begin with '/' in path '" + path + "'")
}
if r.trees == nil {
r.trees = make(map[string]*node)
}
root := r.trees[method]
if root == nil {
root = new(node)
r.trees[method] = root
r.globalAllowed = r.allowed("*", "")
}
root.addRoute(path, handle)
}
// Handler is an adapter which allows the usage of an http.Handler as a
// request handle.
// The Params are available in the request context under ParamsKey.
func (r *Router) Handler(method, path string, handler http.Handler) {
r.Handle(method, path,
func(w http.ResponseWriter, req *http.Request, p Params) {
if len(p) > 0 {
ctx := req.Context()
ctx = context.WithValue(ctx, ParamsKey, p)
req = req.WithContext(ctx)
}
handler.ServeHTTP(w, req)
},
)
}
// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a
// request handle.
func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) {
r.Handler(method, path, handler)
}
// ServeFiles serves files from the given file system root.
// The path must end with "/*filepath", files are then served from the local
// path /defined/root/dir/*filepath.
// For example if root is "/etc" and *filepath is "passwd", the local file
// "/etc/passwd" would be served.
// Internally a http.FileServer is used, therefore http.NotFound is used instead
// of the Router's NotFound handler.
// To use the operating system's file system implementation,
// use http.Dir:
// router.ServeFiles("/src/*filepath", http.Dir("/var/www"))
func (r *Router) ServeFiles(path string, root http.FileSystem) {
if len(path) < 10 || path[len(path)-10:] != "/*filepath" {
panic("path must end with /*filepath in path '" + path + "'")
}
fileServer := http.FileServer(root)
r.GET(path, func(w http.ResponseWriter, req *http.Request, ps Params) {
req.URL.Path = ps.ByName("filepath")
fileServer.ServeHTTP(w, req)
})
}
func (r *Router) recv(w http.ResponseWriter, req *http.Request) {
if rcv := recover(); rcv != nil {
r.PanicHandler(w, req, rcv)
}
}
// Lookup allows the manual lookup of a method + path combo.
// This is e.g. useful to build a framework around this router.
// If the path was found, it returns the handle function and the path parameter
// values. Otherwise the third return value indicates whether a redirection to
// the same path with an extra / without the trailing slash should be performed.
func (r *Router) Lookup(method, path string) (Handle, Params, bool) {
if root := r.trees[method]; root != nil {
return root.getValue(path)
}
return nil, nil, false
}
func (r *Router) allowed(path, reqMethod string) (allow string) {
allowed := make([]string, 0, 9)
if path == "*" { // server-wide
// empty method is used for internal calls to refresh the cache
if reqMethod == "" {
for method := range r.trees {
if method == http.MethodOptions {
continue
}
// Add request method to list of allowed methods
allowed = append(allowed, method)
}
} else {
return r.globalAllowed
}
} else { // specific path
for method := range r.trees {
// Skip the requested method - we already tried this one
if method == reqMethod || method == http.MethodOptions {
continue
}
handle, _, _ := r.trees[method].getValue(path)
if handle != nil {
// Add request method to list of allowed methods
allowed = append(allowed, method)
}
}
}
if len(allowed) > 0 {
// Add request method to list of allowed methods
allowed = append(allowed, http.MethodOptions)
// Sort allowed methods.
// sort.Strings(allowed) unfortunately causes unnecessary allocations
// due to allowed being moved to the heap and interface conversion
for i, l := 1, len(allowed); i < l; i++ {
for j := i; j > 0 && allowed[j] < allowed[j-1]; j-- {
allowed[j], allowed[j-1] = allowed[j-1], allowed[j]
}
}
// return as comma separated list
return strings.Join(allowed, ", ")
}
return
}
// ServeHTTP makes the router implement the http.Handler interface.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if r.PanicHandler != nil {
defer r.recv(w, req)
}
path := req.URL.Path
if root := r.trees[req.Method]; root != nil {
if handle, ps, tsr := root.getValue(path); handle != nil {
handle(w, req, ps)
return
} else if req.Method != http.MethodConnect && path != "/" {
code := 301 // Permanent redirect, request with GET method
if req.Method != http.MethodGet {
// Temporary redirect, request with same method
// As of Go 1.3, Go does not support status code 308.
code = 307
}
if tsr && r.RedirectTrailingSlash {
if len(path) > 1 && path[len(path)-1] == '/' {
req.URL.Path = path[:len(path)-1]
} else {
req.URL.Path = path + "/"
}
http.Redirect(w, req, req.URL.String(), code)
return
}
// Try to fix the request path
if r.RedirectFixedPath {
fixedPath, found := root.findCaseInsensitivePath(
CleanPath(path),
r.RedirectTrailingSlash,
)
if found {
req.URL.Path = string(fixedPath)
http.Redirect(w, req, req.URL.String(), code)
return
}
}
}
}
if req.Method == http.MethodOptions && r.HandleOPTIONS {
// Handle OPTIONS requests
if allow := r.allowed(path, http.MethodOptions); allow != "" {
w.Header().Set("Allow", allow)
if r.GlobalOPTIONS != nil {
r.GlobalOPTIONS.ServeHTTP(w, req)
}
return
}
} else if r.HandleMethodNotAllowed { // Handle 405
if allow := r.allowed(path, req.Method); allow != "" {
w.Header().Set("Allow", allow)
if r.MethodNotAllowed != nil {
r.MethodNotAllowed.ServeHTTP(w, req)
} else {
http.Error(w,
http.StatusText(http.StatusMethodNotAllowed),
http.StatusMethodNotAllowed,
)
}
return
}
}
// Handle 404
if r.NotFound != nil {
r.NotFound.ServeHTTP(w, req)
} else {
http.NotFound(w, req)
}
}

View File

@@ -1,666 +0,0 @@
// Copyright 2013 Julien Schmidt. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package httprouter
import (
"strings"
"unicode"
"unicode/utf8"
)
func min(a, b int) int {
if a <= b {
return a
}
return b
}
const maxParamCount uint8 = ^uint8(0)
func countParams(path string) uint8 {
var n uint
for i := 0; i < len(path); i++ {
if path[i] != ':' && path[i] != '*' {
continue
}
n++
}
if n >= uint(maxParamCount) {
return maxParamCount
}
return uint8(n)
}
type nodeType uint8
const (
static nodeType = iota // default
root
param
catchAll
)
type node struct {
path string
wildChild bool
nType nodeType
maxParams uint8
priority uint32
indices string
children []*node
handle Handle
}
// increments priority of the given child and reorders if necessary
func (n *node) incrementChildPrio(pos int) int {
n.children[pos].priority++
prio := n.children[pos].priority
// adjust position (move to front)
newPos := pos
for newPos > 0 && n.children[newPos-1].priority < prio {
// swap node positions
n.children[newPos-1], n.children[newPos] = n.children[newPos], n.children[newPos-1]
newPos--
}
// build new index char string
if newPos != pos {
n.indices = n.indices[:newPos] + // unchanged prefix, might be empty
n.indices[pos:pos+1] + // the index char we move
n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos'
}
return newPos
}
// addRoute adds a node with the given handle to the path.
// Not concurrency-safe!
func (n *node) addRoute(path string, handle Handle) {
fullPath := path
n.priority++
numParams := countParams(path)
// non-empty tree
if len(n.path) > 0 || len(n.children) > 0 {
walk:
for {
// Update maxParams of the current node
if numParams > n.maxParams {
n.maxParams = numParams
}
// Find the longest common prefix.
// This also implies that the common prefix contains no ':' or '*'
// since the existing key can't contain those chars.
i := 0
max := min(len(path), len(n.path))
for i < max && path[i] == n.path[i] {
i++
}
// Split edge
if i < len(n.path) {
child := node{
path: n.path[i:],
wildChild: n.wildChild,
nType: static,
indices: n.indices,
children: n.children,
handle: n.handle,
priority: n.priority - 1,
}
// Update maxParams (max of all children)
for i := range child.children {
if child.children[i].maxParams > child.maxParams {
child.maxParams = child.children[i].maxParams
}
}
n.children = []*node{&child}
// []byte for proper unicode char conversion, see #65
n.indices = string([]byte{n.path[i]})
n.path = path[:i]
n.handle = nil
n.wildChild = false
}
// Make new node a child of this node
if i < len(path) {
path = path[i:]
if n.wildChild {
n = n.children[0]
n.priority++
// Update maxParams of the child node
if numParams > n.maxParams {
n.maxParams = numParams
}
numParams--
// Check if the wildcard matches
if len(path) >= len(n.path) && n.path == path[:len(n.path)] &&
// Adding a child to a catchAll is not possible
n.nType != catchAll &&
// Check for longer wildcard, e.g. :name and :names
(len(n.path) >= len(path) || path[len(n.path)] == '/') {
continue walk
} else {
// Wildcard conflict
var pathSeg string
if n.nType == catchAll {
pathSeg = path
} else {
pathSeg = strings.SplitN(path, "/", 2)[0]
}
prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path
panic("'" + pathSeg +
"' in new path '" + fullPath +
"' conflicts with existing wildcard '" + n.path +
"' in existing prefix '" + prefix +
"'")
}
}
c := path[0]
// slash after param
if n.nType == param && c == '/' && len(n.children) == 1 {
n = n.children[0]
n.priority++
continue walk
}
// Check if a child with the next path byte exists
for i := 0; i < len(n.indices); i++ {
if c == n.indices[i] {
i = n.incrementChildPrio(i)
n = n.children[i]
continue walk
}
}
// Otherwise insert it
if c != ':' && c != '*' {
// []byte for proper unicode char conversion, see #65
n.indices += string([]byte{c})
child := &node{
maxParams: numParams,
}
n.children = append(n.children, child)
n.incrementChildPrio(len(n.indices) - 1)
n = child
}
n.insertChild(numParams, path, fullPath, handle)
return
} else if i == len(path) { // Make node a (in-path) leaf
if n.handle != nil {
panic("a handle is already registered for path '" + fullPath + "'")
}
n.handle = handle
}
return
}
} else { // Empty tree
n.insertChild(numParams, path, fullPath, handle)
n.nType = root
}
}
func (n *node) insertChild(numParams uint8, path, fullPath string, handle Handle) {
var offset int // already handled bytes of the path
// find prefix until first wildcard (beginning with ':' or '*')
for i, max := 0, len(path); numParams > 0; i++ {
c := path[i]
if c != ':' && c != '*' {
continue
}
// find wildcard end (either '/' or path end)
end := i + 1
for end < max && path[end] != '/' {
switch path[end] {
// the wildcard name must not contain ':' and '*'
case ':', '*':
panic("only one wildcard per path segment is allowed, has: '" +
path[i:] + "' in path '" + fullPath + "'")
default:
end++
}
}
// check if this Node has existing children which would be
// unreachable if we insert the wildcard here
if len(n.children) > 0 {
panic("wildcard route '" + path[i:end] +
"' conflicts with existing children in path '" + fullPath + "'")
}
// check if the wildcard has a name
if end-i < 2 {
panic("wildcards must be named with a non-empty name in path '" + fullPath + "'")
}
if c == ':' { // param
// split path at the beginning of the wildcard
if i > 0 {
n.path = path[offset:i]
offset = i
}
child := &node{
nType: param,
maxParams: numParams,
}
n.children = []*node{child}
n.wildChild = true
n = child
n.priority++
numParams--
// if the path doesn't end with the wildcard, then there
// will be another non-wildcard subpath starting with '/'
if end < max {
n.path = path[offset:end]
offset = end
child := &node{
maxParams: numParams,
priority: 1,
}
n.children = []*node{child}
n = child
}
} else { // catchAll
if end != max || numParams > 1 {
panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'")
}
if len(n.path) > 0 && n.path[len(n.path)-1] == '/' {
panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'")
}
// currently fixed width 1 for '/'
i--
if path[i] != '/' {
panic("no / before catch-all in path '" + fullPath + "'")
}
n.path = path[offset:i]
// first node: catchAll node with empty path
child := &node{
wildChild: true,
nType: catchAll,
maxParams: 1,
}
// update maxParams of the parent node
if n.maxParams < 1 {
n.maxParams = 1
}
n.children = []*node{child}
n.indices = string(path[i])
n = child
n.priority++
// second node: node holding the variable
child = &node{
path: path[i:],
nType: catchAll,
maxParams: 1,
handle: handle,
priority: 1,
}
n.children = []*node{child}
return
}
}
// insert remaining path part and handle to the leaf
n.path = path[offset:]
n.handle = handle
}
// Returns the handle registered with the given path (key). The values of
// wildcards are saved to a map.
// If no handle can be found, a TSR (trailing slash redirect) recommendation is
// made if a handle exists with an extra (without the) trailing slash for the
// given path.
func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) {
walk: // outer loop for walking the tree
for {
if len(path) > len(n.path) {
if path[:len(n.path)] == n.path {
path = path[len(n.path):]
// If this node does not have a wildcard (param or catchAll)
// child, we can just look up the next child node and continue
// to walk down the tree
if !n.wildChild {
c := path[0]
for i := 0; i < len(n.indices); i++ {
if c == n.indices[i] {
n = n.children[i]
continue walk
}
}
// Nothing found.
// We can recommend to redirect to the same URL without a
// trailing slash if a leaf exists for that path.
tsr = (path == "/" && n.handle != nil)
return
}
// handle wildcard child
n = n.children[0]
switch n.nType {
case param:
// find param end (either '/' or path end)
end := 0
for end < len(path) && path[end] != '/' {
end++
}
// save param value
if p == nil {
// lazy allocation
p = make(Params, 0, n.maxParams)
}
i := len(p)
p = p[:i+1] // expand slice within preallocated capacity
p[i].Key = n.path[1:]
p[i].Value = path[:end]
// we need to go deeper!
if end < len(path) {
if len(n.children) > 0 {
path = path[end:]
n = n.children[0]
continue walk
}
// ... but we can't
tsr = (len(path) == end+1)
return
}
if handle = n.handle; handle != nil {
return
} else if len(n.children) == 1 {
// No handle found. Check if a handle for this path + a
// trailing slash exists for TSR recommendation
n = n.children[0]
tsr = (n.path == "/" && n.handle != nil)
}
return
case catchAll:
// save param value
if p == nil {
// lazy allocation
p = make(Params, 0, n.maxParams)
}
i := len(p)
p = p[:i+1] // expand slice within preallocated capacity
p[i].Key = n.path[2:]
p[i].Value = path
handle = n.handle
return
default:
panic("invalid node type")
}
}
} else if path == n.path {
// We should have reached the node containing the handle.
// Check if this node has a handle registered.
if handle = n.handle; handle != nil {
return
}
if path == "/" && n.wildChild && n.nType != root {
tsr = true
return
}
// No handle found. Check if a handle for this path + a
// trailing slash exists for trailing slash recommendation
for i := 0; i < len(n.indices); i++ {
if n.indices[i] == '/' {
n = n.children[i]
tsr = (len(n.path) == 1 && n.handle != nil) ||
(n.nType == catchAll && n.children[0].handle != nil)
return
}
}
return
}
// Nothing found. We can recommend to redirect to the same URL with an
// extra trailing slash if a leaf exists for that path
tsr = (path == "/") ||
(len(n.path) == len(path)+1 && n.path[len(path)] == '/' &&
path == n.path[:len(n.path)-1] && n.handle != nil)
return
}
}
// Makes a case-insensitive lookup of the given path and tries to find a handler.
// It can optionally also fix trailing slashes.
// It returns the case-corrected path and a bool indicating whether the lookup
// was successful.
func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) {
return n.findCaseInsensitivePathRec(
path,
make([]byte, 0, len(path)+1), // preallocate enough memory for new path
[4]byte{}, // empty rune buffer
fixTrailingSlash,
)
}
// shift bytes in array by n bytes left
func shiftNRuneBytes(rb [4]byte, n int) [4]byte {
switch n {
case 0:
return rb
case 1:
return [4]byte{rb[1], rb[2], rb[3], 0}
case 2:
return [4]byte{rb[2], rb[3]}
case 3:
return [4]byte{rb[3]}
default:
return [4]byte{}
}
}
// recursive case-insensitive lookup function used by n.findCaseInsensitivePath
func (n *node) findCaseInsensitivePathRec(path string, ciPath []byte, rb [4]byte, fixTrailingSlash bool) ([]byte, bool) {
npLen := len(n.path)
walk: // outer loop for walking the tree
for len(path) >= npLen && (npLen == 0 || strings.EqualFold(path[1:npLen], n.path[1:])) {
// add common prefix to result
oldPath := path
path = path[npLen:]
ciPath = append(ciPath, n.path...)
if len(path) > 0 {
// If this node does not have a wildcard (param or catchAll) child,
// we can just look up the next child node and continue to walk down
// the tree
if !n.wildChild {
// skip rune bytes already processed
rb = shiftNRuneBytes(rb, npLen)
if rb[0] != 0 {
// old rune not finished
for i := 0; i < len(n.indices); i++ {
if n.indices[i] == rb[0] {
// continue with child node
n = n.children[i]
npLen = len(n.path)
continue walk
}
}
} else {
// process a new rune
var rv rune
// find rune start
// runes are up to 4 byte long,
// -4 would definitely be another rune
var off int
for max := min(npLen, 3); off < max; off++ {
if i := npLen - off; utf8.RuneStart(oldPath[i]) {
// read rune from cached path
rv, _ = utf8.DecodeRuneInString(oldPath[i:])
break
}
}
// calculate lowercase bytes of current rune
lo := unicode.ToLower(rv)
utf8.EncodeRune(rb[:], lo)
// skip already processed bytes
rb = shiftNRuneBytes(rb, off)
for i := 0; i < len(n.indices); i++ {
// lowercase matches
if n.indices[i] == rb[0] {
// must use a recursive approach since both the
// uppercase byte and the lowercase byte might exist
// as an index
if out, found := n.children[i].findCaseInsensitivePathRec(
path, ciPath, rb, fixTrailingSlash,
); found {
return out, true
}
break
}
}
// if we found no match, the same for the uppercase rune,
// if it differs
if up := unicode.ToUpper(rv); up != lo {
utf8.EncodeRune(rb[:], up)
rb = shiftNRuneBytes(rb, off)
for i, c := 0, rb[0]; i < len(n.indices); i++ {
// uppercase matches
if n.indices[i] == c {
// continue with child node
n = n.children[i]
npLen = len(n.path)
continue walk
}
}
}
}
// Nothing found. We can recommend to redirect to the same URL
// without a trailing slash if a leaf exists for that path
return ciPath, (fixTrailingSlash && path == "/" && n.handle != nil)
}
n = n.children[0]
switch n.nType {
case param:
// find param end (either '/' or path end)
k := 0
for k < len(path) && path[k] != '/' {
k++
}
// add param value to case insensitive path
ciPath = append(ciPath, path[:k]...)
// we need to go deeper!
if k < len(path) {
if len(n.children) > 0 {
// continue with child node
n = n.children[0]
npLen = len(n.path)
path = path[k:]
continue
}
// ... but we can't
if fixTrailingSlash && len(path) == k+1 {
return ciPath, true
}
return ciPath, false
}
if n.handle != nil {
return ciPath, true
} else if fixTrailingSlash && len(n.children) == 1 {
// No handle found. Check if a handle for this path + a
// trailing slash exists
n = n.children[0]
if n.path == "/" && n.handle != nil {
return append(ciPath, '/'), true
}
}
return ciPath, false
case catchAll:
return append(ciPath, path...), true
default:
panic("invalid node type")
}
} else {
// We should have reached the node containing the handle.
// Check if this node has a handle registered.
if n.handle != nil {
return ciPath, true
}
// No handle found.
// Try to fix the path by adding a trailing slash
if fixTrailingSlash {
for i := 0; i < len(n.indices); i++ {
if n.indices[i] == '/' {
n = n.children[i]
if (len(n.path) == 1 && n.handle != nil) ||
(n.nType == catchAll && n.children[0].handle != nil) {
return append(ciPath, '/'), true
}
return ciPath, false
}
}
}
return ciPath, false
}
}
// Nothing found.
// Try to fix the path by adding / removing a trailing slash
if fixTrailingSlash {
if path == "/" {
return ciPath, true
}
if len(path)+1 == npLen && n.path[len(path)] == '/' &&
strings.EqualFold(path[1:], n.path[1:len(path)]) && n.handle != nil {
return append(ciPath, n.path...), true
}
}
return ciPath, false
}

View File

@@ -1,20 +0,0 @@
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
examples/examples
examples/ipv4/go.sum
examples/ipv4/ipv4
examples/ipv6/go.sum
examples/ipv6/ipv6

View File

@@ -1,7 +0,0 @@
FROM golang
WORKDIR /peerdiscovery
COPY . .
RUN go build ./examples/ipv4/main.go
CMD ["/peerdiscovery/main"]

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2018 Zack
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,72 +0,0 @@
# peerdiscovery
<img src="https://img.shields.io/badge/coverage-89%25-brightgreen.svg?style=flat-square" alt="Code coverage">&nbsp;<a href="https://goreportcard.com/report/github.com/schollz/peerdiscovery"><img src="https://goreportcard.com/badge/github.com/schollz/peerdiscovery?style=flat-square" alt="Go Report"></a>&nbsp;<a href="https://godoc.org/github.com/schollz/peerdiscovery"><img src="http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square" alt="Go Doc"></a>
Pure-go library for cross-platform thread-safe local peer discovery using UDP multicast. I needed to use peer discovery for [croc](https://github.com/schollz/croc) and everything I tried had problems, so I made another one.
## Install
Make sure you have Go 1.5+.
```
go get -u github.com/schollz/peerdiscovery
```
## Usage
The following code finds the first peer on the local network and prints it out.
```golang
discoveries, _ := peerdiscovery.Discover(peerdiscovery.Settings{Limit: 1})
for _, d := range discoveries {
fmt.Printf("discovered '%s'\n", d.Address)
}
```
Here's the output when running on two computers. (*Run these gifs in sync by hitting Ctl + F5*).
**Computer 1:**
![computer 1](https://user-images.githubusercontent.com/6550035/39165714-ba7167d8-473a-11e8-82b5-fb7401ce2138.gif)
**Computer 2:**
![computer 1](https://user-images.githubusercontent.com/6550035/39165716-ba8db9ec-473a-11e8-96f7-e8c64faac676.gif)
For more examples, see the scanning examples ([ipv4](https://github.com/schollz/peerdiscovery/blob/master/examples/ipv4/main.go) and [ipv6](https://github.com/schollz/peerdiscovery/blob/master/examples/ipv6/main.go)) or [the docs](https://pkg.go.dev/github.com/schollz/peerdiscovery).
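As a rough sketch (values chosen purely for illustration), the same `Discover` call accepts a `Settings` struct to tune the scan and to get notified as peers appear:
```golang
discoveries, err := peerdiscovery.Discover(peerdiscovery.Settings{
	Limit:     -1,               // < 1 means no limit on the number of peers
	TimeLimit: 30 * time.Second, // stop scanning after 30 seconds
	Payload:   []byte("hello"),
	Notify: func(d peerdiscovery.Discovered) {
		fmt.Printf("found %s with payload '%s'\n", d.Address, d.Payload)
	},
})
if err != nil {
	log.Fatal(err)
}
fmt.Printf("finished, %d peers total\n", len(discoveries))
```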
## Testing
To test the peer discovery with just one host, one can launch multiple containers. The provided `Dockerfile` will run the example code.
Please make sure to enable [Docker's IPv6 support](https://docs.docker.com/v17.09/engine/userguide/networking/default_network/ipv6/) if you are using IPv6 for peer discovery.
```console
# Build the container, named peertest
$ docker build -t peertest .
# Execute the following command in multiple terminals
$ docker run -t --rm peertest
Scanning for 10 seconds to find LAN peers
100% |████████████████████████████████████████| [9s:0s]Found 1 other computers
0) '172.17.0.2' with payload 'zqrecHipCO'
```
## Contributing
Pull requests are welcome. Feel free to...
- Revise documentation
- Add new features
- Fix bugs
- Suggest improvements
## Thanks
Thanks [@geistesk](https://github.com/geistesk) for adding IPv6 support and a `Notify` func, and helping maintain! Thanks [@Kunde21](https://github.com/Kunde21) for providing a bug fix and massively refactoring the code in a much better way. Thanks [@robpre](https://github.com/robpre) for finding and fixing bugs. Thanks [@shvydky](https://github.com/shvydky) for adding dynamic payloads.
## License
MIT

View File

@@ -1,9 +0,0 @@
module github.com/schollz/peerdiscovery
go 1.13
require (
github.com/stretchr/testify v1.6.1
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02 // indirect
)

View File

@@ -1,24 +0,0 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02 h1:5Ftd3YbC/kANXWCBjvppvUmv1BMakgFcBKA7MpYYp4M=
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -1,414 +0,0 @@
package peerdiscovery
import (
"fmt"
"net"
"strconv"
"sync"
"time"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
)
// IPVersion specifies the version of the Internet Protocol to be used.
type IPVersion uint
const (
IPv4 IPVersion = 4
IPv6 IPVersion = 6
)
// Discovered is the structure of the discovered peers,
// which holds their local address (port removed) and
// a payload if there is one.
type Discovered struct {
// Address is the local address of a discovered peer.
Address string
// Payload is the associated payload from discovered peer.
Payload []byte
}
func (d Discovered) String() string {
return fmt.Sprintf("address: %s, payload: %s", d.Address, d.Payload)
}
// Settings are the settings that can be specified for
// doing peer discovery.
type Settings struct {
// Limit is the number of peers to discover, use < 1 for unlimited.
Limit int
// Port is the port to broadcast on (the peers must also broadcast using the same port).
// The default port is 9999.
Port string
// MulticastAddress specifies the multicast address.
// You should be able to use any of 224.0.0.0/4 or ff00::/8.
// By default it uses the Simple Service Discovery Protocol
// address (239.255.255.250 for IPv4 or ff02::c for IPv6).
MulticastAddress string
// Payload is the bytes that are sent out with each broadcast. Must be short.
Payload []byte
// PayloadFunc is the function that will be called to dynamically generate payload
// before every broadcast. If this pointer is nil `Payload` field will be broadcasted instead.
PayloadFunc func() []byte
// Delay is the amount of time between broadcasts. The default delay is 1 second.
Delay time.Duration
// TimeLimit is the amount of time to spend discovering, if the limit is not reached.
// A negative limit indicates scanning until the limit was reached or, if an
// unlimited scanning was requested, no timeout.
// The default time limit is 10 seconds.
TimeLimit time.Duration
// StopChan is a channel to stop the peer discovery immediately after reception.
StopChan chan struct{}
// AllowSelf will allow discovering the local machine (default false)
AllowSelf bool
// DisableBroadcast will not allow sending out a broadcast
DisableBroadcast bool
// IPVersion specifies the version of the Internet Protocol (default IPv4)
IPVersion IPVersion
// Notify will be called each time a new peer was discovered.
// The default is nil, which means no notification whatsoever.
Notify func(Discovered)
portNum int
multicastAddressNumbers net.IP
}
// peerDiscovery is the object that can do the discovery for finding LAN peers.
type peerDiscovery struct {
settings Settings
received map[string][]byte
sync.RWMutex
}
// initialize returns a new peerDiscovery object which can be used to discover peers.
// The settings are optional. If any setting is not supplied, then defaults are used.
// See the Settings for more information.
func initialize(settings Settings) (p *peerDiscovery, err error) {
p = new(peerDiscovery)
p.Lock()
defer p.Unlock()
// initialize settings
p.settings = settings
// defaults
if p.settings.Port == "" {
p.settings.Port = "9999"
}
if p.settings.IPVersion == 0 {
p.settings.IPVersion = IPv4
}
if p.settings.MulticastAddress == "" {
if p.settings.IPVersion == IPv4 {
p.settings.MulticastAddress = "239.255.255.250"
} else {
p.settings.MulticastAddress = "ff02::c"
}
}
if len(p.settings.Payload) == 0 {
p.settings.Payload = []byte("hi")
}
if p.settings.Delay == 0 {
p.settings.Delay = 1 * time.Second
}
if p.settings.TimeLimit == 0 {
p.settings.TimeLimit = 10 * time.Second
}
if p.settings.StopChan == nil {
p.settings.StopChan = make(chan struct{})
}
p.received = make(map[string][]byte)
p.settings.multicastAddressNumbers = net.ParseIP(p.settings.MulticastAddress)
if p.settings.multicastAddressNumbers == nil {
err = fmt.Errorf("Multicast Address %s could not be converted to an IP",
p.settings.MulticastAddress)
return
}
p.settings.portNum, err = strconv.Atoi(p.settings.Port)
if err != nil {
return
}
return
}
type NetPacketConn interface {
JoinGroup(ifi *net.Interface, group net.Addr) error
SetMulticastInterface(ini *net.Interface) error
SetMulticastTTL(int) error
ReadFrom(buf []byte) (int, net.Addr, error)
WriteTo(buf []byte, dst net.Addr) (int, error)
}
// Discover will use the created settings to scan for LAN peers. It will return
// an array of the discovered peers and their associate payloads. It will not
// return broadcasts sent to itself.
func Discover(settings ...Settings) (discoveries []Discovered, err error) {
s := Settings{}
if len(settings) > 0 {
s = settings[0]
}
p, err := initialize(s)
if err != nil {
return
}
p.RLock()
address := net.JoinHostPort(p.settings.MulticastAddress, p.settings.Port)
portNum := p.settings.portNum
tickerDuration := p.settings.Delay
timeLimit := p.settings.TimeLimit
p.RUnlock()
// get interfaces
ifaces, err := net.Interfaces()
if err != nil {
return
}
// Open up a connection
c, err := net.ListenPacket(fmt.Sprintf("udp%d", p.settings.IPVersion), address)
if err != nil {
return
}
defer c.Close()
group := p.settings.multicastAddressNumbers
// ipv{4,6} have an own PacketConn, which does not implement net.PacketConn
var p2 NetPacketConn
if p.settings.IPVersion == IPv4 {
p2 = PacketConn4{ipv4.NewPacketConn(c)}
} else {
p2 = PacketConn6{ipv6.NewPacketConn(c)}
}
for i := range ifaces {
p2.JoinGroup(&ifaces[i], &net.UDPAddr{IP: group, Port: portNum})
}
go p.listen()
ticker := time.NewTicker(tickerDuration)
defer ticker.Stop()
start := time.Now()
for {
exit := false
p.RLock()
if len(p.received) >= p.settings.Limit && p.settings.Limit > 0 {
exit = true
}
p.RUnlock()
if !s.DisableBroadcast {
payload := p.settings.Payload
if p.settings.PayloadFunc != nil {
payload = p.settings.PayloadFunc()
}
// write to multicast
broadcast(p2, payload, ifaces, &net.UDPAddr{IP: group, Port: portNum})
}
select {
case <-p.settings.StopChan:
exit = true
case <-ticker.C:
}
if exit || timeLimit > 0 && time.Since(start) > timeLimit {
break
}
}
if !s.DisableBroadcast {
payload := p.settings.Payload
if p.settings.PayloadFunc != nil {
payload = p.settings.PayloadFunc()
}
// send out broadcast that is finished
broadcast(p2, payload, ifaces, &net.UDPAddr{IP: group, Port: portNum})
}
p.RLock()
discoveries = make([]Discovered, len(p.received))
i := 0
for ip, payload := range p.received {
discoveries[i] = Discovered{
Address: ip,
Payload: payload,
}
i++
}
p.RUnlock()
return
}
func broadcast(p2 NetPacketConn, payload []byte, ifaces []net.Interface, dst net.Addr) {
for i := range ifaces {
if errMulticast := p2.SetMulticastInterface(&ifaces[i]); errMulticast != nil {
continue
}
p2.SetMulticastTTL(2)
if _, errMulticast := p2.WriteTo([]byte(payload), dst); errMulticast != nil {
continue
}
}
}
const (
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
maxDatagramSize = 66507
)
// Listen binds to the UDP address and port given and writes packets received
// from that address to a buffer which is passed to a handler
func (p *peerDiscovery) listen() (recievedBytes []byte, err error) {
p.RLock()
address := net.JoinHostPort(p.settings.MulticastAddress, p.settings.Port)
portNum := p.settings.portNum
allowSelf := p.settings.AllowSelf
notify := p.settings.Notify
p.RUnlock()
localIPs := getLocalIPs()
// get interfaces
ifaces, err := net.Interfaces()
if err != nil {
return
}
// log.Println(ifaces)
// Open up a connection
c, err := net.ListenPacket(fmt.Sprintf("udp%d", p.settings.IPVersion), address)
if err != nil {
return
}
defer c.Close()
group := p.settings.multicastAddressNumbers
var p2 NetPacketConn
if p.settings.IPVersion == IPv4 {
p2 = PacketConn4{ipv4.NewPacketConn(c)}
} else {
p2 = PacketConn6{ipv6.NewPacketConn(c)}
}
for i := range ifaces {
p2.JoinGroup(&ifaces[i], &net.UDPAddr{IP: group, Port: portNum})
}
// Loop forever reading from the socket
for {
buffer := make([]byte, maxDatagramSize)
var (
n int
src net.Addr
errRead error
)
n, src, errRead = p2.ReadFrom(buffer)
if errRead != nil {
err = errRead
return
}
srcHost, _, _ := net.SplitHostPort(src.String())
if _, ok := localIPs[srcHost]; ok && !allowSelf {
continue
}
// log.Println(src, hex.Dump(buffer[:n]))
p.Lock()
if _, ok := p.received[srcHost]; !ok {
p.received[srcHost] = buffer[:n]
}
p.Unlock()
if notify != nil {
notify(Discovered{
Address: srcHost,
Payload: buffer[:n],
})
}
p.RLock()
if len(p.received) >= p.settings.Limit && p.settings.Limit > 0 {
p.RUnlock()
break
}
p.RUnlock()
}
return
}
// getLocalIPs returns the local IP addresses
func getLocalIPs() (ips map[string]struct{}) {
ips = make(map[string]struct{})
ips["localhost"] = struct{}{}
ips["127.0.0.1"] = struct{}{}
ips["::1"] = struct{}{}
ifaces, err := net.Interfaces()
if err != nil {
return
}
for _, iface := range ifaces {
addrs, err := iface.Addrs()
if err != nil {
continue
}
for _, address := range addrs {
ip, _, err := net.ParseCIDR(address.String())
if err != nil {
// log.Printf("Failed to parse %s: %v", address.String(), err)
continue
}
ips[ip.String()+"%"+iface.Name] = struct{}{}
ips[ip.String()] = struct{}{}
}
}
return
}
type PacketConn4 struct {
*ipv4.PacketConn
}
// ReadFrom wraps the ipv4 ReadFrom without a control message
func (pc4 PacketConn4) ReadFrom(buf []byte) (int, net.Addr, error) {
n, _, addr, err := pc4.PacketConn.ReadFrom(buf)
return n, addr, err
}
// WriteTo wraps the ipv4 WriteTo without a control message
func (pc4 PacketConn4) WriteTo(buf []byte, dst net.Addr) (int, error) {
return pc4.PacketConn.WriteTo(buf, nil, dst)
}
type PacketConn6 struct {
*ipv6.PacketConn
}
// ReadFrom wraps the ipv6 ReadFrom without a control message
func (pc6 PacketConn6) ReadFrom(buf []byte) (int, net.Addr, error) {
n, _, addr, err := pc6.PacketConn.ReadFrom(buf)
return n, addr, err
}
// WriteTo wraps the ipv6 WriteTo without a control message
func (pc6 PacketConn6) WriteTo(buf []byte, dst net.Addr) (int, error) {
return pc6.PacketConn.WriteTo(buf, nil, dst)
}
// SetMulticastTTL wraps the hop limit of ipv6
func (pc6 PacketConn6) SetMulticastTTL(i int) error {
return pc6.SetMulticastHopLimit(i)
}

View File

@@ -1,8 +0,0 @@
# This is the list of envconfig authors for copyright purposes.
#
# This does not necessarily list everyone who has contributed code, since in
# some cases, their employer may be the copyright holder. To see the full list
# of contributors, see the revision history in source control.
Google LLC
Seth Vargo

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,59 +0,0 @@
# Copyright The envconfig Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VETTERS = "asmdecl,assign,atomic,bools,buildtag,cgocall,composites,copylocks,errorsas,httpresponse,loopclosure,lostcancel,nilfunc,printf,shift,stdmethods,structtag,tests,unmarshal,unreachable,unsafeptr,unusedresult"
GOFMT_FILES = $(shell go list -f '{{.Dir}}' ./...)
fmtcheck:
@command -v goimports > /dev/null 2>&1 || (cd tools && go get golang.org/x/tools/cmd/goimports && cd ..)
@CHANGES="$$(goimports -d $(GOFMT_FILES))"; \
if [ -n "$${CHANGES}" ]; then \
echo "Unformatted (run goimports -w .):\n\n$${CHANGES}\n\n"; \
exit 1; \
fi
@# Annoyingly, goimports does not support the simplify flag.
@CHANGES="$$(gofmt -s -d $(GOFMT_FILES))"; \
if [ -n "$${CHANGES}" ]; then \
echo "Unformatted (run gofmt -s -w .):\n\n$${CHANGES}\n\n"; \
exit 1; \
fi
.PHONY: fmtcheck
spellcheck:
@command -v misspell > /dev/null 2>&1 || (cd tools && go get github.com/client9/misspell/cmd/misspell && cd ..)
@misspell -locale="US" -error -source="text" **/*
.PHONY: spellcheck
staticcheck:
@command -v staticcheck > /dev/null 2>&1 || (cd tools && go get honnef.co/go/tools/cmd/staticcheck && cd ..)
@staticcheck -checks="all" -tests $(GOFMT_FILES)
.PHONY: staticcheck
test:
@go test \
-count=1 \
-short \
-timeout=5m \
-vet="${VETTERS}" \
./...
.PHONY: test
test-acc:
@go test \
-count=1 \
-race \
-timeout=10m \
-vet="${VETTERS}" \
./...
.PHONY: test-acc

View File

@@ -1,303 +0,0 @@
# Envconfig
[![GoDoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/mod/github.com/sethvargo/go-envconfig)
[![GitHub Actions](https://img.shields.io/github/workflow/status/sethvargo/go-envconfig/Test?style=flat-square)](https://github.com/sethvargo/go-envconfig/actions?query=workflow%3ATest)
Envconfig populates struct field values based on environment variables or
arbitrary lookup functions. It supports pre-setting mutations, which is useful
for things like converting values to uppercase, trimming whitespace, or looking
up secrets.
**Note:** Versions prior to v0.2 used a different import path. This README and
examples are for v0.2+.
## Usage
Define a struct with fields using the `env` tag:
```go
type MyConfig struct {
Port int `env:"PORT"`
Username string `env:"USERNAME"`
}
```
Set some environment variables:
```sh
export PORT=5555
export USERNAME=yoyo
```
Process it using envconfig:
```go
package main
import (
"context"
"log"
"github.com/sethvargo/go-envconfig"
)
func main() {
ctx := context.Background()
var c MyConfig
if err := envconfig.Process(ctx, &c); err != nil {
log.Fatal(err)
}
// c.Port = 5555
// c.Username = "yoyo"
}
```
You can also use nested structs, just remember that any fields you want to
process must be public:
```go
type MyConfig struct {
Database *DatabaseConfig
}
type DatabaseConfig struct {
Port int `env:"PORT"`
Username string `env:"USERNAME"`
}
```
## Configuration
Use the `env` struct tag to define configuration.
### Required
If a field is required, processing will error if the environment variable is
unset.
```go
type MyStruct struct {
Port int `env:"PORT,required"`
}
```
It is invalid to have a field as both `required` and `default`.
### Default
If an environment variable is not set, the field will be set to the default
value. Note that the environment variable must not be set (e.g. `unset PORT`).
If the environment variable is the empty string, that counts as a "value" and
the default will not be used.
```go
type MyStruct struct {
Port int `env:"PORT,default=5555"`
}
```
You can also set the default value to another field or value from the
environment, for example:
```go
type MyStruct struct {
DefaultPort int `env:"DEFAULT_PORT,default=5555"`
Port int `env:"OVERRIDE_PORT,default=$DEFAULT_PORT"`
}
```
The value for `Port` will default to the value of `DEFAULT_PORT`.
It is invalid to have a field as both `required` and `default`.
### Prefix
For shared, embedded structs, you can define a prefix to use when processing
struct values for that embed.
```go
type SharedConfig struct {
Port int `env:"PORT,default=5555"`
}
type Server1 struct {
// This processes Port from $FOO_PORT.
*SharedConfig `env:",prefix=FOO_"`
}
type Server2 struct {
// This processes Port from $BAR_PORT.
*SharedConfig `env:",prefix=BAR_"`
}
```
It is invalid to specify a prefix on non-struct fields.
## Complex Types
### Durations
In the environment, `time.Duration` values are specified as a parsable Go
duration:
```go
type MyStruct struct {
MyVar time.Duration `env:"MYVAR"`
}
```
```bash
export MYVAR="10m" # 10 * time.Minute
```
### TextUnmarshaler / BinaryUnmarshaler
Types that implement `TextUnmarshaler` or `BinaryUnmarshaler` are processed as such.
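For example, a field whose type implements `encoding.TextUnmarshaler` is populated by calling `UnmarshalText` with the raw environment value. A minimal sketch (the `LogLevel` type and the `LEVEL` variable are made up for illustration):
```go
type LogLevel int

func (l *LogLevel) UnmarshalText(text []byte) error {
	switch string(text) {
	case "debug":
		*l = -1
	case "info":
		*l = 0
	default:
		return fmt.Errorf("unknown log level %q", text)
	}
	return nil
}

type MyStruct struct {
	Level LogLevel `env:"LEVEL"`
}
```
With `LEVEL=debug` in the environment, `Level` is set to `LogLevel(-1)`.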
### json.Unmarshaler
Types that implement `json.Unmarshaler` are processed as such.
### gob.GobDecoder
Types that implement `gob.GobDecoder` are processed as such.
### Slices
Slices are specified as comma-separated values:
```go
type MyStruct struct {
MyVar []string `env:"MYVAR"`
}
```
```bash
export MYVAR="a,b,c,d" # []string{"a", "b", "c", "d"}
```
Note that byte slices are special cased and interpreted as strings from the
environment.
### Maps
Maps are specified as comma-separated key:value pairs:
```go
type MyStruct struct {
MyVar map[string]string `env:"MYVAR"`
}
```
```bash
export MYVAR="a:b,c:d" # map[string]string{"a":"b", "c":"d"}
```
### Structs
Envconfig walks the entire struct, so deeply-nested fields are also supported. You can also define your own decoder (see below).
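For example, a minimal sketch of a deeply-nested configuration (the type names and the `TLS_CERT_FILE` variable are made up for illustration); only the leaf field carries an `env` tag, and envconfig reaches it by walking each struct along the way:
```go
type TLSConfig struct {
	CertFile string `env:"TLS_CERT_FILE"`
}

type ServerConfig struct {
	TLS TLSConfig
}

type MyStruct struct {
	Server ServerConfig
}
```
With `TLS_CERT_FILE=/etc/certs/tls.crt` set, processing populates `MyStruct.Server.TLS.CertFile`.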
## Prefixing
You can define a custom prefix using the `PrefixLookuper`. This will look up
values in the environment by prefixing the keys with the provided value:
```go
type MyStruct struct {
MyVar string `env:"MYVAR"`
}
```
```go
// Process variables, but look for the "APP_" prefix.
l := envconfig.PrefixLookuper("APP_", envconfig.OsLookuper())
if err := envconfig.ProcessWith(ctx, &c, l); err != nil {
panic(err)
}
```
```bash
export APP_MYVAR="foo"
```
## Extension
All built-in types are supported except Func and Chan. If you need to define a
custom decoder, implement `Decoder`:
```go
type MyStruct struct {
field string
}
func (v *MyStruct) EnvDecode(val string) error {
v.field = fmt.Sprintf("PREFIX-%s", val)
return nil
}
```
If you need to modify environment variable values before processing, you can
specify a custom `Mutator`:
```go
type Config struct {
Password string `env:"PASSWORD"`
}
func resolveSecretFunc(ctx context.Context, key, value string) (string, error) {
if strings.HasPrefix(value, "secret://") {
return secretmanager.Resolve(ctx, value) // example
}
return value, nil
}
var config Config
envconfig.ProcessWith(ctx, &config, envconfig.OsLookuper(), resolveSecretFunc)
```
## Testing
Relying on the environment in tests can be troublesome because environment
variables are global, which makes it difficult to parallelize the tests.
Envconfig supports extracting data from anything that returns a value:
```go
lookuper := envconfig.MapLookuper(map[string]string{
"FOO": "bar",
"ZIP": "zap",
})
var config Config
envconfig.ProcessWith(ctx, &config, lookuper)
```
Now you can parallelize all your tests by providing a map for the lookup
function. In fact, that's how the tests in this repo work, so check there for an
example.
You can also combine multiple lookupers with `MultiLookuper`. See the GoDoc for
more information and examples.
## Inspiration
This library is conceptually similar to [kelseyhightower/envconfig](https://github.com/kelseyhightower/envconfig), with the following
major behavioral differences:
- Adds support for specifying a custom lookup function (such as a map), which
is useful for testing.
- Only populates fields if they contain zero or nil values. This means you can
pre-initialize a struct and any pre-populated fields will not be overwritten
during processing.
- Support for interpolation. The default value for a field can be the value of
another field.
- Support for arbitrary mutators that change/resolve data before type
conversion.

View File

@@ -1,57 +0,0 @@
// Copyright The envconfig Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envconfig
import (
"encoding/base64"
"encoding/hex"
"strings"
)
// Base64Bytes is a slice of bytes where the information is base64-encoded in
// the environment variable.
type Base64Bytes []byte
// EnvDecode implements env.Decoder.
func (b *Base64Bytes) EnvDecode(val string) error {
val = strings.ReplaceAll(val, "+", "-")
val = strings.ReplaceAll(val, "/", "_")
val = strings.TrimRight(val, "=")
var err error
*b, err = base64.RawURLEncoding.DecodeString(val)
return err
}
// Bytes returns the underlying bytes.
func (b Base64Bytes) Bytes() []byte {
return []byte(b)
}
// HexBytes is a slice of bytes where the information is hex-encoded in the
// environment variable.
type HexBytes []byte
// EnvDecode implements env.Decoder.
func (b *HexBytes) EnvDecode(val string) error {
var err error
*b, err = hex.DecodeString(val)
return err
}
// Bytes returns the underlying bytes.
func (b HexBytes) Bytes() []byte {
return []byte(b)
}

View File

@@ -1,592 +0,0 @@
// Copyright The envconfig Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package envconfig populates struct fields based on environment variable
// values (or anything that responds to "Lookup"). Structs declare their
// environment dependencies using the `env` tag with the key being the name of
// the environment variable, case sensitive.
//
// type MyStruct struct {
// A string `env:"A"` // resolves A to $A
// B string `env:"B,required"` // resolves B to $B, errors if $B is unset
// C string `env:"C,default=foo"` // resolves C to $C, defaults to "foo"
//
// D string `env:"D,required,default=foo"` // error, cannot be required and default
// E string `env:""` // error, must specify key
// }
//
// All built-in types are supported except Func and Chan. If you need to define
// a custom decoder, implement Decoder:
//
// type MyStruct struct {
// field string
// }
//
// func (v *MyStruct) EnvDecode(val string) error {
// v.field = fmt.Sprintf("PREFIX-%s", val)
// return nil
// }
//
// In the environment, slices are specified as comma-separated values:
//
// export MYVAR="a,b,c,d" // []string{"a", "b", "c", "d"}
//
// In the environment, maps are specified as comma-separated key:value pairs:
//
// export MYVAR="a:b,c:d" // map[string]string{"a":"b", "c":"d"}
//
// If you need to modify environment variable values before processing, you can
// specify a custom mutator:
//
// type Config struct {
// Password string `env:"PASSWORD_SECRET"`
// }
//
// func resolveSecretFunc(ctx context.Context, key, value string) (string, error) {
// if strings.HasPrefix(value, "secret://") {
// return secretmanager.Resolve(ctx, value) // example
// }
// return value, nil
// }
//
// var config Config
// ProcessWith(&config, OsLookuper(), resolveSecretFunc)
//
package envconfig
import (
"context"
"encoding"
"encoding/gob"
"encoding/json"
"errors"
"fmt"
"os"
"reflect"
"strconv"
"strings"
"time"
)
const (
envTag = "env"
optRequired = "required"
optDefault = "default="
optPrefix = "prefix="
)
// Error is a custom error type for errors returned by envconfig.
type Error string
// Error implements error.
func (e Error) Error() string {
return string(e)
}
const (
ErrInvalidMapItem = Error("invalid map item")
ErrLookuperNil = Error("lookuper cannot be nil")
ErrMissingKey = Error("missing key")
ErrMissingRequired = Error("missing required value")
ErrNotPtr = Error("input must be a pointer")
ErrNotStruct = Error("input must be a struct")
ErrPrefixNotStruct = Error("prefix is only valid on struct types")
ErrPrivateField = Error("cannot parse private fields")
ErrRequiredAndDefault = Error("field cannot be required and have a default value")
ErrUnknownOption = Error("unknown option")
)
// Lookuper is an interface that provides a lookup for a string-based key.
type Lookuper interface {
// Lookup searches for the given key and returns the corresponding string
// value. If a value is found, it returns the value and true. If a value is
// not found, it returns the empty string and false.
Lookup(key string) (string, bool)
}
// osLookuper looks up environment configuration from the local environment.
type osLookuper struct{}
// Verify implements interface.
var _ Lookuper = (*osLookuper)(nil)
func (o *osLookuper) Lookup(key string) (string, bool) {
return os.LookupEnv(key)
}
// OsLookuper returns a lookuper that uses the environment (os.LookupEnv) to
// resolve values.
func OsLookuper() Lookuper {
return new(osLookuper)
}
type mapLookuper map[string]string
var _ Lookuper = (*mapLookuper)(nil)
func (m mapLookuper) Lookup(key string) (string, bool) {
v, ok := m[key]
return v, ok
}
// MapLookuper looks up environment configuration from a provided map. This is
// useful for testing, especially in parallel, since it does not require you to
// mutate the parent environment (which is stateful).
func MapLookuper(m map[string]string) Lookuper {
return mapLookuper(m)
}
type multiLookuper struct {
ls []Lookuper
}
var _ Lookuper = (*multiLookuper)(nil)
func (m *multiLookuper) Lookup(key string) (string, bool) {
for _, l := range m.ls {
if v, ok := l.Lookup(key); ok {
return v, true
}
}
return "", false
}
// PrefixLookuper looks up environment configuration using the specified prefix.
// This is useful if you want all your variables to start with a particular
// prefix like "MY_APP_".
func PrefixLookuper(prefix string, l Lookuper) Lookuper {
if typ, ok := l.(*prefixLookuper); ok {
return &prefixLookuper{prefix: typ.prefix + prefix, l: typ.l}
}
return &prefixLookuper{prefix: prefix, l: l}
}
type prefixLookuper struct {
prefix string
l Lookuper
}
func (p *prefixLookuper) Lookup(key string) (string, bool) {
return p.l.Lookup(p.prefix + key)
}
// MultiLookuper wraps a collection of lookupers. It does not merge them;
// each lookup is attempted against the lookupers in the order they are
// provided, and the first value found is returned.
func MultiLookuper(lookupers ...Lookuper) Lookuper {
return &multiLookuper{ls: lookupers}
}
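// exampleMultiLookuper is an illustrative sketch and not part of the original
// file: it layers a map of test overrides over the real environment. Because
// lookups are attempted in the order the lookupers are provided, the map's
// PORT value wins over any PORT set in the process environment.
func exampleMultiLookuper(ctx context.Context) (int, error) {
	l := MultiLookuper(
		MapLookuper(map[string]string{"PORT": "8080"}),
		OsLookuper(),
	)
	var c struct {
		Port int `env:"PORT"`
	}
	err := ProcessWith(ctx, &c, l)
	return c.Port, err
}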
// Decoder is an interface that custom types/fields can implement to control how
// decoding takes place. For example:
//
// type MyType string
//
// func (mt *MyType) EnvDecode(val string) error {
//     *mt = MyType("CUSTOM-" + val)
//     return nil
// }
//
type Decoder interface {
EnvDecode(val string) error
}
// MutatorFunc is a function that mutates a given value before it is passed
// along for processing. This is useful if you want to mutate the environment
// variable value before it's converted to the proper type.
type MutatorFunc func(ctx context.Context, k, v string) (string, error)
// options are internal options for decoding.
type options struct {
Default string
Required bool
Prefix string
}
// Process processes the struct using the environment. See ProcessWith for a
// more customizable version.
func Process(ctx context.Context, i interface{}) error {
return ProcessWith(ctx, i, OsLookuper())
}
// ProcessWith processes the given interface with the given lookuper. See the
// package-level documentation for specific examples and behaviors.
func ProcessWith(ctx context.Context, i interface{}, l Lookuper, fns ...MutatorFunc) error {
if l == nil {
return ErrLookuperNil
}
v := reflect.ValueOf(i)
if v.Kind() != reflect.Ptr {
return ErrNotPtr
}
e := v.Elem()
if e.Kind() != reflect.Struct {
return ErrNotStruct
}
t := e.Type()
for i := 0; i < t.NumField(); i++ {
ef := e.Field(i)
tf := t.Field(i)
tag := tf.Tag.Get(envTag)
if !ef.CanSet() {
if tag != "" {
// There's an "env" tag on a private field, we can't alter it, and it's
// likely a mistake. Return an error so the user can handle.
return fmt.Errorf("%s: %w", tf.Name, ErrPrivateField)
}
// Otherwise continue to the next field.
continue
}
// Parse the key and options.
key, opts, err := keyAndOpts(tag)
if err != nil {
return fmt.Errorf("%s: %w", tf.Name, err)
}
// Initialize pointer structs.
for ef.Kind() == reflect.Ptr {
if ef.IsNil() {
if ef.Type().Elem().Kind() != reflect.Struct {
// This is a nil pointer to something that isn't a struct, like
// *string. Move along.
break
}
// Nil pointer to a struct, create so we can traverse.
ef.Set(reflect.New(ef.Type().Elem()))
}
ef = ef.Elem()
}
// Special-case structs. This has to come after the value resolution in
// case the struct has a custom decoder.
if ef.Kind() == reflect.Struct {
for ef.CanAddr() {
ef = ef.Addr()
}
// Lookup the value, ignoring an error if the key isn't defined. This is
// required for nested structs that don't declare their own `env` keys,
// but have internal fields with an `env` defined.
val, err := lookup(key, opts, l)
if err != nil && !errors.Is(err, ErrMissingKey) {
return fmt.Errorf("%s: %w", tf.Name, err)
}
if ok, err := processAsDecoder(val, ef); ok {
if err != nil {
return err
}
continue
}
plu := l
if opts.Prefix != "" {
plu = PrefixLookuper(opts.Prefix, l)
}
if err := ProcessWith(ctx, ef.Interface(), plu, fns...); err != nil {
return fmt.Errorf("%s: %w", tf.Name, err)
}
continue
}
// It's invalid to have a prefix on a non-struct field.
if opts.Prefix != "" {
return ErrPrefixNotStruct
}
// Stop processing if there's no env tag (this comes after nested parsing),
// in case there's an env tag in an embedded struct.
if tag == "" {
continue
}
// The field already has a non-zero value, do not overwrite.
if !ef.IsZero() {
continue
}
val, err := lookup(key, opts, l)
if err != nil {
return fmt.Errorf("%s: %w", tf.Name, err)
}
// Apply any mutators. Mutators are applied after the lookup, but before any
// type conversions. They always resolve to a string (or error)
for _, fn := range fns {
if fn != nil {
val, err = fn(ctx, key, val)
if err != nil {
return fmt.Errorf("%s: %w", tf.Name, err)
}
}
}
// Set value.
if err := processField(val, ef); err != nil {
return fmt.Errorf("%s(%q): %w", tf.Name, val, err)
}
}
return nil
}
// keyAndOpts parses the given tag value (e.g. env:"foo,required") and
// returns the key name and options as a list.
func keyAndOpts(tag string) (string, *options, error) {
parts := strings.Split(tag, ",")
key, tagOpts := strings.TrimSpace(parts[0]), parts[1:]
var opts options
LOOP:
for i, o := range tagOpts {
o = strings.TrimSpace(o)
switch {
case o == optRequired:
opts.Required = true
case strings.HasPrefix(o, optPrefix):
opts.Prefix = strings.TrimPrefix(o, optPrefix)
case strings.HasPrefix(o, optDefault):
// If a default value was given, assume everything after is the provided
// value, including comma-separated items.
o = strings.TrimLeft(strings.Join(tagOpts[i:], ","), " ")
opts.Default = strings.TrimPrefix(o, optDefault)
break LOOP
default:
return "", nil, fmt.Errorf("%q: %w", o, ErrUnknownOption)
}
}
return key, &opts, nil
}
// lookup looks up the given key using the provided Lookuper and options.
func lookup(key string, opts *options, l Lookuper) (string, error) {
if key == "" {
// The struct has something like `env:",required"`, which is likely a
// mistake. We could try to infer the envvar from the field name, but that
// feels too magical.
return "", ErrMissingKey
}
if opts.Required && opts.Default != "" {
// Having a default value on a required value doesn't make sense.
return "", ErrRequiredAndDefault
}
// Lookup value.
val, ok := l.Lookup(key)
if !ok {
if opts.Required {
return "", fmt.Errorf("%w: %s", ErrMissingRequired, key)
}
if opts.Default != "" {
// Expand the default value. This allows for a default value that maps to
// a different variable.
val = os.Expand(opts.Default, func(i string) string {
s, ok := l.Lookup(i)
if ok {
return s
}
return ""
})
}
}
return val, nil
}
// processAsDecoder processes the given value as a decoder or custom
// unmarshaller.
func processAsDecoder(v string, ef reflect.Value) (bool, error) {
// Keep a running error. It's possible that a property might implement
// multiple decoders, and we don't know *which* decoder will succeed. If we
// get through all of them, we'll return the most recent error.
var imp bool
var err error
// Resolve any pointers.
for ef.CanAddr() {
ef = ef.Addr()
}
if ef.CanInterface() {
iface := ef.Interface()
if dec, ok := iface.(Decoder); ok {
imp = true
if err = dec.EnvDecode(v); err == nil {
return true, nil
}
}
if tu, ok := iface.(encoding.BinaryUnmarshaler); ok {
imp = true
if err = tu.UnmarshalBinary([]byte(v)); err == nil {
return true, nil
}
}
if tu, ok := iface.(gob.GobDecoder); ok {
imp = true
if err = tu.GobDecode([]byte(v)); err == nil {
return true, nil
}
}
if tu, ok := iface.(json.Unmarshaler); ok {
imp = true
if err = tu.UnmarshalJSON([]byte(v)); err == nil {
return true, nil
}
}
if tu, ok := iface.(encoding.TextUnmarshaler); ok {
imp = true
if err = tu.UnmarshalText([]byte(v)); err == nil {
return true, nil
}
}
}
return imp, err
}
func processField(v string, ef reflect.Value) error {
// Handle pointers and uninitialized pointers.
for ef.Type().Kind() == reflect.Ptr {
if ef.IsNil() {
ef.Set(reflect.New(ef.Type().Elem()))
}
ef = ef.Elem()
}
tf := ef.Type()
tk := tf.Kind()
// Handle existing decoders.
if ok, err := processAsDecoder(v, ef); ok {
return err
}
// We don't check if the value is empty earlier, because the user might want
// to define a custom decoder and treat the empty variable as a special case.
// However, if we got this far, none of the remaining parsers will succeed, so
// bail out now.
if v == "" {
return nil
}
switch tk {
case reflect.Bool:
b, err := strconv.ParseBool(v)
if err != nil {
return err
}
ef.SetBool(b)
case reflect.Float32, reflect.Float64:
f, err := strconv.ParseFloat(v, tf.Bits())
if err != nil {
return err
}
ef.SetFloat(f)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
i, err := strconv.ParseInt(v, 0, tf.Bits())
if err != nil {
return err
}
ef.SetInt(i)
case reflect.Int64:
// Special case time.Duration values.
if tf.PkgPath() == "time" && tf.Name() == "Duration" {
d, err := time.ParseDuration(v)
if err != nil {
return err
}
ef.SetInt(int64(d))
} else {
i, err := strconv.ParseInt(v, 0, tf.Bits())
if err != nil {
return err
}
ef.SetInt(i)
}
case reflect.String:
ef.SetString(v)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
i, err := strconv.ParseUint(v, 0, tf.Bits())
if err != nil {
return err
}
ef.SetUint(i)
case reflect.Interface:
return fmt.Errorf("cannot decode into interfaces")
// Maps
case reflect.Map:
vals := strings.Split(v, ",")
mp := reflect.MakeMapWithSize(tf, len(vals))
for _, val := range vals {
pair := strings.SplitN(val, ":", 2)
if len(pair) < 2 {
return fmt.Errorf("%s: %w", val, ErrInvalidMapItem)
}
mKey, mVal := strings.TrimSpace(pair[0]), strings.TrimSpace(pair[1])
k := reflect.New(tf.Key()).Elem()
if err := processField(mKey, k); err != nil {
return fmt.Errorf("%s: %w", mKey, err)
}
v := reflect.New(tf.Elem()).Elem()
if err := processField(mVal, v); err != nil {
return fmt.Errorf("%s: %w", mVal, err)
}
mp.SetMapIndex(k, v)
}
ef.Set(mp)
// Slices
case reflect.Slice:
// Special case: []byte
if tf.Elem().Kind() == reflect.Uint8 {
ef.Set(reflect.ValueOf([]byte(v)))
} else {
vals := strings.Split(v, ",")
s := reflect.MakeSlice(tf, len(vals), len(vals))
for i, val := range vals {
val = strings.TrimSpace(val)
if err := processField(val, s.Index(i)); err != nil {
return fmt.Errorf("%s: %w", val, err)
}
}
ef.Set(s)
}
}
return nil
}

View File

@@ -1,5 +0,0 @@
module github.com/sethvargo/go-envconfig
go 1.14
require github.com/google/go-cmp v0.4.1

View File

@@ -1,4 +0,0 @@
github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,84 +0,0 @@
package local
import (
"encoding/json"
"fmt"
"time"
"github.com/schollz/peerdiscovery"
"github.com/suborbital/grav/grav"
"github.com/suborbital/vektor/vlog"
)
// Discovery is a grav Discovery plugin using local network multicast
type Discovery struct {
opts *grav.DiscoveryOpts
log *vlog.Logger
discoveryFunc grav.DiscoveryFunc
}
// payload is a discovery payload
type payload struct {
UUID string `json:"uuid"`
Port string `json:"port"`
Path string `json:"path"`
}
// New creates a new local discovery plugin
func New() *Discovery {
g := &Discovery{}
return g
}
// Start starts discovery
func (d *Discovery) Start(opts *grav.DiscoveryOpts, discoveryFunc grav.DiscoveryFunc) error {
d.opts = opts
d.log = opts.Logger
d.discoveryFunc = discoveryFunc
d.log.Info("[discovery-local] starting discovery, advertising endpoint", opts.TransportPort, opts.TransportURI)
payloadFunc := func() []byte {
payload := payload{
UUID: d.opts.NodeUUID,
Port: opts.TransportPort,
Path: opts.TransportURI,
}
payloadBytes, _ := json.Marshal(payload)
return payloadBytes
}
notifyFunc := func(peer peerdiscovery.Discovered) {
d.log.Debug("[discovery-local] potential peer found:", peer.Address)
payload := payload{}
if err := json.Unmarshal(peer.Payload, &payload); err != nil {
d.log.Debug("[discovery-local] peer did not offer correct payload, discarding")
return
}
endpoint := fmt.Sprintf("%s:%s%s", peer.Address, payload.Port, payload.Path)
// send the discovery to Grav. Grav is responsible for ensuring uniqueness of the connections.
d.discoveryFunc(endpoint, payload.UUID)
}
_, err := peerdiscovery.Discover(peerdiscovery.Settings{
Limit: -1,
PayloadFunc: payloadFunc,
Delay: 10 * time.Second,
TimeLimit: -1,
Notify: notifyFunc,
AllowSelf: true,
})
return err
}
// UseDiscoveryFunc sets the function to be used when a new peer is discovered
func (d *Discovery) UseDiscoveryFunc(dFunc func(endpoint string, uuid string)) {
d.discoveryFunc = dFunc
}
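// exampleStandaloneDiscovery is an illustrative sketch and not part of the
// original file: it runs the local discovery plugin directly, outside of a
// Grav hub, and logs each peer that announces itself. The node UUID, port,
// and path values here are made up for illustration; Start blocks while
// discovery runs.
func exampleStandaloneDiscovery(log *vlog.Logger) error {
	d := New()
	opts := &grav.DiscoveryOpts{
		NodeUUID:      "example-node",
		TransportPort: "8080",
		TransportURI:  "/meta/message",
		Logger:        log,
	}
	onPeer := func(endpoint string, uuid string) {
		log.Info("[example] discovered peer", uuid, "at", endpoint)
	}
	return d.Start(opts, onPeer)
}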

View File

@@ -1,83 +0,0 @@
package grav
const (
defaultBusChanSize = 256
)
// messageBus is responsible for emitting messages among the connected pods
// and managing the failure cases for those pods
type messageBus struct {
busChan MsgChan
pool *connectionPool
buffer *MsgBuffer
}
// newMessageBus creates a new messageBus
func newMessageBus() *messageBus {
b := &messageBus{
busChan: make(chan Message, defaultBusChanSize),
pool: newConnectionPool(),
buffer: NewMsgBuffer(defaultBufferSize),
}
b.start()
return b
}
// addPod adds a pod to the connection pool
func (b *messageBus) addPod(pod *Pod) {
b.pool.insert(pod)
}
func (b *messageBus) start() {
go func() {
// continually take new messages; for each one, grab the next active
// connection from the ring, then traverse the ring emitting the message
// to each connection until arriving back at the starting point, and
// repeat for every new message that arrives
for msg := range b.busChan {
for {
// make sure the next pod is ready for messages
if err := b.pool.prepareNext(b.buffer); err == nil {
break
}
}
startingConn := b.pool.next()
b.traverse(msg, startingConn)
b.buffer.Push(msg)
}
}()
}
func (b *messageBus) traverse(msg Message, start *podConnection) {
startID := start.ID
conn := start
for {
// send the message to the pod
conn.send(msg)
// run checks on the next podConnection to see if
// anything needs to be done (including potentially deleting it)
next := b.pool.peek()
if err := b.pool.prepareNext(b.buffer); err != nil {
if startID == next.ID {
startID = next.next.ID
}
}
// now advance the ring
conn = b.pool.next()
if startID == conn.ID {
// if we have arrived back at the starting point on the ring
// we have done our job and are ready for the next message
break
}
}
}

View File

@@ -1,21 +0,0 @@
package grav
import "github.com/suborbital/vektor/vlog"
// DiscoveryFunc is a function that allows a plugin to report a newly discovered node
type DiscoveryFunc func(endpoint string, uuid string)
// Discovery represents a discovery plugin
type Discovery interface {
// Start is called to start the Discovery plugin
Start(*DiscoveryOpts, DiscoveryFunc) error
}
// DiscoveryOpts is a set of options for discovery plugins
type DiscoveryOpts struct {
NodeUUID string
TransportPort string
TransportURI string
Logger *vlog.Logger
Custom interface{}
}

View File

@@ -1,71 +0,0 @@
package grav
import (
"sync"
)
// messageFilter is a series of maps that associate things about a message (its UUID, type, etc.) with a boolean value to say if
// it should be allowed or denied. For each of the maps, if an entry is included, the value of the boolean is respected (true = allow, false = deny).
// Maps are either inclusive (meaning that a missing entry defaults to allow) or exclusive (meaning that a missing entry defaults to deny).
// This can be configured per map by modifying the UUIDInclusive, TypeInclusive (etc.) fields.
type messageFilter struct {
UUIDMap map[string]bool
UUIDInclusive bool
TypeMap map[string]bool
TypeInclusive bool
lock sync.RWMutex
}
func newMessageFilter() *messageFilter {
mf := &messageFilter{
UUIDMap: map[string]bool{},
UUIDInclusive: true,
TypeMap: map[string]bool{},
TypeInclusive: true,
lock: sync.RWMutex{},
}
return mf
}
func (mf *messageFilter) allow(msg Message) bool {
mf.lock.RLock()
defer mf.lock.RUnlock()
// for each map, deny the message if:
// - a filter entry exists and its value is false
// - a filter entry doesn't exist and its inclusive rule is false
allowType, typeExists := mf.TypeMap[msg.Type()]
if typeExists && !allowType {
return false
} else if !typeExists && !mf.TypeInclusive {
return false
}
allowUUID, uuidExists := mf.UUIDMap[msg.UUID()]
if uuidExists && !allowUUID {
return false
} else if !uuidExists && !mf.UUIDInclusive {
return false
}
return true
}
// FilterUUID adds a message UUID to the pod's filter; it is unlikely to be needed in normal cases.
func (mf *messageFilter) FilterUUID(uuid string, allow bool) {
mf.lock.Lock()
defer mf.lock.Unlock()
mf.UUIDMap[uuid] = allow
}
func (mf *messageFilter) FilterType(msgType string, allow bool) {
mf.lock.Lock()
defer mf.lock.Unlock()
mf.TypeMap[msgType] = allow
}
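// exampleExclusiveTypeFilter is an illustrative sketch and not part of the
// original file: setting TypeInclusive to false flips the type map to
// deny-by-default, so allow returns true only for message types that were
// explicitly allowed with FilterType (the type name used here is made up).
func exampleExclusiveTypeFilter(msg Message) bool {
	mf := newMessageFilter()
	mf.TypeInclusive = false              // unknown types are now denied
	mf.FilterType("config.changed", true) // explicitly allow a single type
	return mf.allow(msg)
}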

View File

@@ -1,72 +0,0 @@
package grav
import (
"github.com/pkg/errors"
"github.com/google/uuid"
"github.com/suborbital/vektor/vlog"
)
// ErrTransportNotConfigured is returned when a transport plugin has not been configured
var (
ErrTransportNotConfigured = errors.New("transport plugin not configured")
)
// Grav represents a Grav message bus instance
type Grav struct {
NodeUUID string
bus *messageBus
logger *vlog.Logger
hub *hub
}
// New creates a new Grav with the provided options
func New(opts ...OptionsModifier) *Grav {
nodeUUID := uuid.New().String()
options := newOptionsWithModifiers(opts...)
g := &Grav{
NodeUUID: nodeUUID,
bus: newMessageBus(),
logger: options.Logger,
}
// the hub handles coordinating the transport and discovery plugins
g.hub = initHub(nodeUUID, options, options.Transport, options.Discovery, g.Connect)
return g
}
// Connect creates a new connection (pod) to the bus
func (g *Grav) Connect() *Pod {
opts := &podOpts{WantsReplay: false}
return g.connectWithOpts(opts)
}
// ConnectWithReplay creates a new connection (pod) to the bus
// and replays recent messages when the pod sets its onFunc
func (g *Grav) ConnectWithReplay() *Pod {
opts := &podOpts{WantsReplay: true}
return g.connectWithOpts(opts)
}
// ConnectEndpoint uses the configured transport to connect the bus to an external endpoint
func (g *Grav) ConnectEndpoint(endpoint string) error {
return g.hub.connectEndpoint(endpoint, "")
}
// ConnectBridgeTopic connects the Grav instance to a particular topic on the connected bridge
func (g *Grav) ConnectBridgeTopic(topic string) error {
return g.hub.connectBridgeTopic(topic)
}
func (g *Grav) connectWithOpts(opts *podOpts) *Pod {
pod := newPod(g.bus.busChan, opts)
g.bus.addPod(pod)
return pod
}
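// exampleBus is an illustrative sketch and not part of the original file:
// with no transport or discovery configured, New returns an in-process bus
// and Connect attaches a pod to it; the pod is this caller's handle for
// sending and receiving messages on the bus.
func exampleBus() *Pod {
	g := New()         // default options, no transport/discovery plugins
	pod := g.Connect() // a pod is a connection to the bus
	return pod
}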

View File

@@ -1,290 +0,0 @@
package grav
import (
"sync"
"github.com/pkg/errors"
"github.com/suborbital/vektor/vlog"
)
// hub is responsible for coordinating the transport and discovery plugins
type hub struct {
nodeUUID string
transport Transport
discovery Discovery
log *vlog.Logger
pod *Pod
connectFunc func() *Pod
connections map[string]Connection
topicConnections map[string]TopicConnection
lock sync.RWMutex
}
func initHub(nodeUUID string, options *Options, tspt Transport, dscv Discovery, connectFunc func() *Pod) *hub {
h := &hub{
nodeUUID: nodeUUID,
transport: tspt,
discovery: dscv,
log: options.Logger,
pod: connectFunc(),
connectFunc: connectFunc,
connections: map[string]Connection{},
topicConnections: map[string]TopicConnection{},
lock: sync.RWMutex{},
}
// start transport, then discovery, if each has been configured (can have transport but no discovery)
if h.transport != nil {
transportOpts := &TransportOpts{
NodeUUID: nodeUUID,
Port: options.Port,
URI: options.URI,
Logger: options.Logger,
}
// setup messages to be sent to all active connections
h.pod.On(h.outgoingMessageHandler())
go func() {
if err := h.transport.Setup(transportOpts, h.handleIncomingConnection, h.findConnection); err != nil {
options.Logger.Error(errors.Wrap(err, "failed to Setup transport"))
}
}()
if h.discovery != nil {
discoveryOpts := &DiscoveryOpts{
NodeUUID: nodeUUID,
TransportPort: transportOpts.Port,
TransportURI: transportOpts.URI,
Logger: options.Logger,
}
go func() {
if err := h.discovery.Start(discoveryOpts, h.discoveryHandler()); err != nil {
options.Logger.Error(errors.Wrap(err, "failed to Start discovery"))
}
}()
}
}
return h
}
func (h *hub) discoveryHandler() func(endpoint string, uuid string) {
return func(endpoint string, uuid string) {
if uuid == h.nodeUUID {
h.log.Debug("discovered self, discarding")
return
}
// connectEndpoint performs this check too, but doing it here as well
// reduces the number of extraneous outgoing handshakes that get attempted.
if existing, exists := h.findConnection(uuid); exists {
if !existing.CanReplace() {
h.log.Debug("encountered duplicate connection, discarding")
return
}
}
if err := h.connectEndpoint(endpoint, uuid); err != nil {
h.log.Error(errors.Wrap(err, "failed to connectEndpoint for discovered peer"))
}
}
}
// connectEndpoint creates a new outgoing connection
func (h *hub) connectEndpoint(endpoint, uuid string) error {
if h.transport == nil {
return ErrTransportNotConfigured
}
if h.transport.Type() == TransportTypeBridge {
return ErrBridgeOnlyTransport
}
h.log.Debug("connecting to endpoint", endpoint)
conn, err := h.transport.CreateConnection(endpoint)
if err != nil {
return errors.Wrap(err, "failed to transport.CreateConnection")
}
h.setupOutgoingConnection(conn, uuid)
return nil
}
// connectBridgeTopic creates a new connection to a topic on the configured bridge transport
func (h *hub) connectBridgeTopic(topic string) error {
if h.transport == nil {
return ErrTransportNotConfigured
}
if h.transport.Type() != TransportTypeBridge {
return ErrNotBridgeTransport
}
h.log.Debug("connecting to topic", topic)
conn, err := h.transport.ConnectBridgeTopic(topic)
if err != nil {
return errors.Wrap(err, "failed to transport.ConnectBridgeTopic")
}
h.addTopicConnection(conn, topic)
return nil
}
func (h *hub) setupOutgoingConnection(connection Connection, uuid string) {
handshake := &TransportHandshake{h.nodeUUID}
ack, err := connection.DoOutgoingHandshake(handshake)
if err != nil {
h.log.Error(errors.Wrap(err, "failed to connection.DoOutgoingHandshake"))
connection.Close()
return
}
if uuid == "" {
if ack.UUID == "" {
h.log.ErrorString("connection handshake returned empty UUID, terminating connection")
connection.Close()
return
}
uuid = ack.UUID
} else if ack.UUID != uuid {
h.log.ErrorString("connection handshake Ack did not match Discovery Ack, terminating connection")
connection.Close()
return
}
h.setupNewConnection(connection, uuid)
}
func (h *hub) handleIncomingConnection(connection Connection) {
ack := &TransportHandshakeAck{h.nodeUUID}
handshake, err := connection.DoIncomingHandshake(ack)
if err != nil {
h.log.Error(errors.Wrap(err, "failed to connection.DoIncomingHandshake"))
connection.Close()
return
}
if handshake.UUID == "" {
h.log.ErrorString("connection handshake returned empty UUID, terminating connection")
connection.Close()
return
}
h.setupNewConnection(connection, handshake.UUID)
}
func (h *hub) setupNewConnection(connection Connection, uuid string) {
// if an existing connection is found, check if it can be replaced and do so if possible
if existing, exists := h.findConnection(uuid); exists {
if !existing.CanReplace() {
connection.Close()
h.log.Debug("encountered duplicate connection, discarding")
} else {
existing.Close()
h.replaceConnection(connection, uuid)
}
} else {
h.addConnection(connection, uuid)
}
}
func (h *hub) outgoingMessageHandler() MsgFunc {
return func(msg Message) error {
// read-lock while dispatching all of the goroutines to prevent concurrent read/write
h.lock.RLock()
defer h.lock.RUnlock()
for u := range h.connections {
uuid := u
conn := h.connections[uuid]
go func() {
h.log.Debug("sending message", msg.UUID(), "to", uuid)
if err := conn.Send(msg); err != nil {
if errors.Is(err, ErrConnectionClosed) {
h.log.Debug("attempted to send on closed connection, will remove")
} else {
h.log.Warn("error sending to connection", uuid, ":", err.Error())
}
h.removeConnection(uuid)
}
}()
}
return nil
}
}
func (h *hub) incomingMessageHandler(uuid string) ReceiveFunc {
return func(msg Message) {
h.log.Debug("received message ", msg.UUID(), "from node", uuid)
h.pod.Send(msg)
}
}
func (h *hub) addConnection(connection Connection, uuid string) {
h.lock.Lock()
defer h.lock.Unlock()
h.log.Debug("adding connection for", uuid)
connection.Start(h.incomingMessageHandler(uuid))
h.connections[uuid] = connection
}
func (h *hub) addTopicConnection(connection TopicConnection, topic string) {
h.lock.Lock()
defer h.lock.Unlock()
h.log.Debug("adding bridge connection for", topic)
connection.Start(h.connectFunc())
h.topicConnections[topic] = connection
}
func (h *hub) replaceConnection(newConnection Connection, uuid string) {
h.lock.Lock()
defer h.lock.Unlock()
h.log.Debug("replacing connection for", uuid)
delete(h.connections, uuid)
newConnection.Start(h.incomingMessageHandler(uuid))
h.connections[uuid] = newConnection
}
func (h *hub) removeConnection(uuid string) {
h.lock.Lock()
defer h.lock.Unlock()
h.log.Debug("removing connection for", uuid)
delete(h.connections, uuid)
}
func (h *hub) findConnection(uuid string) (Connection, bool) {
h.lock.RLock()
defer h.lock.RUnlock()
conn, exists := h.connections[uuid]
return conn, exists
}

View File

@@ -1,170 +0,0 @@
package grav
import (
"encoding/json"
"io/ioutil"
"net/http"
"time"
"github.com/google/uuid"
)
// MsgTypeDefault and others represent message type consts
const (
MsgTypeDefault string = "grav.default"
msgTypePodFeedback string = "grav.feedback"
)
// MsgFunc is a callback function that accepts a message and returns an error
type MsgFunc func(Message) error
// MsgChan is a channel that accepts a message
type MsgChan chan Message
// Message represents a message
type Message interface {
// Unique ID for this message
UUID() string
// ID of the parent event or request, such as HTTP request
ParentID() string
// The UUID of the message being replied to, if any
ReplyTo() string
// Allow setting a message UUID that this message is a response to
SetReplyTo(string)
// Type of message (application-specific)
Type() string
// Time the message was sent
Timestamp() time.Time
// Raw data of message
Data() []byte
// Unmarshal the message's data into a struct
UnmarshalData(interface{}) error
// Marshal the message itself to encoded bytes (JSON or otherwise)
Marshal() ([]byte, error)
// Unmarshal encoded Message into object
Unmarshal([]byte) error
}
// NewMsg creates a new Message with the built-in `_message` type
func NewMsg(msgType string, data []byte) Message {
return new(msgType, "", data)
}
// NewMsgWithParentID returns a new message with the provided parent ID
func NewMsgWithParentID(msgType, parentID string, data []byte) Message {
return new(msgType, parentID, data)
}
// NewMsgReplyTo creates a new message in response to a previous message
func NewMsgReplyTo(ticket MsgReceipt, msgType string, data []byte) Message {
m := new(msgType, "", data)
m.SetReplyTo(ticket.UUID)
return m
}
// MsgFromBytes returns a default _message that has been unmarshalled from bytes.
// Should only be used if the default _message type is being used.
func MsgFromBytes(bytes []byte) (Message, error) {
m := &_message{}
if err := m.Unmarshal(bytes); err != nil {
return nil, err
}
return m, nil
}
// MsgFromRequest extracts an encoded Message from an HTTP request
func MsgFromRequest(r *http.Request) (Message, error) {
defer r.Body.Close()
bytes, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
return MsgFromBytes(bytes)
}
func new(msgType, parentID string, data []byte) Message {
uuid := uuid.New()
m := &_message{
Meta: _meta{
UUID: uuid.String(),
ParentID: parentID,
ReplyTo: "",
MsgType: msgType,
Timestamp: time.Now(),
},
Payload: _payload{
Data: data,
},
}
return m
}
// _message is a basic built-in implementation of Message
// most applications should define their own data structure
// that implements the interface
type _message struct {
Meta _meta `json:"meta"`
Payload _payload `json:"payload"`
}
type _meta struct {
UUID string `json:"uuid"`
ParentID string `json:"parent_id"`
ReplyTo string `json:"response_to"`
MsgType string `json:"msg_type"`
Timestamp time.Time `json:"timestamp"`
}
type _payload struct {
Data []byte `json:"data"`
}
func (m *_message) UUID() string {
return m.Meta.UUID
}
func (m *_message) ParentID() string {
return m.Meta.ParentID
}
func (m *_message) ReplyTo() string {
return m.Meta.ReplyTo
}
func (m *_message) SetReplyTo(uuid string) {
m.Meta.ReplyTo = uuid
}
func (m *_message) Type() string {
return m.Meta.MsgType
}
func (m *_message) Timestamp() time.Time {
return m.Meta.Timestamp
}
func (m *_message) Data() []byte {
return m.Payload.Data
}
func (m *_message) UnmarshalData(target interface{}) error {
return json.Unmarshal(m.Payload.Data, target)
}
func (m *_message) Marshal() ([]byte, error) {
bytes, err := json.Marshal(m)
if err != nil {
return nil, err
}
return bytes, nil
}
func (m *_message) Unmarshal(bytes []byte) error {
return json.Unmarshal(bytes, m)
}
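
A short sketch of wrapping an application payload in the default message type shown above and round-tripping it; loginEvent is an illustrative struct, not part of the removed source.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/suborbital/grav/grav"
)

type loginEvent struct {
	Username string `json:"username"`
}

func main() {
	// wrap an application payload in a default grav Message
	payload, _ := json.Marshal(loginEvent{Username: "jillian"})
	msg := grav.NewMsg("app.login", payload)

	// the default message marshals to JSON, e.g. for sending over a transport
	raw, _ := msg.Marshal()

	// reconstruct it on the receiving side and unpack the payload
	received, _ := grav.MsgFromBytes(raw)
	event := loginEvent{}
	if err := received.UnmarshalData(&event); err != nil {
		fmt.Println("failed to UnmarshalData:", err)
		return
	}

	fmt.Println(received.UUID(), event.Username)
}
```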

View File

@@ -1,90 +0,0 @@
package grav
import (
"sync"
)
const (
defaultBufferSize = 128
)
// MsgBuffer is a buffer of messages with a particular size limit.
// Oldest messages are automatically evicted as new ones are added
// past said limit. Push() and Iter() are thread-safe.
type MsgBuffer struct {
msgs map[string]Message
order []string
limit int
startIndex int
lock sync.RWMutex
}
func NewMsgBuffer(limit int) *MsgBuffer {
m := &MsgBuffer{
msgs: map[string]Message{},
order: []string{},
limit: limit,
startIndex: 0,
lock: sync.RWMutex{},
}
return m
}
// Push pushes a new message onto the end of the buffer and evicts the oldest, if needed (based on limit)
func (m *MsgBuffer) Push(msg Message) {
m.lock.Lock()
defer m.lock.Unlock()
m.msgs[msg.UUID()] = msg
lastIndex := len(m.order) - 1
if len(m.order) == m.limit {
delete(m.msgs, m.order[m.startIndex]) // delete the current "first"
m.order[m.startIndex] = msg.UUID()
if m.startIndex == lastIndex {
m.startIndex = 0
} else {
m.startIndex++
}
} else {
m.order = append(m.order, msg.UUID())
}
}
// Iter calls msgFunc once per message in the buffer
func (m *MsgBuffer) Iter(msgFunc MsgFunc) {
m.lock.RLock()
defer m.lock.RUnlock()
if len(m.order) == 0 {
return
}
index := m.startIndex
lastIndex := len(m.order) - 1
more := true
for more {
uuid := m.order[index]
msg := m.msgs[uuid]
msgFunc(msg)
newIndex := index
if newIndex == lastIndex {
newIndex = 0
} else {
newIndex++
}
if newIndex == m.startIndex {
more = false
}
index = newIndex
}
}
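
A small sketch of the eviction behaviour described above, assuming a buffer limit of 3; the message contents are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/suborbital/grav/grav"
)

func main() {
	// a 3-slot buffer: pushing five messages evicts the two oldest
	buf := grav.NewMsgBuffer(3)

	for i := 0; i < 5; i++ {
		buf.Push(grav.NewMsg(grav.MsgTypeDefault, []byte(fmt.Sprintf("msg-%d", i))))
	}

	// prints msg-2, msg-3, msg-4 (oldest remaining first)
	buf.Iter(func(msg grav.Message) error {
		fmt.Println(string(msg.Data()))
		return nil
	})
}
```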

View File

@@ -1,72 +0,0 @@
package grav
import "github.com/suborbital/vektor/vlog"
// Options represent Grav options
type Options struct {
Logger *vlog.Logger
Transport Transport
Discovery Discovery
Port string
URI string
}
// OptionsModifier is a function that modifies an option
type OptionsModifier func(*Options)
func newOptionsWithModifiers(mods ...OptionsModifier) *Options {
opts := defaultOptions()
for _, m := range mods {
m(opts)
}
return opts
}
// UseLogger allows a custom logger to be used
func UseLogger(logger *vlog.Logger) OptionsModifier {
return func(o *Options) {
o.Logger = logger
}
}
// UseTransport sets the transport plugin to be used.
func UseTransport(transport Transport) OptionsModifier {
return func(o *Options) {
o.Transport = transport
}
}
// UseEndpoint sets the endpoint settings for the instance to broadcast for discovery
// Pass empty strings for either if you would like to keep the defaults (8080 and /meta/message)
func UseEndpoint(port, uri string) OptionsModifier {
return func(o *Options) {
if port != "" {
o.Port = port
}
if uri != "" {
o.URI = uri
}
}
}
// UseDiscovery sets the discovery plugin to be used
func UseDiscovery(discovery Discovery) OptionsModifier {
return func(o *Options) {
o.Discovery = discovery
}
}
func defaultOptions() *Options {
o := &Options{
Logger: vlog.Default(),
Port: "8080",
URI: "/meta/message",
Transport: nil,
Discovery: nil,
}
return o
}
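
A hedged sketch of the options surface; newConfiguredGrav and its transport parameter are illustrative, and passing nil simply keeps the in-process default.

```go
package example

import (
	"github.com/suborbital/grav/grav"
	"github.com/suborbital/vektor/vlog"
)

// newConfiguredGrav shows the options surface; transport may be any grav.Transport
// implementation (passing nil keeps the default, an in-process-only bus)
func newConfiguredGrav(transport grav.Transport) *grav.Grav {
	return grav.New(
		grav.UseLogger(vlog.Default()),
		grav.UseTransport(transport),
		grav.UseEndpoint("8081", "/meta/message"),
	)
}
```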

View File

@@ -1,277 +0,0 @@
package grav
import (
"errors"
"sync"
"sync/atomic"
)
const (
// defaultPodChanSize is the default size of the channels used for pod - bus communication
defaultPodChanSize = 128
)
// podFeedbackMsgReplay and others are the messages sent via feedback channel when the pod needs to communicate its state to the bus
var (
podFeedbackMsgReplay = NewMsg(msgTypePodFeedback, []byte{})
podFeedbackMsgSuccess = NewMsg(msgTypePodFeedback, []byte{})
podFeedbackMsgDisconnect = NewMsg(msgTypePodFeedback, []byte{})
)
/**
  Message flow (diagram originally drawn with Monodraw):

    Bus ──messageChan──▶ Pod ──On──▶ Pod Owner
    Bus ◀────busChan──── Pod ◀──Send── Pod Owner
**/
// Pod is a connection to Grav
// Pods are bi-directional. Messages can be sent to them from the bus, and they can be used to send messages
// to the bus. Pods are meant to be extremely lightweight with no persistence; they are meant to quickly
// and immediately route a message between its owner and the Bus. The Bus is responsible for any "smarts".
// Messages coming from the bus are filtered using the pod's messageFilter, which is configurable by the caller.
type Pod struct {
onFunc MsgFunc // the onFunc is called whenever a message is received
onFuncLock sync.RWMutex
messageChan MsgChan // messageChan is used to receive messages coming from the bus
feedbackChan MsgChan // feedbackChan is used to send "feedback" to the bus about the pod's status
busChan MsgChan // busChan is used to emit messages to the bus
*messageFilter // the embedded messageFilter controls which messages reach the onFunc
opts *podOpts
dead *atomic.Value
}
type podOpts struct {
WantsReplay bool
replayOnce sync.Once
}
// newPod creates a new Pod
func newPod(busChan MsgChan, opts *podOpts) *Pod {
p := &Pod{
onFuncLock: sync.RWMutex{},
messageChan: make(chan Message, defaultPodChanSize),
feedbackChan: make(chan Message, defaultPodChanSize),
busChan: busChan,
messageFilter: newMessageFilter(),
opts: opts,
dead: &atomic.Value{},
}
// do some "delayed setup"
p.opts.replayOnce = sync.Once{}
p.dead.Store(false)
p.start()
return p
}
// Send emits a message to be routed to the bus
// If the returned ticket is nil, it means the pod was unable to send
// It is safe to call methods on a nil ticket, they will error with ErrNoReceipt
// This means error checking can be done on a chained call such as err := p.Send(msg).WaitUntil(...)
func (p *Pod) Send(msg Message) *MsgReceipt {
// check to see if the pod has died (aka disconnected)
if p.dead.Load().(bool) {
return nil
}
p.FilterUUID(msg.UUID(), false) // don't allow the same message to bounce back through this pod
p.busChan <- msg
t := &MsgReceipt{
UUID: msg.UUID(),
pod: p,
}
return t
}
// ReplyTo sends a response to a message. The reply message's ticket is returned.
func (p *Pod) ReplyTo(inReplyTo Message, msg Message) *MsgReceipt {
msg.SetReplyTo(inReplyTo.UUID())
return p.Send(msg)
}
// On sets the function to be called whenever this pod receives a message from the bus. If nil is passed, the pod will ignore all messages.
// Calling On multiple times causes the function to be overwritten. To receive using two different functions, create two pods.
// Errors returned from the onFunc are interpreted as problems handling messages. Too many errors will result in the pod being disconnected.
// Failed messages will be replayed when messages begin to succeed. Returning an error is inadvisable unless there is a real problem handling messages.
func (p *Pod) On(onFunc MsgFunc) {
p.onFuncLock.Lock()
defer p.onFuncLock.Unlock()
p.setOnFunc(onFunc)
}
// OnType sets the function to be called whenever this pod receives a message and sets the pod's filter to only receive certain message types.
// The same rules as `On` about error handling apply to OnType.
func (p *Pod) OnType(msgType string, onFunc MsgFunc) {
p.onFuncLock.Lock()
defer p.onFuncLock.Unlock()
p.setOnFunc(onFunc)
p.FilterType(msgType, true)
p.TypeInclusive = false // only allow the listed types
}
// Disconnect indicates to the bus that this pod is no longer needed and should be disconnected.
// Sending will immediately become unavailable, and the pod will soon stop receiving messages.
func (p *Pod) Disconnect() {
// stop future messages from being sent and then indicate to the bus that disconnection is desired
// The bus will close the busChan, which will cause the onFunc listener to quit.
p.dead.Store(true)
p.feedbackChan <- podFeedbackMsgDisconnect
}
// ErrMsgNotWanted is used by WaitOn to determine if the current message is what's being waited on
var ErrMsgNotWanted = errors.New("message not wanted")
// ErrWaitTimeout is returned if a timeout is exceeded
var ErrWaitTimeout = errors.New("waited past timeout")
// WaitOn takes a function to be called whenever this pod receives a message and blocks until that function returns
// something other than ErrMsgNotWanted. WaitOn should be used if there is a need to wait for a particular message.
// When the onFunc returns something other than ErrMsgNotWanted (such as nil or a different error), WaitOn will return and set
// the onFunc to nil. If an error other than ErrMsgNotWanted is returned from the onFunc, it will be propagated to the caller.
// WaitOn will block forever if the desired message is never found. Use WaitUntil if a timeout is desired.
func (p *Pod) WaitOn(onFunc MsgFunc) error {
return p.WaitUntil(nil, onFunc)
}
// WaitUntil takes a function to be called whenever this pod receives a message and blocks until that function returns
// something other than ErrMsgNotWanted. WaitUntil should be used if there is a need to wait for a particular message.
// When the onFunc returns something other than ErrMsgNotWanted (such as nil or a different error), WaitUntil will return and set
// the onFunc to nil. If an error other than ErrMsgNotWanted is returned from the onFunc, it will be propagated to the caller.
// A timeout can be provided. If the timeout is non-nil and greater than 0, ErrWaitTimeout is returned if the time is exceeded.
func (p *Pod) WaitUntil(timeout TimeoutFunc, onFunc MsgFunc) error {
p.onFuncLock.Lock()
errChan := make(chan error)
p.setOnFunc(func(msg Message) error {
if err := onFunc(msg); err != nil {
if err == ErrMsgNotWanted {
return nil // don't do anything
}
errChan <- err
} else {
errChan <- nil
}
return nil
})
p.onFuncLock.Unlock() // can't stay locked here or the onFunc will never be called
var onFuncErr error
if timeout == nil {
timeout = Timeout(-1)
}
select {
case err := <-errChan:
onFuncErr = err
case <-timeout():
onFuncErr = ErrWaitTimeout
}
p.onFuncLock.Lock()
defer p.onFuncLock.Unlock()
p.setOnFunc(nil)
return onFuncErr
}
// waitOnReply waits on a reply message to arrive at the pod and then calls onFunc with that message.
// If the onFunc produces an error, it will be propagated to the caller.
// If a non-nil timeout greater than 0 is passed, the function will return ErrWaitTimeout if the timeout elapses.
func (p *Pod) waitOnReply(ticket *MsgReceipt, timeout TimeoutFunc, onFunc MsgFunc) error {
var reply Message
if err := p.WaitUntil(timeout, func(msg Message) error {
if msg.ReplyTo() != ticket.UUID {
return ErrMsgNotWanted
}
reply = msg
return nil
}); err != nil {
return err
}
return onFunc(reply)
}
// setOnFunc sets the OnFunc. THIS DOES NOT LOCK. THE CALLER MUST LOCK.
func (p *Pod) setOnFunc(on MsgFunc) {
// reset the message filter when the onFunc is changed
p.messageFilter = newMessageFilter()
p.onFunc = on
// request replay from the bus if needed
if on != nil {
p.opts.replayOnce.Do(func() {
if p.opts.WantsReplay {
p.feedbackChan <- podFeedbackMsgReplay
}
})
}
}
// busChans returns the messageChan and feedbackChan to be used by the bus
func (p *Pod) busChans() (MsgChan, MsgChan) {
return p.messageChan, p.feedbackChan
}
func (p *Pod) start() {
go func() {
// this loop ends when the bus closes the messageChan
for {
msg, ok := <-p.messageChan
if !ok {
break
}
go func() {
p.onFuncLock.RLock() // in case the onFunc gets replaced
defer p.onFuncLock.RUnlock()
if p.onFunc == nil {
return
}
if p.allow(msg) {
if err := p.onFunc(msg); err != nil {
// if the onFunc failed, send it back to the bus to be re-sent later
p.feedbackChan <- msg
} else {
// if it was successful, a success message on the channel lets the conn know all is well
p.feedbackChan <- podFeedbackMsgSuccess
}
}
}()
}
// if we've gotten this far, it means the pod has been killed and should not be allowed to send
p.dead.Store(true)
}()
}
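
A brief sketch of the type-filtered subscription described in the OnType documentation; the message types and the sleep are illustrative only.

```go
package main

import (
	"fmt"
	"time"

	"github.com/suborbital/grav/grav"
)

func main() {
	g := grav.New()

	// this pod only wants one message type; everything else is filtered out
	receiver := g.Connect()
	receiver.OnType("user.created", func(msg grav.Message) error {
		fmt.Println("new user:", string(msg.Data()))
		return nil
	})

	sender := g.Connect()
	sender.Send(grav.NewMsg("user.created", []byte("jillian")))
	sender.Send(grav.NewMsg("user.deleted", []byte("someone else"))) // never reaches the onFunc

	// crude wait for asynchronous delivery, for this sketch only
	time.Sleep(100 * time.Millisecond)

	// tell the bus this pod is no longer needed
	receiver.Disconnect()
}
```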

View File

@@ -1,260 +0,0 @@
package grav
import (
"errors"
"sync"
"sync/atomic"
)
const (
highWaterMark = 64
)
var (
errFailedMessage = errors.New("pod reports failed message")
errFailedMessageMax = errors.New("pod reports max number of failed messages, will terminate connection")
)
// connectionPool is a ring of connections to pods
// which will be iterated over constantly in order to send
// incoming messages to them
type connectionPool struct {
current *podConnection
maxID int64
lock sync.Mutex
}
func newConnectionPool() *connectionPool {
p := &connectionPool{
current: nil,
maxID: 0,
lock: sync.Mutex{},
}
return p
}
// insert inserts a new connection into the ring
func (c *connectionPool) insert(pod *Pod) {
c.lock.Lock()
defer c.lock.Unlock()
c.maxID++
id := c.maxID
conn := newPodConnection(id, pod)
// if there's nothing in the ring, create a "ring of one"
if c.current == nil {
conn.next = conn
c.current = conn
} else {
c.current.insertAfter(conn)
}
}
// peek returns a peek at the next connection in the ring without advancing the ring's current location
func (c *connectionPool) peek() *podConnection {
c.lock.Lock()
defer c.lock.Unlock()
return c.current.next
}
// next returns the next connection in the ring
func (c *connectionPool) next() *podConnection {
c.lock.Lock()
defer c.lock.Unlock()
c.current = c.current.next
return c.current
}
// prepareNext ensures that the next pod connection in the ring is ready to receive
// new messages by checking its status, deleting it if unhealthy or disconnected, replaying the message
// buffer if needed, or flushing failed messages back onto its channel if needed.
func (c *connectionPool) prepareNext(buffer *MsgBuffer) error {
// peek gives us the next conn without advancing the ring
// this makes it easy to delete the next conn if it's unhealthy
next := c.peek()
// check the state of the next connection
status := next.checkStatus()
if status.Error != nil {
// if the connection has an issue, handle it
if status.Error == errFailedMessageMax {
c.deleteNext()
return errors.New("removing next podConnection")
}
} else if status.WantsDisconnect {
// if the pod has requested disconnection, grant its wish
c.deleteNext()
return errors.New("next pod requested disconnection, removing podConnection")
} else if status.WantsReplay {
// if the pod has indicated that it wants a replay of recent messages, do so
c.replayNext(buffer)
}
if status.HadSuccess {
// if the most recent status check indicates there was a success,
// then tell the connection to flush any failed messages
// this is a no-op if there are no failed messages queued
next.flushFailed()
}
return nil
}
// replayNext replays the current message buffer into the next connection
func (c *connectionPool) replayNext(buffer *MsgBuffer) {
next := c.peek()
// iterate over the buffer and send each message to the pod
buffer.Iter(func(msg Message) error {
next.send(msg)
return nil
})
}
// deleteNext deletes the next connection in the ring
// this is useful after having checked the next conn's status
// and seen that it's unhealthy
func (c *connectionPool) deleteNext() {
c.lock.Lock()
defer c.lock.Unlock()
next := c.current.next
// indicate the conn is dead so future attempts to send are abandoned
next.dead.Store(true)
// close the messageChan so the pod can know it's been cut off
close(next.messageChan)
if next == c.current {
// if there's only one thing in the ring, empty the ring
c.current = nil
} else {
// cut out `next` and link `current` to `next-next`
c.current.next = next.next
}
}
// podConnection is a connection to a pod via its messageChan
// podConnection is also a circular linked list/ring of connections
// that is meant to be iterated around and inserted into/removed from
// forever as the bus sends events to the registered pods
type podConnection struct {
ID int64
next *podConnection
messageChan MsgChan
feedbackChan MsgChan
failed []Message
dead *atomic.Value
}
// connStatus is used to communicate the status of a podConnection back to the bus
type connStatus struct {
HadSuccess bool
WantsReplay bool
WantsDisconnect bool
Error error
}
func newPodConnection(id int64, pod *Pod) *podConnection {
msgChan, feedbackChan := pod.busChans()
p := &podConnection{
ID: id,
messageChan: msgChan,
feedbackChan: feedbackChan,
failed: []Message{},
dead: &atomic.Value{},
next: nil,
}
p.dead.Store(false)
return p
}
// send asynchronously writes a message to a connection's messageChan
// ordering on the messageChan is not guaranteed if it becomes full; this
// is sacrificed to ensure that the bus does not block because of a delinquent pod
func (p *podConnection) send(msg Message) {
go func() {
// if the conn is dead, abandon the attempt
if p.dead.Load().(bool) {
return
}
p.messageChan <- msg
}()
}
// checkStatus checks the pod's feedback for any information or failed messages and drains the failures into the failed Message buffer
func (p *podConnection) checkStatus() *connStatus {
status := &connStatus{
HadSuccess: false,
WantsReplay: false,
WantsDisconnect: false,
Error: nil,
}
done := false
for !done {
select {
case feedbackMsg := <-p.feedbackChan:
if feedbackMsg == podFeedbackMsgSuccess {
status.HadSuccess = true
} else if feedbackMsg == podFeedbackMsgReplay {
status.WantsReplay = true
} else if feedbackMsg == podFeedbackMsgDisconnect {
status.WantsDisconnect = true
} else {
p.failed = append(p.failed, feedbackMsg)
status.Error = errFailedMessage
}
default:
done = true
}
}
if len(p.failed) >= highWaterMark {
status.Error = errFailedMessageMax
}
return status
}
// flushFailed takes all of the failed messages in the failed queue
// and pushes them back out onto the pod's channel
func (p *podConnection) flushFailed() {
for i := range p.failed {
failedMsg := p.failed[i]
p.send(failedMsg)
}
if len(p.failed) > 0 {
p.failed = []Message{}
}
}
// insertAfter inserts a new connection into the ring
func (p *podConnection) insertAfter(conn *podConnection) {
next := p
if p.next != nil {
next = p.next
}
p.next = conn
conn.next = next
}

View File

@@ -1,44 +0,0 @@
package grav
import "github.com/pkg/errors"
// ErrNoReceipt is returned when a method is called on a nil ticket
var ErrNoReceipt = errors.New("message receipt is nil")
// MsgReceipt represents a "ticket" that references a message that was sent with the hopes of getting a response
// The embedded pod is a pointer to the pod that sent the original message, and therefore any ticket methods used
// will replace the OnFunc of the pod.
type MsgReceipt struct {
UUID string
pod *Pod
}
// WaitOn will block until a response to the message is received and passes it to the provided onFunc.
// onFunc errors are propagated to the caller.
func (m *MsgReceipt) WaitOn(onFunc MsgFunc) error {
return m.WaitUntil(nil, onFunc)
}
// WaitUntil will block until a response to the message is received and passes it to the provided onFunc.
// ErrWaitTimeout is returned if the timeout elapses; onFunc errors are propagated to the caller.
func (m *MsgReceipt) WaitUntil(timeout TimeoutFunc, onFunc MsgFunc) error {
if m == nil {
return ErrNoReceipt
}
return m.pod.waitOnReply(m, timeout, onFunc)
}
// OnReply will set the pod's OnFunc to the provided MsgFunc and set it to run asynchronously when a reply is received
// onFunc errors are discarded.
func (m *MsgReceipt) OnReply(mfn MsgFunc) error {
if m == nil {
return ErrNoReceipt
}
go func() {
m.pod.waitOnReply(m, nil, mfn)
}()
return nil
}
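
A sketch of the request/reply pattern these receipts enable, chaining Send with WaitUntil and a 5-second Timeout; the message types are illustrative.

```go
package main

import (
	"fmt"

	"github.com/suborbital/grav/grav"
)

func main() {
	g := grav.New()

	// the "responder" pod replies to anything it receives
	responder := g.Connect()
	responder.On(func(msg grav.Message) error {
		responder.ReplyTo(msg, grav.NewMsg("app.pong", []byte("pong")))
		return nil
	})

	// the "requester" sends a message and waits up to 5 seconds for a reply,
	// using the receipt returned by Send
	requester := g.Connect()
	err := requester.Send(grav.NewMsg("app.ping", []byte("ping"))).
		WaitUntil(grav.Timeout(5), func(reply grav.Message) error {
			fmt.Println("got reply:", string(reply.Data()))
			return nil
		})
	if err != nil {
		fmt.Println("no reply:", err)
	}
}
```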

View File

@@ -1,28 +0,0 @@
package grav
import "time"
// TimeoutFunc is a function that returns a channel that fires after a predetermined amount of time
type TimeoutFunc func() chan time.Time
// Timeout returns a function that returns a channel that fires after the provided number of seconds have elapsed
// if the value passed is less than or equal to 0, the timeout will never fire
func Timeout(seconds int) TimeoutFunc {
return func() chan time.Time {
tChan := make(chan time.Time)
if seconds > 0 {
go func() {
duration := time.Second * time.Duration(seconds)
tChan <- <-time.After(duration)
}()
}
return tChan
}
}
// TO is a shorthand for Timeout
func TO(seconds int) TimeoutFunc {
return Timeout(seconds)
}

View File

@@ -1,92 +0,0 @@
package grav
import (
"github.com/pkg/errors"
"github.com/suborbital/vektor/vlog"
)
// TransportMsgTypeHandshake and others represent internal Transport message types used for handshakes and metadata transfer
const (
TransportMsgTypeHandshake = 1
TransportMsgTypeUser = 2
)
// ErrConnectionClosed and others are transport and connection related errors
var (
ErrConnectionClosed = errors.New("connection was closed")
ErrNodeUUIDMismatch = errors.New("handshake UUID did not match node UUID")
ErrNotBridgeTransport = errors.New("transport is not a bridge")
ErrBridgeOnlyTransport = errors.New("transport only supports bridge connection")
)
var (
TransportTypeMesh = TransportType("transport.mesh")
TransportTypeBridge = TransportType("transport.bridge")
)
type (
// ReceiveFunc is a function that allows passing along a received message
ReceiveFunc func(msg Message)
// ConnectFunc is a function that provides a new Connection
ConnectFunc func(Connection)
// FindFunc allows a Transport to query Grav for an active connection for the given UUID
FindFunc func(uuid string) (Connection, bool)
// TransportType defines the type of Transport (mesh or bridge)
TransportType string
)
// TransportOpts is a set of options for transports
type TransportOpts struct {
NodeUUID string
Port string
URI string
Logger *vlog.Logger
Custom interface{}
}
// Transport represents a Grav transport plugin
type Transport interface {
// Type returns the transport's type (mesh or bridge)
Type() TransportType
// Setup is a transport-specific function that allows bootstrapping
// Setup can block forever if needed; for example, if a webserver is being run
Setup(opts *TransportOpts, connFunc ConnectFunc, findFunc FindFunc) error
// CreateConnection connects to an endpoint and returns the Connection
CreateConnection(endpoint string) (Connection, error)
// ConnectBridgeTopic connects to a topic and returns a TopicConnection
ConnectBridgeTopic(topic string) (TopicConnection, error)
}
// Connection represents a connection to another node
type Connection interface {
// Called when the connection handshake is complete and the connection can actively start exchanging messages
Start(recvFunc ReceiveFunc)
// Send a message from the local instance to the connected node
Send(msg Message) error
// CanReplace returns true if the connection can be replaced (i.e. is not a persistent connection like a websocket)
CanReplace() bool
// Initiate a handshake for an outgoing connection and return the remote Ack
DoOutgoingHandshake(handshake *TransportHandshake) (*TransportHandshakeAck, error)
// Wait for an incoming handshake and return the provided Ack to the remote connection
DoIncomingHandshake(handshakeAck *TransportHandshakeAck) (*TransportHandshake, error)
// Close requests that the Connection close itself
Close()
}
// TopicConnection is a connection to something via a bridge such as a topic
type TopicConnection interface {
// Called when the connection can actively start exchanging messages
Start(pod *Pod)
// Close requests that the Connection close itself
Close()
}
// TransportHandshake represents a handshake sent to a node that you're trying to connect to
type TransportHandshake struct {
UUID string `json:"uuid"`
}
// TransportHandshakeAck represents a handshake response
type TransportHandshakeAck struct {
UUID string `json:"uuid"`
}
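
A minimal sketch of what implementing the Transport interface involves; noopTransport is hypothetical, does nothing useful, and is not part of the removed vendored code.

```go
package example

import (
	"errors"

	"github.com/suborbital/grav/grav"
	"github.com/suborbital/vektor/vlog"
)

// noopTransport is a do-nothing mesh transport, included only to illustrate the plugin surface
type noopTransport struct {
	log *vlog.Logger
}

// Type reports this transport as a mesh (point-to-point) transport
func (t *noopTransport) Type() grav.TransportType {
	return grav.TransportTypeMesh
}

// Setup is where a real transport would start listening and call connFunc
// for every incoming connection once a handshake can be performed
func (t *noopTransport) Setup(opts *grav.TransportOpts, connFunc grav.ConnectFunc, findFunc grav.FindFunc) error {
	t.log = opts.Logger
	return nil
}

// CreateConnection is where a real transport would dial the endpoint
func (t *noopTransport) CreateConnection(endpoint string) (grav.Connection, error) {
	return nil, errors.New("noopTransport cannot dial " + endpoint)
}

// ConnectBridgeTopic is unsupported: mesh transports do not carry topics
func (t *noopTransport) ConnectBridgeTopic(topic string) (grav.TopicConnection, error) {
	return nil, grav.ErrNotBridgeTransport
}
```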

View File

@@ -1,5 +0,0 @@
# Grav Transport: Websocket
This is a streaming transport plugin for Grav that uses standard websockets.
Handler functions are made available for http.Server. Connections are managed by the `Transport` object.
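
A hedged sketch of wiring this transport into a Grav instance and an http.Server; the import path is assumed to follow the upstream module layout and may differ.

```go
package main

import (
	"net/http"

	"github.com/suborbital/grav/grav"
	gravws "github.com/suborbital/grav/transport/websocket" // path assumed from the upstream layout
)

func main() {
	gwt := gravws.New()

	// register the transport; the port/URI are what peers use to reach this node
	g := grav.New(
		grav.UseTransport(gwt),
		grav.UseEndpoint("8080", "/meta/message"),
	)

	// pods connected to g now exchange messages with any peer that connects
	pod := g.Connect()
	pod.On(func(msg grav.Message) error {
		return nil // handle messages from remote nodes here
	})

	// the transport does not serve by itself: mount its handler on an http server
	mux := http.NewServeMux()
	mux.Handle("/meta/message", gwt.HTTPHandlerFunc())

	http.ListenAndServe(":8080", mux)
}
```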

View File

@@ -1,261 +0,0 @@
package websocket
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"sync"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
"github.com/suborbital/grav/grav"
"github.com/suborbital/vektor/vlog"
)
var upgrader = websocket.Upgrader{}
// Transport is a transport that connects Grav nodes via standard websockets
type Transport struct {
opts *grav.TransportOpts
log *vlog.Logger
connectionFunc func(grav.Connection)
}
// Conn implements transport.Connection and represents a websocket connection
type Conn struct {
nodeUUID string
log *vlog.Logger
conn *websocket.Conn
cLock sync.Mutex
recvFunc grav.ReceiveFunc
}
// New creates a new websocket transport
func New() *Transport {
t := &Transport{}
return t
}
// Type returns the transport's type
func (t *Transport) Type() grav.TransportType {
return grav.TransportTypeMesh
}
// Setup sets up the transport
func (t *Transport) Setup(opts *grav.TransportOpts, connFunc grav.ConnectFunc, findFunc grav.FindFunc) error {
// independent serving is not yet implemented, use the HTTP handler
t.opts = opts
t.log = opts.Logger
t.connectionFunc = connFunc
return nil
}
// CreateConnection dials a websocket endpoint and returns the resulting Connection
func (t *Transport) CreateConnection(endpoint string) (grav.Connection, error) {
if !strings.HasPrefix(endpoint, "ws") {
endpoint = fmt.Sprintf("ws://%s", endpoint)
}
endpointURL, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
c, _, err := websocket.DefaultDialer.Dial(endpointURL.String(), nil)
if err != nil {
return nil, errors.Wrapf(err, "[transport-websocket] failed to Dial endpoint")
}
conn := &Conn{
log: t.log,
conn: c,
cLock: sync.Mutex{},
}
return conn, nil
}
// ConnectBridgeTopic connects to a topic if the transport is a bridge
func (t *Transport) ConnectBridgeTopic(topic string) (grav.TopicConnection, error) {
return nil, grav.ErrNotBridgeTransport
}
// HTTPHandlerFunc returns an http.HandlerFunc for incoming connections
func (t *Transport) HTTPHandlerFunc() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if t.connectionFunc == nil {
t.log.ErrorString("[transport-websocket] incoming connection received, but no connFunc configured")
w.WriteHeader(http.StatusInternalServerError)
return
}
c, err := upgrader.Upgrade(w, r, nil)
if err != nil {
t.log.Error(errors.Wrap(err, "[transport-websocket] failed to upgrade connection"))
return
}
t.log.Debug("[transport-websocket] upgraded connection:", r.URL.String())
conn := &Conn{
conn: c,
log: t.log,
}
t.connectionFunc(conn)
}
}
// Start begins the receiving of messages
func (c *Conn) Start(recvFunc grav.ReceiveFunc) {
c.recvFunc = recvFunc
c.conn.SetCloseHandler(func(code int, text string) error {
c.log.Warn(fmt.Sprintf("[transport-websocket] connection closing with code: %d", code))
return nil
})
go func() {
for {
_, message, err := c.conn.ReadMessage()
if err != nil {
c.log.Error(errors.Wrap(err, "[transport-websocket] failed to ReadMessage, terminating connection"))
break
}
c.log.Debug("[transport-websocket] received message via", c.nodeUUID)
msg, err := grav.MsgFromBytes(message)
if err != nil {
c.log.Error(errors.Wrap(err, "[transport-websocket] failed to MsgFromBytes"))
continue
}
// send to the Grav instance
c.recvFunc(msg)
}
}()
}
// Send sends a message to the connection
func (c *Conn) Send(msg grav.Message) error {
msgBytes, err := msg.Marshal()
if err != nil {
// not exactly sure what to do here (we don't want this going into the dead letter queue)
c.log.Error(errors.Wrap(err, "[transport-websocket] failed to Marshal message"))
return nil
}
c.log.Debug("[transport-websocket] sending message to connection", c.nodeUUID)
if err := c.WriteMessage(grav.TransportMsgTypeUser, msgBytes); err != nil {
if errors.Is(err, websocket.ErrCloseSent) {
return grav.ErrConnectionClosed
}
return errors.Wrap(err, "[transport-websocket] failed to WriteMessage")
}
c.log.Debug("[transport-websocket] sent message to connection", c.nodeUUID)
return nil
}
// CanReplace returns true if the connection can be replaced
func (c *Conn) CanReplace() bool {
return false
}
// DoOutgoingHandshake performs a connection handshake and returns the UUID of the node that we're connected to
// so that it can be validated against the UUID that was provided in discovery (or accepted as-is if none was provided)
func (c *Conn) DoOutgoingHandshake(handshake *grav.TransportHandshake) (*grav.TransportHandshakeAck, error) {
handshakeJSON, err := json.Marshal(handshake)
if err != nil {
return nil, errors.Wrap(err, "failed to Marshal handshake JSON")
}
c.log.Debug("[transport-websocket] sending handshake")
if err := c.WriteMessage(grav.TransportMsgTypeHandshake, handshakeJSON); err != nil {
return nil, errors.Wrap(err, "failed to WriteMessage handshake")
}
mt, message, err := c.conn.ReadMessage()
if err != nil {
return nil, errors.Wrap(err, "failed to ReadMessage for handshake ack, terminating connection")
}
if mt != grav.TransportMsgTypeHandshake {
return nil, errors.New("first message received was not handshake ack")
}
c.log.Debug("[transport-websocket] received handshake ack")
ack := grav.TransportHandshakeAck{}
if err := json.Unmarshal(message, &ack); err != nil {
return nil, errors.Wrap(err, "failed to Unmarshal handshake ack")
}
c.nodeUUID = ack.UUID
return &ack, nil
}
// DoIncomingHandshake performs a connection handshake and returns the UUID of the node that we're connected to
// so that the connection can be registered against that node's UUID
func (c *Conn) DoIncomingHandshake(handshakeAck *grav.TransportHandshakeAck) (*grav.TransportHandshake, error) {
mt, message, err := c.conn.ReadMessage()
if err != nil {
return nil, errors.Wrap(err, "failed to ReadMessage for handshake, terminating connection")
}
if mt != grav.TransportMsgTypeHandshake {
return nil, errors.New("first message received was not handshake")
}
c.log.Debug("[transport-websocket] received handshake")
handshake := grav.TransportHandshake{}
if err := json.Unmarshal(message, &handshake); err != nil {
return nil, errors.Wrap(err, "failed to Unmarshal handshake")
}
ackJSON, err := json.Marshal(handshakeAck)
if err != nil {
return nil, errors.Wrap(err, "failed to Marshal handshake JSON")
}
c.log.Debug("[transport-websocket] sending handshake ack")
if err := c.WriteMessage(grav.TransportMsgTypeHandshake, ackJSON); err != nil {
return nil, errors.Wrap(err, "failed to WriteMessage handshake ack")
}
c.log.Debug("[transport-websocket] sent handshake ack")
c.nodeUUID = handshake.UUID
return &handshake, nil
}
// Close closes the underlying connection
func (c *Conn) Close() {
c.log.Debug("[transport-websocket] connection for", c.nodeUUID, "is closing")
c.conn.Close()
}
// WriteMessage is a concurrent-safe wrapper around the websocket WriteMessage
func (c *Conn) WriteMessage(messageType int, data []byte) error {
c.cLock.Lock()
defer c.cLock.Unlock()
return c.conn.WriteMessage(messageType, data)
}

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,14 +0,0 @@
# vektor API
`vk` is the vektor component that allows for easy development of API servers in Go.
Features:
- HTTPS by default using LetsEncrypt
- Easy configuration of CORS
- Built in logging
- Authentication plug-in point
- Fast HTTP router built in
Planned:
- Rate limiter
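
A minimal sketch of defining routes with vk; vk.New, AddGroup and Start are assumed from the upstream server API, which is not part of this vendored excerpt.

```go
package main

import (
	"net/http"

	"github.com/suborbital/vektor/vk"
)

// handleHello echoes the :name path parameter as JSON
func handleHello(r *http.Request, ctx *vk.Ctx) (interface{}, error) {
	return map[string]string{"hello": ctx.Params.ByName("name")}, nil
}

func main() {
	api := vk.Group("/api/v1").Before(vk.CORSMiddleware("*"))
	api.GET("/hello/:name", handleHello)

	// vk.New, AddGroup and Start belong to the upstream vk server API,
	// which is outside this vendored excerpt
	server := vk.New(vk.UseAppName("example"), vk.UseHTTPPort(9000))
	server.AddGroup(api)
	server.Start()
}
```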

View File

@@ -1,76 +0,0 @@
package vk
import (
"context"
"net/http"
"github.com/google/uuid"
"github.com/julienschmidt/httprouter"
"github.com/suborbital/vektor/vlog"
)
// ctxKey is a type to represent a key in the Ctx context.
type ctxKey string
// Ctx serves a similar purpose to context.Context, but has some typed fields
type Ctx struct {
Context context.Context
Log *vlog.Logger
Params httprouter.Params
RespHeaders http.Header
requestID string
scope interface{}
}
// NewCtx creates a new Ctx
func NewCtx(log *vlog.Logger, params httprouter.Params, headers http.Header) *Ctx {
ctx := &Ctx{
Context: context.Background(),
Log: log,
Params: params,
RespHeaders: headers,
}
return ctx
}
// Set sets a value on the Ctx's embedded Context (a la key/value store)
func (c *Ctx) Set(key string, val interface{}) {
realKey := ctxKey(key)
c.Context = context.WithValue(c.Context, realKey, val)
}
// Get gets a value from the Ctx's embedded Context (a la key/value store)
func (c *Ctx) Get(key string) interface{} {
realKey := ctxKey(key)
val := c.Context.Value(realKey)
return val
}
// UseScope sets an object to be the scope of the request, including setting the logger's scope.
// The scope can be retrieved later with the Scope() method.
func (c *Ctx) UseScope(scope interface{}) {
c.Log = c.Log.CreateScoped(scope)
c.scope = scope
}
// Scope retrieves the context's scope
func (c *Ctx) Scope() interface{} {
return c.scope
}
// UseRequestID is a setter for the request ID
func (c *Ctx) UseRequestID(id string) {
c.requestID = id
}
// RequestID returns the request ID of the current request, generating one if none exists.
func (c *Ctx) RequestID() string {
if c.requestID == "" {
c.requestID = uuid.New().String()
}
return c.requestID
}
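
A small sketch of using Ctx as a per-request key/value store; userMiddleware, handleProfile and the X-Username header are illustrative assumptions.

```go
package handlers

import (
	"net/http"

	"github.com/suborbital/vektor/vk"
)

// userMiddleware stashes a value derived from the request for downstream handlers
func userMiddleware(r *http.Request, ctx *vk.Ctx) error {
	ctx.Set("username", r.Header.Get("X-Username"))
	return nil
}

// handleProfile reads the stashed value back out and logs the request ID
func handleProfile(r *http.Request, ctx *vk.Ctx) (interface{}, error) {
	username, _ := ctx.Get("username").(string)
	ctx.Log.Info("serving profile for", username, "request", ctx.RequestID())

	return map[string]string{"username": username}, nil
}
```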

View File

@@ -1,91 +0,0 @@
package vk
import (
"encoding/json"
"fmt"
"github.com/suborbital/vektor/vlog"
)
// Error is an interface representing a failed request
type Error interface {
Error() string // this ensures all Errors will also conform to the normal error interface
Message() string
Status() int
}
// ErrorResponse is a concrete implementation of Error,
// representing a failed HTTP request
type ErrorResponse struct {
StatusCode int `json:"status"`
MessageText string `json:"message"`
}
// Error returns a full error string
func (e *ErrorResponse) Error() string {
return fmt.Sprintf("%d: %s", e.StatusCode, e.MessageText)
}
// Status returns the error status code
func (e *ErrorResponse) Status() int {
return e.StatusCode
}
// Message returns the error's message
func (e *ErrorResponse) Message() string {
return e.MessageText
}
// Err returns an error with status and message
func Err(status int, message string) Error {
e := &ErrorResponse{
StatusCode: status,
MessageText: message,
}
return e
}
// E is Err for those who like terse code
func E(status int, message string) Error {
return Err(status, message)
}
// Wrap wraps an error in vk.Error
func Wrap(status int, err error) Error {
return Err(status, err.Error())
}
var (
genericErrorResponseBytes = []byte("Internal Server Error")
genericErrorResponseCode = 500
)
// converts an error into response bytes as best it can:
// if the error is a vk.Error, returns (status, {status: status, message: message}) as JSON
// for any other error, the message is redacted and (500, "Internal Server Error") is returned
func errorOrOtherToBytes(l *vlog.Logger, err error) (int, []byte, contentType) {
statusCode := genericErrorResponseCode
// first, check if it's vk.Error interface type, and unpack it for further processing
if e, ok := err.(Error); ok {
statusCode = e.Status() // grab this in case anything fails
errResp := Err(e.Status(), e.Message()) // create a concrete instance that can be marshalled
errJSON, marshalErr := json.Marshal(errResp)
if marshalErr != nil {
// any failure results in the generic response body being used
l.ErrorString("failed to marshal vk.Error:", marshalErr.Error(), "original error:", err.Error())
return statusCode, genericErrorResponseBytes, contentTypeTextPlain
}
return statusCode, errJSON, contentTypeJSON
}
l.Warn("redacting potentially unsafe error response, original error:", err.Error())
return statusCode, genericErrorResponseBytes, contentTypeTextPlain
}
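
A sketch of returning typed errors from a handler so they render as the JSON ErrorResponse above rather than being redacted; lookupUser and the user type are illustrative stand-ins for application code.

```go
package handlers

import (
	"net/http"

	"github.com/suborbital/vektor/vk"
)

type user struct {
	ID string `json:"id"`
}

// lookupUser is a hypothetical application function standing in for a real datastore
func lookupUser(id string) (*user, error) {
	if id == "" {
		return nil, nil
	}
	return &user{ID: id}, nil
}

// handleGetUser maps datastore outcomes onto vk error responses
func handleGetUser(r *http.Request, ctx *vk.Ctx) (interface{}, error) {
	u, err := lookupUser(ctx.Params.ByName("id"))
	if err != nil {
		return nil, vk.Wrap(http.StatusInternalServerError, err)
	}
	if u == nil {
		return nil, vk.E(http.StatusNotFound, "user not found")
	}

	return u, nil
}
```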

View File

@@ -1,140 +0,0 @@
package vk
import (
"fmt"
"net/http"
"strings"
)
// RouteGroup represents a group of routes
type RouteGroup struct {
prefix string
routes []routeHandler
middleware []Middleware
afterware []Afterware
}
type routeHandler struct {
Method string
Path string
Handler HandlerFunc
}
// Group creates a group of routes with a common prefix and middlewares
func Group(prefix string) *RouteGroup {
rg := &RouteGroup{
prefix: prefix,
routes: []routeHandler{},
middleware: []Middleware{},
afterware: []Afterware{},
}
return rg
}
// GET is a shortcut for server.Handle(http.MethodGet, path, handler)
func (g *RouteGroup) GET(path string, handler HandlerFunc) {
g.addRouteHandler(http.MethodGet, path, handler)
}
// HEAD is a shortcut for server.Handle(http.MethodHead, path, handler)
func (g *RouteGroup) HEAD(path string, handler HandlerFunc) {
g.addRouteHandler(http.MethodHead, path, handler)
}
// OPTIONS is a shortcut for server.Handle(http.MethodOptions, path, handler)
func (g *RouteGroup) OPTIONS(path string, handler HandlerFunc) {
g.addRouteHandler(http.MethodOptions, path, handler)
}
// POST is a shortcut for server.Handle(http.MethodPost, path, handler)
func (g *RouteGroup) POST(path string, handler HandlerFunc) {
g.addRouteHandler(http.MethodPost, path, handler)
}
// PUT is a shortcut for server.Handle(http.MethodPut, path, handler)
func (g *RouteGroup) PUT(path string, handler HandlerFunc) {
g.addRouteHandler(http.MethodPut, path, handler)
}
// PATCH is a shortcut for server.Handle(http.MethodPatch, path, handler)
func (g *RouteGroup) PATCH(path string, handler HandlerFunc) {
g.addRouteHandler(http.MethodPatch, path, handler)
}
// DELETE is a shortcut for server.Handle(http.MethodDelete, path, handler)
func (g *RouteGroup) DELETE(path string, handler HandlerFunc) {
g.addRouteHandler(http.MethodDelete, path, handler)
}
// Handle adds a route to be handled
func (g *RouteGroup) Handle(method, path string, handler HandlerFunc) {
g.addRouteHandler(method, path, handler)
}
// AddGroup adds a group of routes to this group as a subgroup.
// the subgroup's prefix is added to all of the routes it contains,
// with the resulting path being "/group.prefix/subgroup.prefix/route/path/here"
func (g *RouteGroup) AddGroup(group *RouteGroup) {
g.routes = append(g.routes, group.routeHandlers()...)
}
// Before adds middleware to the group, which are applied to every handler in the group (called before the handler)
func (g *RouteGroup) Before(middleware ...Middleware) *RouteGroup {
g.middleware = append(g.middleware, middleware...)
return g
}
// After adds afterware to the group, which are applied to every handler in the group (called after the handler)
func (g *RouteGroup) After(afterware ...Afterware) *RouteGroup {
g.afterware = append(g.afterware, afterware...)
return g
}
// routeHandlers computes the "full" path for each handler, and creates
// a HandlerFunc that chains together the group's middlewares
// before calling the inner HandlerFunc. It can be called 'recursively'
// since groups can be added to groups
func (g *RouteGroup) routeHandlers() []routeHandler {
routes := make([]routeHandler, len(g.routes))
for i, r := range g.routes {
fullPath := fmt.Sprintf("%s%s", ensureLeadingSlash(g.prefix), ensureLeadingSlash(r.Path))
augR := routeHandler{
Method: r.Method,
Path: fullPath,
Handler: augmentHandler(r.Handler, g.middleware, g.afterware),
}
routes[i] = augR
}
return routes
}
func (g *RouteGroup) addRouteHandler(method string, path string, handler HandlerFunc) {
rh := routeHandler{
Method: method,
Path: path,
Handler: handler,
}
g.routes = append(g.routes, rh)
}
func (g *RouteGroup) routePrefix() string {
return g.prefix
}
func ensureLeadingSlash(path string) string {
if path == "" {
// handle the "root group" case
return ""
} else if !strings.HasPrefix(path, "/") {
path = "/" + path
}
return path
}
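
// The following is a minimal sketch of how this RouteGroup API might be used by a
// consumer of the vk package, relying only on the functions shown above; the server
// type that ultimately mounts the group is not part of this diff, so it is omitted.
package main

import (
    "net/http"

    "github.com/suborbital/vektor/vk"
)

// hello matches vk.HandlerFunc: it returns a response body and an error.
func hello(r *http.Request, ctx *vk.Ctx) (interface{}, error) {
    return "hello", nil
}

func main() {
    // group API routes under /api/v1 and allow CORS from any origin
    api := vk.Group("/api/v1").Before(vk.CORSMiddleware("*"))
    api.GET("/hello", hello)

    // nested group: its routes resolve to /api/v1/admin/...
    admin := vk.Group("/admin")
    admin.POST("/restart", hello)
    api.AddGroup(admin)

    // in a real program the group would then be mounted on the package's
    // server type, which is not shown in this diff
    _ = api
}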

View File

@@ -1,80 +0,0 @@
package vk
import (
"net/http"
)
// Middleware represents a handler that runs on a request before reaching its handler
type Middleware func(*http.Request, *Ctx) error
// Afterware represents a handler that runs on a request after the handler has dealt with the request
type Afterware func(*http.Request, *Ctx)
// ContentTypeMiddleware returns a Middleware that sets the response Content-Type header
func ContentTypeMiddleware(contentType string) Middleware {
return func(r *http.Request, ctx *Ctx) error {
ctx.RespHeaders.Set(contentTypeHeaderKey, contentType)
return nil
}
}
// CORSMiddleware enables CORS with the given domain for a route
// pass "*" to allow all domains, or empty string to allow none
func CORSMiddleware(domain string) Middleware {
return func(r *http.Request, ctx *Ctx) error {
enableCors(ctx, domain)
return nil
}
}
// CORSHandler enables CORS for a route
// pass "*" to allow all domains, or empty string to allow none
func CORSHandler(domain string) HandlerFunc {
return func(r *http.Request, ctx *Ctx) (interface{}, error) {
enableCors(ctx, domain)
return nil, nil
}
}
func enableCors(ctx *Ctx, domain string) {
if domain != "" {
ctx.RespHeaders.Set("Access-Control-Allow-Origin", domain)
ctx.RespHeaders.Set("X-Requested-With", "XMLHttpRequest")
ctx.RespHeaders.Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, Authorization, cache-control")
}
}
func loggerMiddleware() Middleware {
return func(r *http.Request, ctx *Ctx) error {
ctx.Log.Info(r.Method, r.URL.String())
return nil
}
}
// augmentHandler generates a HandlerFunc that runs the request through the Middleware chain before the inner handler, and the Afterware chain after it
func augmentHandler(inner HandlerFunc, middleware []Middleware, afterware []Afterware) HandlerFunc {
return func(r *http.Request, ctx *Ctx) (interface{}, error) {
defer func() {
// run the afterware (which cannot affect the response)
// even if something in the request chain fails
for _, a := range afterware {
a(r, ctx)
}
}()
// run the middleware (which can error to stop progression)
for _, m := range middleware {
if err := m(r, ctx); err != nil {
return nil, err
}
}
resp, err := inner(r, ctx)
return resp, err
}
}
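
// As a rough illustration of the Middleware/Afterware contract above: a Middleware
// that returns an error stops the chain, while an Afterware always runs after the
// handler. The names requireHeader and logAfter are hypothetical, and the example
// assumes the RouteGroup API from the previous file.
package main

import (
    "errors"
    "net/http"

    "github.com/suborbital/vektor/vk"
)

// requireHeader is a hypothetical Middleware: returning an error stops
// the chain, so the route's handler never runs.
func requireHeader(name string) vk.Middleware {
    return func(r *http.Request, ctx *vk.Ctx) error {
        if r.Header.Get(name) == "" {
            return errors.New("missing required header: " + name)
        }
        return nil
    }
}

// logAfter is a hypothetical Afterware: it runs after the handler and can
// observe the request, but cannot change the response.
func logAfter() vk.Afterware {
    return func(r *http.Request, ctx *vk.Ctx) {
        ctx.Log.Info(r.Method, r.URL.String())
    }
}

func main() {
    secure := vk.Group("/secure").
        Before(requireHeader("Authorization")).
        After(logAfter())

    secure.GET("/status", func(r *http.Request, ctx *vk.Ctx) (interface{}, error) {
        return "ok", nil
    })
    _ = secure
}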

View File

@@ -1,73 +0,0 @@
package vk
import (
"crypto/tls"
"net/http"
"github.com/suborbital/vektor/vlog"
)
// OptionsModifier takes an options struct and returns a modified Options struct
type OptionsModifier func(*Options)
// UseDomain sets the server to use a particular domain for TLS
func UseDomain(domain string) OptionsModifier {
return func(o *Options) {
o.Domain = domain
}
}
// UseTLSConfig sets a TLS config that will be used for HTTPS
// This will take precedence over the Domain option in all cases
func UseTLSConfig(config *tls.Config) OptionsModifier {
return func(o *Options) {
o.TLSConfig = config
}
}
// UseTLSPort sets the HTTPS port to be used
func UseTLSPort(port int) OptionsModifier {
return func(o *Options) {
o.TLSPort = port
}
}
// UseHTTPPort sets the HTTP port to be used:
// if a domain is set, the HTTP port will be used for the Let's Encrypt challenge server;
// if a domain is NOT set, this option will put VK in insecure HTTP mode
func UseHTTPPort(port int) OptionsModifier {
return func(o *Options) {
o.HTTPPort = port
}
}
// UseLogger allows a custom logger to be used
func UseLogger(logger *vlog.Logger) OptionsModifier {
return func(o *Options) {
o.Logger = logger
}
}
// UseAppName allows an app name to be set (for vanity only, really....)
func UseAppName(name string) OptionsModifier {
return func(o *Options) {
o.AppName = name
}
}
// UseEnvPrefix uses the provided env prefix (default VK) when looking up other options such as `VK_HTTP_PORT`
func UseEnvPrefix(prefix string) OptionsModifier {
return func(o *Options) {
o.EnvPrefix = prefix
}
}
// UseInspector sets a function that will be allowed to inspect every HTTP request
// before it reaches VK's internal router, but cannot modify said request or affect
// the handling of said request in any way. Use at your own risk, as it may introduce
// performance issues if not used correctly.
func UseInspector(isp func(http.Request)) OptionsModifier {
return func(o *Options) {
o.PreRouterInspector = isp
}
}
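
// These modifiers follow the functional-options pattern: each is just a func(*Options)
// that mutates the struct it is given. Below is a small sketch of applying them directly;
// in normal use they would be passed to the package's server constructor, which is not
// shown in this diff, and the option values used here are purely illustrative.
package main

import (
    "fmt"

    "github.com/suborbital/vektor/vk"
)

func main() {
    opts := &vk.Options{}

    // each modifier mutates the Options struct in place
    for _, mod := range []vk.OptionsModifier{
        vk.UseAppName("example-app"),
        vk.UseHTTPPort(8080),
        vk.UseEnvPrefix("EXAMPLE"),
    } {
        mod(opts)
    }

    fmt.Println(opts.AppName, opts.HTTPPort) // example-app 8080
}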

View File

@@ -1,95 +0,0 @@
package vk
import (
"context"
"crypto/tls"
"net/http"
"github.com/pkg/errors"
"github.com/sethvargo/go-envconfig"
"github.com/suborbital/vektor/vlog"
)
// Options are the available options for Server
type Options struct {
AppName string `env:"_APP_NAME"`
Domain string `env:"_DOMAIN"`
HTTPPort int `env:"_HTTP_PORT"`
TLSPort int `env:"_TLS_PORT"`
TLSConfig *tls.Config `env:"-"`
EnvPrefix string `env:"-"`
Logger *vlog.Logger `env:"-"`
PreRouterInspector func(http.Request) `env:"-"`
}
func newOptsWithModifiers(mods ...OptionsModifier) *Options {
options := &Options{}
// loop through the provided options and apply the
// modifier function to the options object
for _, mod := range mods {
mod(options)
}
envPrefix := defaultEnvPrefix
if options.EnvPrefix != "" {
envPrefix = options.EnvPrefix
}
options.finalize(envPrefix)
return options
}
// ShouldUseTLS returns true if a domain is set and/or a TLS config is provided
func (o *Options) ShouldUseTLS() bool {
return o.Domain != "" || o.TLSConfig != nil
}
// HTTPPortSet returns true if the HTTP port is set
func (o *Options) HTTPPortSet() bool {
return o.HTTPPort != 0
}
// ShouldUseHTTP returns true if insecure HTTP should be used
func (o *Options) ShouldUseHTTP() bool {
return !o.ShouldUseTLS() && o.HTTPPortSet()
}
// finalize "locks in" the options by overriding any existing options with the version from the environment, and setting the default logger if needed
func (o *Options) finalize(prefix string) {
if o.Logger == nil {
o.Logger = vlog.Default(vlog.EnvPrefix(prefix))
}
// if no inspector was set, create an empty one
if o.PreRouterInspector == nil {
o.PreRouterInspector = func(_ http.Request) {}
}
envOpts := Options{}
if err := envconfig.ProcessWith(context.Background(), &envOpts, envconfig.PrefixLookuper(prefix, envconfig.OsLookuper())); err != nil {
o.Logger.Error(errors.Wrap(err, "[vk] failed to ProcessWith environment config"))
return
}
o.replaceFieldsIfNeeded(&envOpts)
}
func (o *Options) replaceFieldsIfNeeded(replacement *Options) {
if replacement.AppName != "" {
o.AppName = replacement.AppName
}
if replacement.Domain != "" {
o.Domain = replacement.Domain
}
if replacement.HTTPPort != 0 {
o.HTTPPort = replacement.HTTPPort
}
if replacement.TLSPort != 0 {
o.TLSPort = replacement.TLSPort
}
}
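
// A short sketch of how the Should* helpers above interact. Options is constructed
// directly here for illustration only; in practice it is built via newOptsWithModifiers
// and may be overridden by environment variables such as VK_HTTP_PORT.
package main

import (
    "fmt"

    "github.com/suborbital/vektor/vk"
)

func main() {
    // a domain (or a TLS config) implies TLS; otherwise a set HTTP port
    // puts the server in insecure HTTP mode
    withDomain := &vk.Options{Domain: "example.com"}
    fmt.Println(withDomain.ShouldUseTLS())  // true
    fmt.Println(withDomain.ShouldUseHTTP()) // false

    httpOnly := &vk.Options{HTTPPort: 8080}
    fmt.Println(httpOnly.ShouldUseTLS())  // false
    fmt.Println(httpOnly.HTTPPortSet())   // true
    fmt.Println(httpOnly.ShouldUseHTTP()) // true
}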

Some files were not shown because too many files have changed in this diff.