mirror of https://github.com/1Password/onepassword-operator.git
synced 2025-10-22 07:28:06 +00:00

Compare commits: autorestar...inject-web

3 commits:
a8e6a4a4f1
a5f4a7a0c1
b35c668959
Makefile (23 lines changed)

@@ -11,7 +11,9 @@ versionFile = $(CURDIR)/.VERSION
curVersion := $(shell cat $(versionFile) | sed 's/^v//')

OPERATOR_NAME := onepassword-connect-operator
DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
OPERATOR_NAME := onepassword-secrets-injector
OPERATOR_DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
INJECTOR_DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)

test: ## Run test suite
	go test ./...

@@ -20,18 +22,31 @@ test/coverage: ## Run test suite with coverage report
	go test -v ./... -cover

build/operator: ## Build operator Docker image
	@docker build -f operator/Dockerfile --build-arg operator_version=$(curVersion) -t $(DOCKER_IMG_TAG) .
	@docker build -f operator/Dockerfile --build-arg operator_version=$(curVersion) -t $(OPERATOR_DOCKER_IMG_TAG) .
	@echo "Successfully built and tagged image."
	@echo "Tag: $(DOCKER_IMG_TAG)"
	@echo "Tag: $(OPERATOR_DOCKER_IMG_TAG)"

build/operator/local: ## Build local version of the operator Docker image
	@docker build -f operator/Dockerfile -t local/$(DOCKER_IMG_TAG) .
	@docker build -f operator/Dockerfile -t local/$(OPERATOR_DOCKER_IMG_TAG) .

build/operator/binary: clean ## Build operator binary
	@mkdir -p dist
	@go build -mod vendor -a -o manager ./operator/cmd/manager/main.go
	@mv manager ./dist

build/secret-injector: ## Build secret-injector Docker image
	@docker build -f secret-injector/Dockerfile --build-arg operator_version=$(curVersion) -t $(INJECTOR_DOCKER_IMG_TAG) .
	@echo "Successfully built and tagged image."
	@echo "Tag: $(INJECTOR_DOCKER_IMG_TAG)"

build/secret-injector/local: ## Build local version of the secret-injector Docker image
	@docker build -f secret-injector/Dockerfile -t local/$(INJECTOR_DOCKER_IMG_TAG) .

build/secret-injector/binary: clean ## Build secret-injector binary
	@mkdir -p dist
	@go build -mod vendor -a -o manager ./secret-injector/cmd/manager/main.go
	@mv manager ./dist

clean:
	rm -rf ./dist

README.md

@@ -12,6 +12,10 @@ The 1Password Connect Kubernetes Operator will continually check for updates fro

[Click here for more details on the 1Password Kubernetes Operator](operator/README.md)

## 1Password Secret Injector

[Click here for more details on the 1Password Secret Injector](secret-injector/README.md)

# Security

go.mod (4 lines changed)

@@ -4,14 +4,18 @@ go 1.13

require (
	github.com/1Password/connect-sdk-go v1.0.1
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
	github.com/operator-framework/operator-sdk v0.19.0
	github.com/prometheus/common v0.14.0 // indirect
	github.com/spf13/pflag v1.0.5
	github.com/stretchr/testify v1.6.1
	github.com/suborbital/grav v0.4.1
	github.com/suborbital/vektor v0.5.0
	k8s.io/api v0.18.2
	k8s.io/apimachinery v0.18.2
	k8s.io/client-go v12.0.0+incompatible
	k8s.io/kubectl v0.18.2
	k8s.io/kubernetes v1.13.0
	sigs.k8s.io/controller-runtime v0.6.0
)

go.sum (70 lines changed)
@@ -68,6 +68,7 @@ github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHS
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
|
||||
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
|
||||
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
@@ -158,9 +159,11 @@ github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqh
|
||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
|
||||
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
|
||||
github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb h1:nXPkFq8X1a9ycY3GYQpFNxHh3j2JgY7zDZfq2EXMIzk=
|
||||
github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
|
||||
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
@@ -220,16 +223,19 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG
|
||||
github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce h1:KXS1Jg+ddGcWA8e1N7cupxaHHZhit5rB9tfDU+mfjyY=
|
||||
github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
@@ -370,6 +376,7 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
|
||||
@@ -407,6 +414,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
@@ -429,6 +437,8 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
@@ -454,6 +464,8 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
@@ -554,14 +566,17 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
@@ -620,6 +635,7 @@ github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
|
||||
github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU=
|
||||
github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0=
|
||||
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
|
||||
github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
@@ -654,10 +670,16 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
|
||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
||||
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
|
||||
github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
|
||||
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
|
||||
github.com/nats-io/nats-server/v2 v2.3.2/go.mod h1:dUf7Cm5z5LbciFVwWx54owyCKm8x4/hL6p7rrljhLFY=
|
||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
||||
github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
|
||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
|
||||
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
@@ -692,11 +714,14 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU=
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
|
||||
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
|
||||
@@ -823,9 +848,14 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0
|
||||
github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4=
|
||||
github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/schollz/peerdiscovery v1.6.1 h1:2V/Dw+GZY18W6e3yAeUzJouHmIcr9UlWtqsQtpfObGw=
|
||||
github.com/schollz/peerdiscovery v1.6.1/go.mod h1:bq5/NB9o9/jyEwiW4ubehfToBa2LwdQQMoNiy/vSdYg=
|
||||
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sethvargo/go-envconfig v0.3.0/go.mod h1:XZ2JRR7vhlBEO5zMmOpLgUhgYltqYqq4d4tKagtPUv0=
|
||||
github.com/sethvargo/go-envconfig v0.3.2 h1:277Lb2iTpUZjUZu1qLoLa/aetwvtZbKh8wNWXmc6dSk=
|
||||
github.com/sethvargo/go-envconfig v0.3.2/go.mod h1:XZ2JRR7vhlBEO5zMmOpLgUhgYltqYqq4d4tKagtPUv0=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
@@ -838,6 +868,7 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
|
||||
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
@@ -885,6 +916,11 @@ github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/suborbital/grav v0.4.1 h1:g70gG4EVqNcy5LMII05ayLtxMD8v5M9kBW1BcJFYsC0=
|
||||
github.com/suborbital/grav v0.4.1/go.mod h1:jN+zB9O6ztW2GqruEU46EMOFjvc8K2UDLyofFJWdI8o=
|
||||
github.com/suborbital/vektor v0.2.2/go.mod h1:6YQE7r6t1JcVs3twpqjXDftsLUaTNUk5YorRKHcDamI=
|
||||
github.com/suborbital/vektor v0.5.0 h1:E5PPiBYboarFoprUmjjG/ieVCeIUpD/1F2MnVa/iaDs=
|
||||
github.com/suborbital/vektor v0.5.0/go.mod h1:iNMR6/alEK1D7fbupwIlGlK5LajngEvq/N+RGVWaqNw=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc=
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
@@ -986,10 +1022,16 @@ golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8=
|
||||
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1057,6 +1099,11 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3ob
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c=
|
||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1080,6 +1127,7 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -1126,6 +1174,14 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1133,12 +1189,19 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
|
||||
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -1229,6 +1292,7 @@ google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBr
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24 h1:wDju+RU97qa0FZT0QnZDg9Uc2dH0Ql513kFvHocz+WM=
|
||||
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
@@ -1297,6 +1361,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
helm.sh/helm/v3 v3.2.4/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0=
|
||||
@@ -1316,6 +1381,7 @@ k8s.io/api v0.0.0-20191122220107-b5267f2975e0/go.mod h1:vYpRfxYkMrmPPSesoHEkGNHx
|
||||
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
|
||||
k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
|
||||
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
|
||||
k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
|
||||
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
|
||||
k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8=
|
||||
@@ -1331,7 +1397,9 @@ k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftc
|
||||
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
|
||||
k8s.io/apiserver v0.0.0-20191122221311-9d521947b1e1/go.mod h1:RbsZY5zzBIWnz4KbctZsTVjwIuOpTp4Z8oCgFHN4kZQ=
|
||||
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
|
||||
k8s.io/apiserver v0.18.2 h1:fwKxdTWwwYhxvtjo0UUfX+/fsitsNtfErPNegH2x9ic=
|
||||
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
|
||||
k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4=
|
||||
k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA=
|
||||
k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
|
||||
k8s.io/cli-runtime v0.18.2/go.mod h1:yfFR2sQQzDsV0VEKGZtrJwEy4hLZ2oj4ZIfodgxAHWQ=
|
||||
@@ -1343,6 +1411,7 @@ k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRV
|
||||
k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA=
|
||||
k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk21m21tcNUihnmp4OI7lXU7/xA+rYXkc=
|
||||
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
|
||||
k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y=
|
||||
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
@@ -1367,6 +1436,7 @@ k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+
|
||||
k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU=
|
||||
k8s.io/kubectl v0.18.2 h1:9jnGSOC2DDVZmMUTMi0D1aed438mfQcgqa5TAzVjA1k=
|
||||
k8s.io/kubectl v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4=
|
||||
k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4=
|
||||
k8s.io/metrics v0.18.2/go.mod h1:qga8E7QfYNR9Q89cSCAjinC9pTZ7yv1XSVGUB0vJypg=
|
||||
|

operator/cmd/manager/main.go

@@ -13,6 +13,11 @@ import (
	"github.com/1Password/onepassword-operator/operator/pkg/controller"
	op "github.com/1Password/onepassword-operator/operator/pkg/onepassword"
	"github.com/1Password/onepassword-operator/operator/pkg/onepassword/message"
	"github.com/suborbital/grav/discovery/local"
	"github.com/suborbital/grav/grav"
	"github.com/suborbital/grav/transport/websocket"
	"github.com/suborbital/vektor/vlog"

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)

@@ -40,6 +45,7 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

const envHostVariable = "OP_CONNECT_HOST"
const envPollingIntervalVariable = "POLLING_INTERVAL"
const manageConnect = "MANAGE_CONNECT"
const restartDeploymentsEnvVariable = "AUTO_RESTART"

@@ -167,9 +173,15 @@ func main() {
	// Add the Metrics Service
	addMetrics(ctx, cfg)

	// Setup update secrets task
	updatedSecretsPoller := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
	_, connectSet := os.LookupEnv(envHostVariable)
	done := make(chan bool)
	updateSecretsHandler := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
	// Setup update secrets task
	if connectSet {
		consumeConnectEvents(*updateSecretsHandler)
	} else {
		// If not using connect then we will use polling to get secret updates
		// TODO implement 1Password events-api
		ticker := time.NewTicker(getPollingIntervalForUpdatingSecrets())
		go func() {
			for {

@@ -178,10 +190,11 @@ func main() {
				ticker.Stop()
				return
			case <-ticker.C:
				updatedSecretsPoller.UpdateKubernetesSecretsTask()
				updateSecretsHandler.UpdateKubernetesSecretsTask("", "")
			}
		}
	}()
	}

	// Start the Cmd
	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {

@@ -300,3 +313,43 @@ func shouldAutoRestartDeployments() bool {
	}
	return false
}

func consumeConnectEvents(updateSecretsHandler op.SecretUpdateHandler) {
	log.Info(fmt.Sprintf("Operator Version: %s", version.Version))
	log.Info("Testing stuff")
	logger := vlog.Default(vlog.Level(vlog.LogLevelDebug))
	gwss := websocket.New()
	locald := local.New()

	port := "42829"
	// Override the default bus port only when OP_BUS_PORT is set to a valid number.
	if p := os.Getenv("OP_BUS_PORT"); p != "" {
		if _, err := strconv.Atoi(p); err == nil {
			port = p
		}
	}

	g := grav.New(
		grav.UseLogger(logger),
		grav.UseEndpoint(port, "http://onepassword-connect/meta/message"),
		grav.UseTransport(gwss),
		grav.UseDiscovery(locald),
	)

	pod := g.Connect()
	pod.OnType(message.TypeItemUpdate, ItemUpdate(updateSecretsHandler))
}

// ItemUpdate returns a Grav message handler for item.update events. When an
// update event is received, the operator's Kubernetes secrets are refreshed.
func ItemUpdate(updateSecretsHandler op.SecretUpdateHandler) grav.MsgFunc {
	return func(msg grav.Message) error {
		e := message.ItemUpdateEvent{}
		if err := msg.UnmarshalData(&e); err != nil {
			log.Error(err, "failed to UnmarshalData into Event")
			return nil
		}

		log.Info(fmt.Sprintf("Detected update for item %s at vault %s", e.ItemId, e.VaultId))
		updateSecretsHandler.UpdateKubernetesSecretsTask("", "")

		return nil
	}
}
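Note that the handler above calls `UpdateKubernetesSecretsTask("", "")`, which refreshes every annotated secret on each event even though the payload carries the affected vault and item. A hedged sketch of a variant that forwards those fields into the new filter parameters follows; this is an illustration of how the pieces fit together, not what this branch does:

```go
// itemUpdateFiltered is a hypothetical variant of ItemUpdate that only
// refreshes secrets referencing the item named in the event payload.
func itemUpdateFiltered(updateSecretsHandler op.SecretUpdateHandler) grav.MsgFunc {
	return func(msg grav.Message) error {
		e := message.ItemUpdateEvent{}
		if err := msg.UnmarshalData(&e); err != nil {
			log.Error(err, "failed to UnmarshalData into Event")
			return nil
		}
		// Pass the IDs through so updateKubernetesSecrets/updateInjectedSecrets
		// can skip items whose path does not match vaults/<vault>/items/<item>.
		if err := updateSecretsHandler.UpdateKubernetesSecretsTask(e.VaultId, e.ItemId); err != nil {
			log.Error(err, "failed to update Kubernetes secrets for item")
		}
		return nil
	}
}
```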

operator/pkg/onepassword/message/item_update.go (new file, 27 lines)

@@ -0,0 +1,27 @@
package message

import "encoding/json"

// TypeItemUpdate and others are sync message types
const (
	TypeItemUpdate = "item.update"
)

// ItemUpdateEvent is the data for a sync status message
type ItemUpdateEvent struct {
	VaultId string `json:"vaultId"`
	ItemId  string `json:"itemId"`
	Version string `json:"version"`
}

// Type returns the sync status data type
func (s *ItemUpdateEvent) Type() string {
	return TypeItemUpdate
}

// Bytes returns the JSON-encoded event
func (s *ItemUpdateEvent) Bytes() []byte {
	bytes, _ := json.Marshal(s)

	return bytes
}
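The event payload is plain JSON, so the wire format can be checked with a quick round-trip. A minimal sketch using only the types above; the sample IDs are made up for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/1Password/onepassword-operator/operator/pkg/onepassword/message"
)

func main() {
	// Encode an event the same way Bytes() does.
	evt := message.ItemUpdateEvent{VaultId: "vault-123", ItemId: "item-456", Version: "2"}
	raw := evt.Bytes() // {"vaultId":"vault-123","itemId":"item-456","version":"2"}

	// Decode it back, mirroring what msg.UnmarshalData does in the operator.
	var decoded message.ItemUpdateEvent
	if err := json.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Type(), decoded.VaultId, decoded.ItemId)
}
```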

operator/pkg/onepassword/secret_update_handler.go

@@ -36,13 +36,13 @@ type SecretUpdateHandler struct {
	shouldAutoRestartDeploymentsGlobal bool
}

func (h *SecretUpdateHandler) UpdateKubernetesSecretsTask() error {
	updatedKubernetesSecrets, err := h.updateKubernetesSecrets()
func (h *SecretUpdateHandler) UpdateKubernetesSecretsTask(vaultId, itemId string) error {
	updatedKubernetesSecrets, err := h.updateKubernetesSecrets(vaultId, itemId)
	if err != nil {
		return err
	}

	updatedInjectedSecrets, err := h.updateInjectedSecrets()
	updatedInjectedSecrets, err := h.updateInjectedSecrets(vaultId, itemId)
	if err != nil {
		return err
	}

@@ -113,7 +113,7 @@ func (h *SecretUpdateHandler) restartDeployment(deployment *appsv1.Deployment) {
	}
}

func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*corev1.Secret, error) {
func (h *SecretUpdateHandler) updateKubernetesSecrets(vaultId, itemId string) (map[string]map[string]*corev1.Secret, error) {
	secrets := &corev1.SecretList{}
	err := h.client.List(context.Background(), secrets)
	if err != nil {

@@ -126,11 +126,16 @@ func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*
		secret := secrets.Items[i]

		itemPath := secret.Annotations[ItemPathAnnotation]

		currentVersion := secret.Annotations[VersionAnnotation]
		if len(itemPath) == 0 || len(currentVersion) == 0 {
			continue
		}

		if vaultId != "" && itemId != "" && itemPath != fmt.Sprintf("vaults/%s/items/%s", vaultId, itemId) {
			continue
		}

		item, err := GetOnePasswordItemByPath(h.opConnectClient, secret.Annotations[ItemPathAnnotation])
		if err != nil {
			return nil, fmt.Errorf("Failed to retrieve item: %v", err)

@@ -157,7 +162,7 @@ func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*
	return updatedSecrets, nil
}

func (h *SecretUpdateHandler) updateInjectedSecrets() (map[string]map[string]*onepasswordv1.OnePasswordItem, error) {
func (h *SecretUpdateHandler) updateInjectedSecrets(vaultId, itemId string) (map[string]map[string]*onepasswordv1.OnePasswordItem, error) {
	// fetch all onepassworditems
	onepasswordItems := &onepasswordv1.OnePasswordItemList{}
	err := h.client.List(context.Background(), onepasswordItems)

@@ -180,6 +185,9 @@ func (h *SecretUpdateHandler) updateInjectedSecrets() (map[string]map[string]*on
		if len(itemPath) == 0 || len(currentVersion) == 0 {
			continue
		}
		if vaultId != "" && itemId != "" && itemPath != fmt.Sprintf("vaults/%s/items/%s", vaultId, itemId) {
			continue
		}

		storedItem, err := GetOnePasswordItemByPath(h.opConnectClient, itemPath)
		if err != nil {
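The same filter expression now appears in both update paths. A small sketch of that comparison pulled out into a helper (the helper name is illustrative, not part of this branch), which also makes the expected annotation format explicit:

```go
// matchesItemPath reports whether a secret's ItemPathAnnotation refers to the
// given vault and item. Empty IDs mean "no filter", matching the behaviour of
// UpdateKubernetesSecretsTask("", "").
func matchesItemPath(itemPath, vaultId, itemId string) bool {
	if vaultId == "" || itemId == "" {
		return true
	}
	return itemPath == fmt.Sprintf("vaults/%s/items/%s", vaultId, itemId)
}
```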

operator/pkg/onepassword/secret_update_handler_test.go

@@ -918,7 +918,7 @@ func TestUpdateSecretHandler(t *testing.T) {
			shouldAutoRestartDeploymentsGlobal: testData.globalAutoRestartEnabled,
		}

		err := h.UpdateKubernetesSecretsTask()
		err := h.UpdateKubernetesSecretsTask("", "")

		assert.Equal(t, testData.expectedError, err)

secret-injector/Dockerfile (new file, 30 lines)

@@ -0,0 +1,30 @@
# Build the injector binary
FROM golang:1.13 as builder

WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum

# Copy the go source
COPY secret-injector/cmd/main.go secret-injector/main.go
COPY secret-injector/pkg/ secret-injector/pkg/
COPY vendor/ vendor/
# Build
ARG secret_injector_version=dev
RUN CGO_ENABLED=0 \
    GO111MODULE=on \
    go build \
    -ldflags "-X \"github.com/1Password/onepassword-operator/operator/version.Version=$secret_injector_version\"" \
    -mod vendor \
    -a -o injector secret-injector/main.go

# Use distroless as minimal base image to package the secret-injector binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/injector .
USER nonroot:nonroot

ENTRYPOINT ["/injector"]
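The `-X` flag above overwrites a package-level variable at link time. A minimal sketch of that pattern, assuming the `version` package referenced in the ldflags is the usual single-variable package:

```go
// Package version holds the build version. It is overridden at link time with:
//   go build -ldflags "-X \"github.com/1Password/onepassword-operator/operator/version.Version=v1.2.3\""
package version

// Version defaults to "dev" for local builds.
var Version = "dev"
```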

secret-injector/Makefile (new file, 98 lines)

@@ -0,0 +1,98 @@
# Image URL to use all building/pushing image targets;
# Use your own docker registry and image name for dev/test by overriding the
# IMAGE_REPO, IMAGE_NAME and IMAGE_TAG environment variables.
IMAGE_REPO ?= docker.io/morvencao
IMAGE_NAME ?= op-secret-injector

# Github host to use for checking the source tree;
# Override this variable with your own value if you're working on a forked repo.
GIT_HOST ?= github.com/morvencao

PWD := $(shell pwd)
BASE_DIR := $(shell basename $(PWD))

# Keep an existing GOPATH, make a private one if it is undefined
GOPATH_DEFAULT := $(PWD)/.go
export GOPATH ?= $(GOPATH_DEFAULT)
TESTARGS_DEFAULT := "-v"
export TESTARGS ?= $(TESTARGS_DEFAULT)
DEST := $(GOPATH)/src/$(GIT_HOST)/$(BASE_DIR)
IMAGE_TAG ?= $(shell date +v%Y%m%d)-$(shell git describe --match=$(git rev-parse --short=8 HEAD) --tags --always --dirty)

LOCAL_OS := $(shell uname)
ifeq ($(LOCAL_OS),Linux)
    TARGET_OS ?= linux
    XARGS_FLAGS="-r"
else ifeq ($(LOCAL_OS),Darwin)
    TARGET_OS ?= darwin
    XARGS_FLAGS=
else
    $(error "This system's OS $(LOCAL_OS) isn't recognized/supported")
endif

all: fmt lint test build image

ifeq (,$(wildcard go.mod))
ifneq ("$(realpath $(DEST))", "$(realpath $(PWD))")
    $(error Please run 'make' from $(DEST). Current directory is $(PWD))
endif
endif

############################################################
# format section
############################################################

fmt:
	@echo "Run go fmt..."

############################################################
# lint section
############################################################

lint:
	@echo "Running golangci-lint..."

############################################################
# test section
############################################################

test:
	@echo "Running the tests for $(IMAGE_NAME)..."
	@go test $(TESTARGS) ./...

############################################################
# build section
############################################################

build:
	@echo "Building the $(IMAGE_NAME) binary..."
	@CGO_ENABLED=0 go build -o build/_output/bin/$(IMAGE_NAME) ./cmd/

build-linux:
	@echo "Building the $(IMAGE_NAME) binary for Docker (linux)..."
	@GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o build/_output/linux/bin/$(IMAGE_NAME) ./cmd/

############################################################
# image section
############################################################

image: build-image push-image

build-image: build-linux
	@echo "Building the docker image: $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG)..."
	@docker build -t $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG) -f build/Dockerfile .

push-image: build-image
	@echo "Pushing the docker image for $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG) and $(IMAGE_REPO)/$(IMAGE_NAME):latest..."
	@docker tag $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG) $(IMAGE_REPO)/$(IMAGE_NAME):latest
	@docker push $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG)
	@docker push $(IMAGE_REPO)/$(IMAGE_NAME):latest

############################################################
# clean section
############################################################
clean:
	@rm -rf build/_output

.PHONY: all fmt lint check test build image clean

secret-injector/README.md (new file, 84 lines)

@@ -0,0 +1,84 @@
## Deploy

1. Create the namespace `op-secret-injector`, in which the 1Password secret injector webhook is deployed:

```
# kubectl create ns op-secret-injector
```

2. Create a signed cert/key pair and store it in a Kubernetes `secret` that will be consumed by the 1Password secret injector deployment:

```
# ./deploy/webhook-create-signed-cert.sh \
    --service op-secret-injector-webhook-svc \
    --secret op-secret-injector-webhook-certs \
    --namespace op-secret-injector
```

3. Patch the `MutatingWebhookConfiguration` by setting `caBundle` to the correct value from the Kubernetes cluster:

```
# cat deploy/mutatingwebhook.yaml | \
    deploy/webhook-patch-ca-bundle.sh > \
    deploy/mutatingwebhook-ca-bundle.yaml
```

4. Deploy the resources:

```
# kubectl create -f deploy/deployment.yaml
# kubectl create -f deploy/service.yaml
# kubectl create -f deploy/mutatingwebhook-ca-bundle.yaml
```

## Verify

1. The sidecar injector webhook should be in the running state:

```
# kubectl -n sidecar-injector get pod
NAME                                                   READY   STATUS    RESTARTS   AGE
sidecar-injector-webhook-deployment-7c8bc5f4c9-28c84   1/1     Running   0          30s
# kubectl -n sidecar-injector get deploy
NAME                                  READY   UP-TO-DATE   AVAILABLE   AGE
sidecar-injector-webhook-deployment   1/1     1            1           67s
```

2. Create a new namespace `injection` and label it with `sidecar-injection=enabled`:

```
# kubectl create ns injection
# kubectl label namespace injection sidecar-injection=enabled
# kubectl get namespace -L sidecar-injection
NAME               STATUS   AGE   SIDECAR-INJECTION
default            Active   26m
injection          Active   13s   enabled
kube-public        Active   26m
kube-system        Active   26m
sidecar-injector   Active   17m
```

3. Deploy an app in the Kubernetes cluster, taking the `alpine` app as an example:

```
# kubectl run alpine --image=alpine --restart=Never -n injection --overrides='{"apiVersion":"v1","metadata":{"annotations":{"sidecar-injector-webhook.morven.me/inject":"yes"}}}' --command -- sleep infinity
```

4. Verify that the sidecar container is injected:

```
# kubectl get pod
NAME     READY   STATUS    RESTARTS   AGE
alpine   2/2     Running   0          1m
# kubectl -n injection get pod alpine -o jsonpath="{.spec.containers[*].name}"
alpine sidecar-nginx
```

## Troubleshooting

Sometimes you may find that the pod is not injected with the sidecar container as expected. Check the following items:

1. The sidecar-injector webhook is in the running state and has no error logs.
2. The namespace in which the application pod is deployed has the correct labels, as configured in the `mutatingwebhookconfiguration`.
3. Check that the `caBundle` was patched into the `mutatingwebhookconfiguration` object by confirming that the `caBundle` field is not empty.
4. Check whether the application pod has the annotation `sidecar-injector-webhook.morven.me/inject: "yes"`.

secret-injector/app_example.yaml (new file, 52 lines)

@@ -0,0 +1,52 @@
apiVersion: v1
kind: Service
metadata:
  name: app-example
spec:
  type: NodePort
  selector:
    app: app-example
  ports:
    - port: 5000
      name: app-example
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: app-example
spec:
  selector:
    matchLabels:
      app: app-example
  template:
    metadata:
      annotations:
        operator.1password.io/inject: "app-example"
      labels:
        app: app-example
    spec:
      containers:
        - name: app-example
          command: ["./example"]
          image: connect-app-example:latest
          imagePullPolicy: Never
          resources:
            limits:
              memory: "128Mi"
              cpu: "0.2"
          ports:
            - containerPort: 5000
          env:
            - name: OP_VAULT
              value: ApplicationConfiguration
            - name: APP_TITLE
              value: op://ApplicationConfiguration/Webapp/title
            - name: BUTTON_TEXT
              value: op://ApplicationConfiguration/Webapp/action
            - name: OP_CONNECT_HOST
              value: http://onepassword-connect:8080/
            - name: OP_CONNECT_TOKEN
              valueFrom:
                secretKeyRef:
                  name: onepassword-token
                  key: token
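Assuming the injector resolves the `op://` references in the pod's env (the webhook package itself is not part of this comparison), the application then reads ordinary environment variables. A minimal sketch of what the example app's startup could look like; the handler below is illustrative, and the real `connect-app-example` image is not shown here:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"
)

func main() {
	// After injection these hold the values from the Webapp item in the
	// ApplicationConfiguration vault, not the op:// reference strings.
	title := os.Getenv("APP_TITLE")
	buttonText := os.Getenv("BUTTON_TEXT")

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "<h1>%s</h1><button>%s</button>", title, buttonText)
	})
	// Matches the containerPort/Service port in app_example.yaml.
	log.Fatal(http.ListenAndServe(":5000", nil))
}
```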

secret-injector/cmd/main.go (new file, 85 lines)

@@ -0,0 +1,85 @@
package main

import (
	"context"
	"crypto/tls"
	"flag"
	"fmt"
	"net/http"
	"os"
	"os/signal"
	"syscall"

	"github.com/1Password/onepassword-operator/secret-injector/pkg/webhook"
	"github.com/golang/glog"
)

const (
	connectTokenSecretKeyEnv  = "OP_CONNECT_TOKEN_KEY"
	connectTokenSecretNameEnv = "OP_CONNECT_TOKEN_NAME"
	connectHostEnv            = "OP_CONNECT_HOST"
)

func main() {
	var parameters webhook.WebhookServerParameters

	glog.Info("Starting webhook")
	// get command line parameters
	flag.IntVar(&parameters.Port, "port", 8443, "Webhook server port.")
	flag.StringVar(&parameters.CertFile, "tlsCertFile", "/etc/webhook/certs/cert.pem", "File containing the x509 Certificate for HTTPS.")
	flag.StringVar(&parameters.KeyFile, "tlsKeyFile", "/etc/webhook/certs/key.pem", "File containing the x509 private key to --tlsCertFile.")
	flag.Parse()

	pair, err := tls.LoadX509KeyPair(parameters.CertFile, parameters.KeyFile)
	if err != nil {
		glog.Errorf("Failed to load key pair: %v", err)
	}

	connectHost, present := os.LookupEnv(connectHostEnv)
	if !present {
		glog.Errorf("%s environment variable is not set", connectHostEnv)
	}

	connectTokenName, present := os.LookupEnv(connectTokenSecretNameEnv)
	if !present {
		glog.Errorf("%s environment variable is not set", connectTokenSecretNameEnv)
	}

	connectTokenKey, present := os.LookupEnv(connectTokenSecretKeyEnv)
	if !present {
		glog.Errorf("%s environment variable is not set", connectTokenSecretKeyEnv)
	}

	webhookConfig := webhook.Config{
		ConnectHost:      connectHost,
		ConnectTokenName: connectTokenName,
		ConnectTokenKey:  connectTokenKey,
	}
	webhookServer := &webhook.WebhookServer{
		Config: webhookConfig,
		Server: &http.Server{
			Addr:      fmt.Sprintf(":%v", parameters.Port),
			TLSConfig: &tls.Config{Certificates: []tls.Certificate{pair}},
		},
	}

	// define http server and server handler
	mux := http.NewServeMux()
	mux.HandleFunc("/inject", webhookServer.Serve)
	webhookServer.Server.Handler = mux

	// start webhook server in a new goroutine
	go func() {
		if err := webhookServer.Server.ListenAndServeTLS("", ""); err != nil {
			glog.Errorf("Failed to listen and serve webhook server: %v", err)
		}
	}()

	// listen for an OS shutdown signal
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
	<-signalChan

	glog.Infof("Got OS shutdown signal, shutting down webhook server gracefully...")
	webhookServer.Server.Shutdown(context.Background())
}
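The `/inject` handler itself lives in `secret-injector/pkg/webhook`, which is not shown in this comparison. As a rough orientation only, a mutating admission handler for the `admissionregistration.k8s.io/v1beta1` configuration used here generally decodes an `AdmissionReview`, builds a JSON patch, and writes an `AdmissionReview` back. A minimal allow-everything sketch; the package, function name, and behaviour are assumptions, not the branch's implementation:

```go
package webhooksketch

import (
	"encoding/json"
	"io/ioutil"
	"net/http"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

// serveInject decodes the incoming AdmissionReview and answers with an
// allowed response. A real injector would also attach a JSONPatch that adds
// the Connect credentials and rewrites op:// env values on the pod.
func serveInject(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	review := admissionv1beta1.AdmissionReview{}
	if err := json.Unmarshal(body, &review); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if review.Request == nil {
		http.Error(w, "empty AdmissionRequest", http.StatusBadRequest)
		return
	}

	review.Response = &admissionv1beta1.AdmissionResponse{
		UID:     review.Request.UID,
		Allowed: true,
	}

	resp, _ := json.Marshal(review)
	w.Header().Set("Content-Type", "application/json")
	w.Write(resp)
}
```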

secret-injector/deploy/deployment.yaml (new file, 42 lines)

@@ -0,0 +1,42 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: op-secret-injector-webhook-deployment
  namespace: op-secret-injector
  labels:
    app: op-secret-injector
spec:
  replicas: 1
  selector:
    matchLabels:
      app: op-secret-injector
  template:
    metadata:
      labels:
        app: op-secret-injector
    spec:
      containers:
        - name: op-secret-injector
          image: local/onepassword-secrets-injector:v1.1.0
          imagePullPolicy: Never
          args:
            - -tlsCertFile=/etc/webhook/certs/cert.pem
            - -tlsKeyFile=/etc/webhook/certs/key.pem
            - -alsologtostderr
            - -v=4
            - 2>&1
          env:
            - name: OP_CONNECT_HOST
              value: http://onepassword-connect:8080/
            - name: OP_CONNECT_TOKEN_NAME
              value: onepassword-token
            - name: OP_CONNECT_TOKEN_KEY
              value: token
          volumeMounts:
            - name: webhook-certs
              mountPath: /etc/webhook/certs
              readOnly: true
      volumes:
        - name: webhook-certs
          secret:
            secretName: op-secret-injector-webhook-certs

secret-injector/deploy/mutatingwebhook-ca-bundle.yaml (new file, 22 lines)

@@ -0,0 +1,22 @@
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
  name: op-secret-injector-webhook-cfg
  labels:
    app: op-secret-injector
webhooks:
  - name: op-secret-injector.morven.me
    clientConfig:
      service:
        name: op-secret-injector-webhook-svc
        namespace: op-secret-injector
        path: "/inject"
caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJd01Ea3lOekl3TkRjMU9Wb1hEVE13TURreU5qSXdORGMxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTmlICjZzZVJsZG9CWlRpRVJMeVhwbXFCU3ZOcmJyMWFMcVhpZVVWcXdCcytOUUora1hsazBIRWFldnJRU2QvNnVqY2UKSHpuNFR6Smh3Qk9pYU5BSDN6QUZkeXZxRGwwZVFzNm50R2pDbVFFK0xrUU5PQlVXYmk3WEc2am1tdDA5aFFUVwpTOXg2UDdpai9lUUtLRUJFQTFlRWYvTFZibDZPMVBqa0lXV2E0SjFRMEZoQUtnSjdxUmVJaEg1dkRoVHF3TXVzClZLTEF2bU9xRk03aDNmZ1UzWVltZldpMUFoVnF0VklMYmhkOS8xbzFYM2ZESitFK0dESGMyb0NKK1QvQkxJTmsKOWhTWEhWOTdONFhib1BUWktzZXJFa3JQTnlFYkY4alpvWndBc3FuRVhBNW5sem5vTlJnTFNqSEE0NFZXOGZyawo1RWtJdFNPdkFMMHM1K0FDMnFzQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTSG5DcFRMSDRQeDRkai9oWTVNWEx6TndNeWhqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFvamR6ZzlySgpZTjlJSTJjaUNRS0djZEZGbWtHcGxiandRczBVMVdKY0plZWs3WDh3WWdPMnI3UFhLRklDTEM3aGM5bkUxYnluCkxha2YwMzhUNzRBQzlQOHZXUUJSb2lFMlBKV1BGMjhGTFJWeWgwTWdYQ3dZU20zeitIRDR5TjViWFpNSmJ4WlYKamRza0IzeVpHSW9jZ2RBSk1rU0ptQTN6RkowaHpsY09EZTNOTVA0Ujl4Z0VWczU4bHV5bjl6bm5sL2FDODlHdwpuVnVPRkk0S0dwOFF5NXFjQUxKZndiRGNrNzBjbnRQUEhBN2trT1JtUG41Z2hNSFJPZGxsamxmdXYxVE5RcVo2CjhjUENRRW1zc1ZyajFrVEh6Y3FOUXpqOWVMK2VPMGtyRWw2dzZMcm5YY0dleUxIZVc1cHF6YUY2bWZrTitEZEQKSENjV0U2V1pvTUp2UFE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
rules:
|
||||
- operations: ["CREATE", "UPDATE"]
|
||||
apiGroups: [""]
|
||||
apiVersions: ["v1"]
|
||||
resources: ["pods"]
|
||||
namespaceSelector:
|
||||
matchLabels:
|
||||
op-secret-injection: enabled
|
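The namespaceSelector above makes injection opt-in per namespace. A minimal sketch of opting a namespace in (my-app is a placeholder for your application namespace):

    kubectl label namespace my-app op-secret-injection=enabled
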
22
secret-injector/deploy/mutatingwebhook.yaml
Normal file
22
secret-injector/deploy/mutatingwebhook.yaml
Normal file
@@ -0,0 +1,22 @@
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
  name: op-secret-injector-webhook-cfg
  labels:
    app: op-secret-injector
webhooks:
  - name: op-secret-injector.morven.me
    clientConfig:
      service:
        name: op-secret-injector-webhook-svc
        namespace: op-secret-injector
        path: "/inject"
      caBundle: ${CA_BUNDLE}
    rules:
      - operations: ["CREATE", "UPDATE"]
        apiGroups: [""]
        apiVersions: ["v1"]
        resources: ["pods"]
    namespaceSelector:
      matchLabels:
        op-secret-injection: enabled

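This file is the template variant of the configuration above: ${CA_BUNDLE} is filled in by webhook-patch-ca-bundle.sh (see the script further down). Once the rendered configuration is applied, a sketch of verifying the registration:

    kubectl get mutatingwebhookconfiguration op-secret-injector-webhook-cfg -o yaml
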
13
secret-injector/deploy/service.yaml
Normal file
13
secret-injector/deploy/service.yaml
Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: op-secret-injector-webhook-svc
  namespace: op-secret-injector
  labels:
    app: op-secret-injector
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    app: op-secret-injector

131
secret-injector/deploy/webhook-create-signed-cert.sh
Executable file
131
secret-injector/deploy/webhook-create-signed-cert.sh
Executable file
@@ -0,0 +1,131 @@
#!/bin/bash

set -e

usage() {
    cat <<EOF
Generate certificate suitable for use with an op-secret-injector webhook service.

This script uses k8s' CertificateSigningRequest API to generate a
certificate signed by k8s CA suitable for use with op-secret-injector webhook
services. This requires permissions to create and approve CSR. See
https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster for
detailed explanation and additional instructions.

The server key/cert and k8s CA cert are stored in a k8s secret.

usage: ${0} [OPTIONS]

The following flags are required.

       --service          Service name of webhook.
       --namespace        Namespace where webhook service and secret reside.
       --secret           Secret name for CA certificate and server certificate/key pair.
EOF
    exit 1
}

while [[ $# -gt 0 ]]; do
    case ${1} in
        --service)
            service="$2"
            shift
            ;;
        --secret)
            secret="$2"
            shift
            ;;
        --namespace)
            namespace="$2"
            shift
            ;;
        *)
            usage
            ;;
    esac
    shift
done

[ -z "${service}" ] && service=op-secret-injector-webhook-svc
[ -z "${secret}" ] && secret=op-secret-injector-webhook-certs
[ -z "${namespace}" ] && namespace=default

if [ ! -x "$(command -v openssl)" ]; then
    echo "openssl not found"
    exit 1
fi

csrName=${service}.${namespace}
tmpdir=$(mktemp -d)
echo "creating certs in tmpdir ${tmpdir}"

cat <<EOF >> "${tmpdir}"/csr.conf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = ${service}
DNS.2 = ${service}.${namespace}
DNS.3 = ${service}.${namespace}.svc
EOF

openssl genrsa -out "${tmpdir}"/server-key.pem 2048
openssl req -new -key "${tmpdir}"/server-key.pem -subj "/CN=${service}.${namespace}.svc" -out "${tmpdir}"/server.csr -config "${tmpdir}"/csr.conf

# clean-up any previously created CSR for our service. Ignore errors if not present.
kubectl delete csr ${csrName} 2>/dev/null || true

# create server cert/key CSR and send to k8s API
cat <<EOF | kubectl create -f -
apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
  name: ${csrName}
spec:
  groups:
  - system:authenticated
  request: $(< "${tmpdir}"/server.csr base64 | tr -d '\n')
  usages:
  - digital signature
  - key encipherment
  - server auth
EOF

# verify CSR has been created
while true; do
    if kubectl get csr ${csrName}; then
        break
    else
        sleep 1
    fi
done

# approve and fetch the signed certificate
kubectl certificate approve ${csrName}
# verify certificate has been signed
for _ in $(seq 10); do
    serverCert=$(kubectl get csr ${csrName} -o jsonpath='{.status.certificate}')
    if [[ ${serverCert} != '' ]]; then
        break
    fi
    sleep 1
done
if [[ ${serverCert} == '' ]]; then
    echo "ERROR: After approving csr ${csrName}, the signed certificate did not appear on the resource. Giving up after 10 attempts." >&2
    exit 1
fi
echo "${serverCert}" | openssl base64 -d -A -out "${tmpdir}"/server-cert.pem


# create the secret with CA cert and server cert/key
kubectl create secret generic ${secret} \
        --from-file=key.pem="${tmpdir}"/server-key.pem \
        --from-file=cert.pem="${tmpdir}"/server-cert.pem \
        --dry-run -o yaml |
    kubectl -n ${namespace} apply -f -

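A usage sketch matching the service, namespace, and secret names used by the deployment above (the path assumes the script is run from the repository root):

    ./secret-injector/deploy/webhook-create-signed-cert.sh \
        --service op-secret-injector-webhook-svc \
        --namespace op-secret-injector \
        --secret op-secret-injector-webhook-certs
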
19
secret-injector/deploy/webhook-patch-ca-bundle.sh
Executable file
19
secret-injector/deploy/webhook-patch-ca-bundle.sh
Executable file
@@ -0,0 +1,19 @@
#!/bin/bash

set -o errexit
set -o nounset
set -o pipefail

CA_BUNDLE=$(kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}')

if [ -z "${CA_BUNDLE}" ]; then
    CA_BUNDLE=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.ca\.crt}")
fi

export CA_BUNDLE

if command -v envsubst >/dev/null 2>&1; then
    envsubst
else
    sed -e "s|\${CA_BUNDLE}|${CA_BUNDLE}|g"
fi

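The script reads a manifest on stdin and substitutes ${CA_BUNDLE} with the cluster CA. A sketch of rendering the webhook configuration from the template above:

    cat secret-injector/deploy/mutatingwebhook.yaml | \
        ./secret-injector/deploy/webhook-patch-ca-bundle.sh > \
        secret-injector/deploy/mutatingwebhook-ca-bundle.yaml
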
214
secret-injector/golangci.yml
Normal file
214
secret-injector/golangci.yml
Normal file
@@ -0,0 +1,214 @@
|
||||
service:
|
||||
# When updating this, also update the version stored in docker/build-tools/Dockerfile in the multicloudlab/tools repo.
|
||||
golangci-lint-version: 1.18.x # use the fixed version to not introduce new linters unexpectedly
|
||||
run:
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
deadline: 20m
|
||||
|
||||
# which dirs to skip: they won't be analyzed;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but next dirs are always skipped independently
|
||||
# from this option's value:
|
||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||
skip-dirs:
|
||||
- genfiles$
|
||||
- vendor$
|
||||
|
||||
# which files to skip: they will be analyzed, but issues from them
|
||||
# won't be reported. Default value is empty list, but there is
|
||||
# no need to include all autogenerated files, we confidently recognize
|
||||
# autogenerated files. If it's not please let us know.
|
||||
skip-files:
|
||||
- ".*\\.pb\\.go"
|
||||
- ".*\\.gen\\.go"
|
||||
|
||||
linters:
|
||||
# please, do not use `enable-all`: it's deprecated and will be removed soon.
|
||||
# inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
|
||||
disable-all: true
|
||||
enable:
|
||||
- deadcode
|
||||
- errcheck
|
||||
- gocyclo
|
||||
- gofmt
|
||||
- goimports
|
||||
- golint
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- interfacer
|
||||
- lll
|
||||
- misspell
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- varcheck
|
||||
# don't enable:
|
||||
# - gocritic
|
||||
# - bodyclose
|
||||
# - depguard
|
||||
# - dogsled
|
||||
# - dupl
|
||||
# - funlen
|
||||
# - gochecknoglobals
|
||||
# - gochecknoinits
|
||||
# - gocognit
|
||||
# - godox
|
||||
# - maligned
|
||||
# - nakedret
|
||||
# - prealloc
|
||||
# - scopelint
|
||||
# - whitespace
|
||||
# - stylecheck
|
||||
|
||||
linters-settings:
|
||||
errcheck:
|
||||
# report about not checking of errors in type assetions: `a := b.(MyStruct)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-type-assertions: false
|
||||
|
||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-blank: false
|
||||
govet:
|
||||
# report about shadowed variables
|
||||
check-shadowing: false
|
||||
golint:
|
||||
# minimal confidence for issues, default is 0.8
|
||||
min-confidence: 0.0
|
||||
gofmt:
|
||||
# simplify code: gofmt with `-s` option, true by default
|
||||
simplify: true
|
||||
goimports:
|
||||
# put imports beginning with prefix after 3rd-party packages;
|
||||
# it's a comma-separated list of prefixes
|
||||
local-prefixes: github.com/IBM/
|
||||
maligned:
|
||||
# print struct with more effective memory layout or not, false by default
|
||||
suggest-new: true
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||
locale: US
|
||||
ignore-words:
|
||||
- cancelled
|
||||
lll:
|
||||
# max line length, lines longer will be reported. Default is 120.
|
||||
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||
line-length: 300
|
||||
# tab width in spaces. Default to 1.
|
||||
tab-width: 1
|
||||
unused:
|
||||
# treat code as a program (not a library) and report unused exported identifiers; default is false.
|
||||
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
unparam:
|
||||
# call graph construction algorithm (cha, rta). In general, use cha for libraries,
|
||||
# and rta for programs with main packages. Default is cha.
|
||||
algo: cha
|
||||
|
||||
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
|
||||
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
gocritic:
|
||||
enabled-checks:
|
||||
- appendCombine
|
||||
- argOrder
|
||||
- assignOp
|
||||
- badCond
|
||||
- boolExprSimplify
|
||||
- builtinShadow
|
||||
- captLocal
|
||||
- caseOrder
|
||||
- codegenComment
|
||||
- commentedOutCode
|
||||
- commentedOutImport
|
||||
- defaultCaseOrder
|
||||
- deprecatedComment
|
||||
- docStub
|
||||
- dupArg
|
||||
- dupBranchBody
|
||||
- dupCase
|
||||
- dupSubExpr
|
||||
- elseif
|
||||
- emptyFallthrough
|
||||
- equalFold
|
||||
- flagDeref
|
||||
- flagName
|
||||
- hexLiteral
|
||||
- indexAlloc
|
||||
- initClause
|
||||
- methodExprCall
|
||||
- nilValReturn
|
||||
- octalLiteral
|
||||
- offBy1
|
||||
- rangeExprCopy
|
||||
- regexpMust
|
||||
- sloppyLen
|
||||
- stringXbytes
|
||||
- switchTrue
|
||||
- typeAssertChain
|
||||
- typeSwitchVar
|
||||
- typeUnparen
|
||||
- underef
|
||||
- unlambda
|
||||
- unnecessaryBlock
|
||||
- unslice
|
||||
- valSwap
|
||||
- weakCond
|
||||
|
||||
# Unused
|
||||
# - yodaStyleExpr
|
||||
# - appendAssign
|
||||
# - commentFormatting
|
||||
# - emptyStringTest
|
||||
# - exitAfterDefer
|
||||
# - ifElseChain
|
||||
# - hugeParam
|
||||
# - importShadow
|
||||
# - nestingReduce
|
||||
# - paramTypeCombine
|
||||
# - ptrToRefParam
|
||||
# - rangeValCopy
|
||||
# - singleCaseSwitch
|
||||
# - sloppyReassign
|
||||
# - unlabelStmt
|
||||
# - unnamedResult
|
||||
# - wrapperFunc
|
||||
|
||||
issues:
|
||||
# List of regexps of issue texts to exclude, empty list by default.
|
||||
# But independently from this option we use default exclude patterns,
|
||||
# it can be disabled by `exclude-use-default: false`. To list all
|
||||
# excluded by default patterns execute `golangci-lint run --help`
|
||||
exclude:
|
||||
- composite literal uses unkeyed fields
|
||||
|
||||
exclude-rules:
|
||||
# Exclude some linters from running on test files.
|
||||
- path: _test\.go$|^tests/|^samples/
|
||||
linters:
|
||||
- errcheck
|
||||
- maligned
|
||||
|
||||
# Independently from option `exclude` we use default exclude patterns,
|
||||
# it can be disabled by this option. To list all
|
||||
# excluded by default patterns execute `golangci-lint run --help`.
|
||||
# Default value for this option is true.
|
||||
exclude-use-default: true
|
||||
|
||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||
max-per-linter: 0
|
||||
|
||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||
max-same-issues: 0
|
||||
|
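A sketch of running the linter against the injector packages with the configuration above (assumes golangci-lint, ideally the 1.18.x version pinned in the config, is installed locally):

    golangci-lint run -c secret-injector/golangci.yml ./secret-injector/...
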
469
secret-injector/pkg/webhook/webhook.go
Normal file
469
secret-injector/pkg/webhook/webhook.go
Normal file
@@ -0,0 +1,469 @@
package webhook

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/golang/glog"
	"k8s.io/api/admission/v1beta1"
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	v1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

const (
	// binVolumeName is the name of the volume where the OP CLI binary is stored.
	binVolumeName = "op-bin"

	// binVolumeMountPath is the mount path where the OP CLI binary can be found.
	binVolumeMountPath = "/op/bin/"
)

// binVolume is the shared, in-memory volume where the OP CLI binary lives.
var binVolume = corev1.Volume{
	Name: binVolumeName,
	VolumeSource: corev1.VolumeSource{
		EmptyDir: &corev1.EmptyDirVolumeSource{
			Medium: corev1.StorageMediumMemory,
		},
	},
}

// binVolumeMount is the shared volume mount where the OP CLI binary lives.
var binVolumeMount = corev1.VolumeMount{
	Name:      binVolumeName,
	MountPath: binVolumeMountPath,
	ReadOnly:  true,
}

var (
	runtimeScheme = runtime.NewScheme()
	codecs        = serializer.NewCodecFactory(runtimeScheme)
	deserializer  = codecs.UniversalDeserializer()

	// (https://github.com/kubernetes/kubernetes/issues/57982)
	defaulter = runtime.ObjectDefaulter(runtimeScheme)
)

var ignoredNamespaces = []string{
	metav1.NamespaceSystem,
	metav1.NamespacePublic,
}

const (
	injectionStatus   = "operator.1password.io/status"
	injectAnnotation  = "operator.1password.io/inject"
	versionAnnotation = "operator.1password.io/version"
)

type WebhookServer struct {
	Config Config
	Server *http.Server
}

// Webhook Server parameters
type WebhookServerParameters struct {
	Port     int    // webhook server port
	CertFile string // path to the x509 certificate for https
	KeyFile  string // path to the x509 private key matching `CertFile`
}

type Config struct {
	ConnectHost      string
	ConnectTokenName string
	ConnectTokenKey  string
}

type patchOperation struct {
	Op    string      `json:"op"`
	Path  string      `json:"path"`
	Value interface{} `json:"value,omitempty"`
}

func init() {
	_ = corev1.AddToScheme(runtimeScheme)
	_ = admissionregistrationv1beta1.AddToScheme(runtimeScheme)
	_ = v1.AddToScheme(runtimeScheme)
}

func applyDefaultsWorkaround(containers []corev1.Container, volumes []corev1.Volume) {
	defaulter.Default(&corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: containers,
			Volumes:    volumes,
		},
	})
}

// Check whether the target resource needs to be mutated
func mutationRequired(ignoredList []string, metadata *metav1.ObjectMeta) bool {
	// skip special kubernetes system namespaces
	for _, namespace := range ignoredList {
		if metadata.Namespace == namespace {
			glog.Infof("Skip mutation for %v for it's in special namespace:%v", metadata.Name, metadata.Namespace)
			return false
		}
	}

	annotations := metadata.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}

	status := annotations[injectionStatus]
	_, enabled := annotations[injectAnnotation]

	// determine whether to perform mutation based on annotation for the target resource
	required := false
	if strings.ToLower(status) != "injected" && enabled {
		required = true
	}

	glog.Infof("Mutation policy for %v/%v: status: %q required:%v", metadata.Namespace, metadata.Name, status, required)
	return required
}

func addContainers(target, added []corev1.Container, basePath string) (patch []patchOperation) {
	first := len(target) == 0
	var value interface{}
	for _, add := range added {
		value = add
		path := basePath
		if first {
			first = false
			value = []corev1.Container{add}
		} else {
			path = path + "/-"
		}
		patch = append(patch, patchOperation{
			Op:    "add",
			Path:  path,
			Value: value,
		})
	}
	return patch
}

func addVolume(target, added []corev1.Volume, basePath string) (patch []patchOperation) {
	first := len(target) == 0
	var value interface{}
	for _, add := range added {
		value = add
		path := basePath
		if first {
			first = false
			value = []corev1.Volume{add}
		} else {
			path = path + "/-"
		}
		patch = append(patch, patchOperation{
			Op:    "add",
			Path:  path,
			Value: value,
		})
	}
	return patch
}

func updateAnnotation(target map[string]string, added map[string]string) (patch []patchOperation) {
	for key, value := range added {
		if target == nil || target[key] == "" {
			target = map[string]string{}
			patch = append(patch, patchOperation{
				Op:   "add",
				Path: "/metadata/annotations",
				Value: map[string]string{
					key: value,
				},
			})
		} else {
			patch = append(patch, patchOperation{
				Op:    "replace",
				Path:  "/metadata/annotations/" + key,
				Value: value,
			})
		}
	}
	return patch
}

// main mutation process
func (whsvr *WebhookServer) mutate(ar *v1beta1.AdmissionReview) *v1beta1.AdmissionResponse {
	ctx := context.Background()
	req := ar.Request
	var pod corev1.Pod
	if err := json.Unmarshal(req.Object.Raw, &pod); err != nil {
		glog.Errorf("Could not unmarshal raw object: %v", err)
		return &v1beta1.AdmissionResponse{
			Result: &metav1.Status{
				Message: err.Error(),
			},
		}
	}

	glog.Infof("AdmissionReview for Kind=%v, Namespace=%v Name=%v (%v) UID=%v patchOperation=%v UserInfo=%v",
		req.Kind, req.Namespace, req.Name, pod.Name, req.UID, req.Operation, req.UserInfo)

	// determine whether to perform mutation
	if !mutationRequired(ignoredNamespaces, &pod.ObjectMeta) {
		glog.Infof("Skipping mutation for %s/%s due to policy check", pod.Namespace, pod.Name)
		return &v1beta1.AdmissionResponse{
			Allowed: true,
		}
	}

	containersStr := pod.Annotations[injectAnnotation]

	containers := map[string]struct{}{}

	for _, container := range strings.Split(containersStr, ",") {
		containers[container] = struct{}{}
	}

	version, ok := pod.Annotations[versionAnnotation]
	if !ok {
		version = "latest"
	}

	mutated := false

	var patch []patchOperation
	for i, c := range pod.Spec.InitContainers {
		_, mutate := containers[c.Name]
		if !mutate {
			continue
		}
		c, didMutate, initContainerPatch, err := whsvr.mutateContainer(ctx, &c, i)
		if err != nil {
			return &v1beta1.AdmissionResponse{
				Result: &metav1.Status{
					Message: err.Error(),
				},
			}
		}
		if didMutate {
			mutated = true
			pod.Spec.InitContainers[i] = *c
		}
		patch = append(patch, initContainerPatch...)
	}

	for i, c := range pod.Spec.Containers {
		_, mutate := containers[c.Name]
		if !mutate {
			continue
		}

		c, didMutate, containerPatch, err := whsvr.mutateContainer(ctx, &c, i)
		if err != nil {
			glog.Errorf("Error occurred mutating container: %v", err)
			return &v1beta1.AdmissionResponse{
				Result: &metav1.Status{
					Message: err.Error(),
				},
			}
		}
		patch = append(patch, containerPatch...)
		if didMutate {
			mutated = true
			pod.Spec.Containers[i] = *c
		}
	}

	// binInitContainer is the container that pulls the OP CLI
	// into a shared volume mount.
	var binInitContainer = corev1.Container{
		Name:            "copy-op-bin",
		Image:           "op-example" + ":" + version,
		ImagePullPolicy: corev1.PullIfNotPresent,
		Command: []string{"sh", "-c",
			fmt.Sprintf("cp /usr/local/bin/op %s", binVolumeMountPath)},
		VolumeMounts: []corev1.VolumeMount{
			{
				Name:      binVolumeName,
				MountPath: binVolumeMountPath,
			},
		},
	}

	if !mutated {
		glog.Infof("No mutations made for %s/%s", pod.Namespace, pod.Name)
		return &v1beta1.AdmissionResponse{
			Allowed: true,
		}
	}
	annotations := map[string]string{injectionStatus: "injected"}
	patchBytes, err := createOPCLIPatch(&pod, annotations, []corev1.Container{binInitContainer}, patch)
	if err != nil {
		return &v1beta1.AdmissionResponse{
			Result: &metav1.Status{
				Message: err.Error(),
			},
		}
	}

	glog.Infof("AdmissionResponse: patch=%v\n", string(patchBytes))
	return &v1beta1.AdmissionResponse{
		Allowed: true,
		Patch:   patchBytes,
		PatchType: func() *v1beta1.PatchType {
			pt := v1beta1.PatchTypeJSONPatch
			return &pt
		}(),
	}
}

// create mutation patch for resources
func createOPCLIPatch(pod *corev1.Pod, annotations map[string]string, containers []corev1.Container, patch []patchOperation) ([]byte, error) {

	patch = append(patch, addVolume(pod.Spec.Volumes, []corev1.Volume{binVolume}, "/spec/volumes")...)
	patch = append(patch, addContainers(pod.Spec.InitContainers, containers, "/spec/initContainers")...)
	patch = append(patch, updateAnnotation(pod.Annotations, annotations)...)

	return json.Marshal(patch)
}

func createOPConnectPatch(container *corev1.Container, containerIndex int, host, tokenSecretName, tokenSecretKey string) []patchOperation {
	var patch []patchOperation
	connectHostEnvVar := corev1.EnvVar{
		Name:  "OP_CONNECT_HOST",
		Value: host,
	}

	connectTokenEnvVar := corev1.EnvVar{
		Name: "OP_CONNECT_TOKEN",
		ValueFrom: &corev1.EnvVarSource{
			SecretKeyRef: &corev1.SecretKeySelector{
				Key: tokenSecretKey,
				LocalObjectReference: corev1.LocalObjectReference{
					Name: tokenSecretName,
				},
			},
		},
	}

	envs := []corev1.EnvVar{
		connectHostEnvVar,
		connectTokenEnvVar,
	}

	patch = append(patch, setEnvironment(*container, containerIndex, envs, "/spec/containers")...)

	return patch
}

func (whsvr *WebhookServer) mutateContainer(_ context.Context, container *corev1.Container, containerIndex int) (*corev1.Container, bool, []patchOperation, error) {
	// Because we are running a command in the pod before starting the container app,
	// we need to prepend the pod command with the op run command
	if len(container.Command) == 0 {
		return container, false, nil, fmt.Errorf("not attaching OP to the container %s: the podspec does not define a command", container.Name)
	}

	// Prepend the command with op run --
	container.Command = append([]string{binVolumeMountPath + "op", "run", "--"}, container.Command...)

	var patch []patchOperation

	// adding the cli to the container using a volume mount
	path := fmt.Sprintf("%s/%d/volumeMounts", "/spec/containers", containerIndex)
	patch = append(patch, patchOperation{
		Op:    "add",
		Path:  path,
		Value: []corev1.VolumeMount{binVolumeMount},
	})

	// replacing the container command with a command prepended with op run
	path = fmt.Sprintf("%s/%d/command", "/spec/containers", containerIndex)
	patch = append(patch, patchOperation{
		Op:    "replace",
		Path:  path,
		Value: container.Command,
	})

	// creating patch for adding connect environment variables to the container
	patch = append(patch, createOPConnectPatch(container, containerIndex, whsvr.Config.ConnectHost, whsvr.Config.ConnectTokenName, whsvr.Config.ConnectTokenKey)...)
	return container, true, patch, nil
}

func setEnvironment(container corev1.Container, containerIndex int, addedEnv []corev1.EnvVar, basePath string) (patch []patchOperation) {
	first := len(container.Env) == 0
	var value interface{}
	for _, add := range addedEnv {
		path := fmt.Sprintf("%s/%d/env", basePath, containerIndex)
		value = add
		if first {
			first = false
			value = []corev1.EnvVar{add}
		} else {
			path = path + "/-"
		}
		patch = append(patch, patchOperation{
			Op:    "add",
			Path:  path,
			Value: value,
		})
	}
	return patch
}

// Serve method for webhook server
func (whsvr *WebhookServer) Serve(w http.ResponseWriter, r *http.Request) {
	var body []byte
	if r.Body != nil {
		if data, err := ioutil.ReadAll(r.Body); err == nil {
			body = data
		}
	}
	if len(body) == 0 {
		glog.Error("empty body")
		http.Error(w, "empty body", http.StatusBadRequest)
		return
	}

	// verify the content type is accurate
	contentType := r.Header.Get("Content-Type")
	if contentType != "application/json" {
		glog.Errorf("Content-Type=%s, expect application/json", contentType)
		http.Error(w, "invalid Content-Type, expect `application/json`", http.StatusUnsupportedMediaType)
		return
	}

	var admissionResponse *v1beta1.AdmissionResponse
	ar := v1beta1.AdmissionReview{}
	if _, _, err := deserializer.Decode(body, nil, &ar); err != nil {
		glog.Errorf("Can't decode body: %v", err)
		admissionResponse = &v1beta1.AdmissionResponse{
			Result: &metav1.Status{
				Message: err.Error(),
			},
		}
	} else {
		admissionResponse = whsvr.mutate(&ar)
	}

	admissionReview := v1beta1.AdmissionReview{}
	if admissionResponse != nil {
		admissionReview.Response = admissionResponse
		if ar.Request != nil {
			admissionReview.Response.UID = ar.Request.UID
		}
	}

	resp, err := json.Marshal(admissionReview)
	if err != nil {
		glog.Errorf("Can't encode response: %v", err)
		http.Error(w, fmt.Sprintf("could not encode response: %v", err), http.StatusInternalServerError)
		return // avoid writing a nil response below
	}
	glog.Infof("Ready to write response ...")
	if _, err := w.Write(resp); err != nil {
		glog.Errorf("Can't write response: %v", err)
		http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
	}
}

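To summarize the mutation above: a pod opts in with the operator.1password.io/inject annotation (a comma-separated list of container names), each named container must declare a command, and the webhook then mounts the shared op-bin volume, prepends /op/bin/op run -- to that command, and injects OP_CONNECT_HOST and OP_CONNECT_TOKEN. A minimal sketch of an opted-in pod, using hypothetical names and a hypothetical op:// secret reference for op run to resolve, in a namespace labeled op-secret-injection=enabled:

    cat <<EOF | kubectl apply -f -
    apiVersion: v1
    kind: Pod
    metadata:
      name: example-app
      namespace: my-app
      annotations:
        operator.1password.io/inject: "app"
        operator.1password.io/version: "latest"
    spec:
      containers:
        - name: app
          image: example/app:latest
          command: ["./entrypoint"]
          env:
            - name: DB_PASSWORD
              value: op://my-vault/db/password
    EOF
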
9
secret-injector/test/Dockerfile
Normal file
9
secret-injector/test/Dockerfile
Normal file
@@ -0,0 +1,9 @@
FROM ubuntu:latest
ARG VERSION

RUN apt-get update && apt-get install -y curl unzip jq && \
    curl -o 1password.zip https://bucket.agilebits.com/cli-private-beta/v2/op_linux_amd64_v2-alpha2.zip && \
    unzip 1password.zip -d /usr/local/bin && \
    rm 1password.zip

CMD ["op"]

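A build sketch for this test image (the tag is arbitrary; note the VERSION build arg is declared but unused in the steps above):

    docker build -f secret-injector/test/Dockerfile -t op-cli-test:latest secret-injector/test
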
202
vendor/github.com/docker/distribution/LICENSE
generated
vendored
Normal file
202
vendor/github.com/docker/distribution/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
247
vendor/github.com/docker/distribution/digestset/set.go
generated
vendored
Normal file
247
vendor/github.com/docker/distribution/digestset/set.go
generated
vendored
Normal file
@@ -0,0 +1,247 @@
|
||||
package digestset
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDigestNotFound is used when a matching digest
|
||||
// could not be found in a set.
|
||||
ErrDigestNotFound = errors.New("digest not found")
|
||||
|
||||
// ErrDigestAmbiguous is used when multiple digests
|
||||
// are found in a set. None of the matching digests
|
||||
// should be considered valid matches.
|
||||
ErrDigestAmbiguous = errors.New("ambiguous digest string")
|
||||
)
|
||||
|
||||
// Set is used to hold a unique set of digests which
|
||||
// may be easily referenced by easily referenced by a string
|
||||
// representation of the digest as well as short representation.
|
||||
// The uniqueness of the short representation is based on other
|
||||
// digests in the set. If digests are omitted from this set,
|
||||
// collisions in a larger set may not be detected, therefore it
|
||||
// is important to always do short representation lookups on
|
||||
// the complete set of digests. To mitigate collisions, an
|
||||
// appropriately long short code should be used.
|
||||
type Set struct {
|
||||
mutex sync.RWMutex
|
||||
entries digestEntries
|
||||
}
|
||||
|
||||
// NewSet creates an empty set of digests
|
||||
// which may have digests added.
|
||||
func NewSet() *Set {
|
||||
return &Set{
|
||||
entries: digestEntries{},
|
||||
}
|
||||
}
|
||||
|
||||
// checkShortMatch checks whether two digests match as either whole
|
||||
// values or short values. This function does not test equality,
|
||||
// rather whether the second value could match against the first
|
||||
// value.
|
||||
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
if len(hex) == len(shortHex) {
|
||||
if hex != shortHex {
|
||||
return false
|
||||
}
|
||||
if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
} else if !strings.HasPrefix(hex, shortHex) {
|
||||
return false
|
||||
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Lookup looks for a digest matching the given string representation.
|
||||
// If no digests could be found ErrDigestNotFound will be returned
|
||||
// with an empty digest value. If multiple matches are found
|
||||
// ErrDigestAmbiguous will be returned with an empty digest value.
|
||||
func (dst *Set) Lookup(d string) (digest.Digest, error) {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
if len(dst.entries) == 0 {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
var (
|
||||
searchFunc func(int) bool
|
||||
alg digest.Algorithm
|
||||
hex string
|
||||
)
|
||||
dgst, err := digest.Parse(d)
|
||||
if err == digest.ErrDigestInvalidFormat {
|
||||
hex = d
|
||||
searchFunc = func(i int) bool {
|
||||
return dst.entries[i].val >= d
|
||||
}
|
||||
} else {
|
||||
hex = dgst.Hex()
|
||||
alg = dgst.Algorithm()
|
||||
searchFunc = func(i int) bool {
|
||||
if dst.entries[i].val == hex {
|
||||
return dst.entries[i].alg >= alg
|
||||
}
|
||||
return dst.entries[i].val >= hex
|
||||
}
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
|
||||
return "", ErrDigestAmbiguous
|
||||
}
|
||||
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
|
||||
// Add adds the given digest to the set. An error will be returned
|
||||
// if the given digest is invalid. If the digest already exists in the
|
||||
// set, this operation will be a no-op.
|
||||
func (dst *Set) Add(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) {
|
||||
dst.entries = append(dst.entries, entry)
|
||||
return nil
|
||||
} else if dst.entries[idx].digest == d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := append(dst.entries, nil)
|
||||
copy(entries[idx+1:], entries[idx:len(entries)-1])
|
||||
entries[idx] = entry
|
||||
dst.entries = entries
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove removes the given digest from the set. An err will be
|
||||
// returned if the given digest is invalid. If the digest does
|
||||
// not exist in the set, this operation will be a no-op.
|
||||
func (dst *Set) Remove(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
// Not found if idx is after or value at idx is not digest
|
||||
if idx == len(dst.entries) || dst.entries[idx].digest != d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := dst.entries
|
||||
copy(entries[idx:], entries[idx+1:])
|
||||
entries = entries[:len(entries)-1]
|
||||
dst.entries = entries
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// All returns all the digests in the set
|
||||
func (dst *Set) All() []digest.Digest {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
retValues := make([]digest.Digest, len(dst.entries))
|
||||
for i := range dst.entries {
|
||||
retValues[i] = dst.entries[i].digest
|
||||
}
|
||||
|
||||
return retValues
|
||||
}
|
||||
|
||||
// ShortCodeTable returns a map of Digest to unique short codes. The
|
||||
// length represents the minimum value, the maximum length may be the
|
||||
// entire value of digest if uniqueness cannot be achieved without the
|
||||
// full value. This function will attempt to make short codes as short
|
||||
// as possible to be unique.
|
||||
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
m := make(map[digest.Digest]string, len(dst.entries))
|
||||
l := length
|
||||
resetIdx := 0
|
||||
for i := 0; i < len(dst.entries); i++ {
|
||||
var short string
|
||||
extended := true
|
||||
for extended {
|
||||
extended = false
|
||||
if len(dst.entries[i].val) <= l {
|
||||
short = dst.entries[i].digest.String()
|
||||
} else {
|
||||
short = dst.entries[i].val[:l]
|
||||
for j := i + 1; j < len(dst.entries); j++ {
|
||||
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
|
||||
if j > resetIdx {
|
||||
resetIdx = j
|
||||
}
|
||||
extended = true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if extended {
|
||||
l++
|
||||
}
|
||||
}
|
||||
}
|
||||
m[dst.entries[i].digest] = short
|
||||
if i >= resetIdx {
|
||||
l = length
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type digestEntry struct {
|
||||
alg digest.Algorithm
|
||||
val string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
type digestEntries []*digestEntry
|
||||
|
||||
func (d digestEntries) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
|
||||
func (d digestEntries) Less(i, j int) bool {
|
||||
if d[i].val != d[j].val {
|
||||
return d[i].val < d[j].val
|
||||
}
|
||||
return d[i].alg < d[j].alg
|
||||
}
|
||||
|
||||
func (d digestEntries) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
42
vendor/github.com/docker/distribution/reference/helpers.go
generated
vendored
Normal file
42
vendor/github.com/docker/distribution/reference/helpers.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
package reference
|
||||
|
||||
import "path"
|
||||
|
||||
// IsNameOnly returns true if reference only contains a repo name.
|
||||
func IsNameOnly(ref Named) bool {
|
||||
if _, ok := ref.(NamedTagged); ok {
|
||||
return false
|
||||
}
|
||||
if _, ok := ref.(Canonical); ok {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// FamiliarName returns the familiar name string
|
||||
// for the given named, familiarizing if needed.
|
||||
func FamiliarName(ref Named) string {
|
||||
if nn, ok := ref.(normalizedNamed); ok {
|
||||
return nn.Familiar().Name()
|
||||
}
|
||||
return ref.Name()
|
||||
}
|
||||
|
||||
// FamiliarString returns the familiar string representation
|
||||
// for the given reference, familiarizing if needed.
|
||||
func FamiliarString(ref Reference) string {
|
||||
if nn, ok := ref.(normalizedNamed); ok {
|
||||
return nn.Familiar().String()
|
||||
}
|
||||
return ref.String()
|
||||
}
|
||||
|
||||
// FamiliarMatch reports whether ref matches the specified pattern.
|
||||
// See https://godoc.org/path#Match for supported patterns.
|
||||
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
|
||||
matched, err := path.Match(pattern, FamiliarString(ref))
|
||||
if namedRef, isNamed := ref.(Named); isNamed && !matched {
|
||||
matched, _ = path.Match(pattern, FamiliarName(namedRef))
|
||||
}
|
||||
return matched, err
|
||||
}
|
170
vendor/github.com/docker/distribution/reference/normalize.go
generated
vendored
Normal file
170
vendor/github.com/docker/distribution/reference/normalize.go
generated
vendored
Normal file
@@ -0,0 +1,170 @@
|
||||
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/digestset"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
legacyDefaultDomain = "index.docker.io"
|
||||
defaultDomain = "docker.io"
|
||||
officialRepoName = "library"
|
||||
defaultTag = "latest"
|
||||
)
|
||||
|
||||
// normalizedNamed represents a name which has been
|
||||
// normalized and has a familiar form. A familiar name
|
||||
// is what is used in Docker UI. An example normalized
|
||||
// name is "docker.io/library/ubuntu" and corresponding
|
||||
// familiar name of "ubuntu".
|
||||
type normalizedNamed interface {
|
||||
Named
|
||||
Familiar() Named
|
||||
}
|
||||
|
||||
// ParseNormalizedNamed parses a string into a named reference
|
||||
// transforming a familiar name from Docker UI to a fully
|
||||
// qualified reference. If the value may be an identifier
|
||||
// use ParseAnyReference.
|
||||
func ParseNormalizedNamed(s string) (Named, error) {
|
||||
if ok := anchoredIdentifierRegexp.MatchString(s); ok {
|
||||
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
|
||||
}
|
||||
domain, remainder := splitDockerDomain(s)
|
||||
var remoteName string
|
||||
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
|
||||
remoteName = remainder[:tagSep]
|
||||
} else {
|
||||
remoteName = remainder
|
||||
}
|
||||
if strings.ToLower(remoteName) != remoteName {
|
||||
return nil, errors.New("invalid reference format: repository name must be lowercase")
|
||||
}
|
||||
|
||||
	ref, err := Parse(domain + "/" + remainder)
	if err != nil {
		return nil, err
	}
	named, isNamed := ref.(Named)
	if !isNamed {
		return nil, fmt.Errorf("reference %s has no name", ref.String())
	}
	return named, nil
}

// splitDockerDomain splits a repository name to domain and remotename string.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
func splitDockerDomain(name string) (domain, remainder string) {
	i := strings.IndexRune(name, '/')
	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
		domain, remainder = defaultDomain, name
	} else {
		domain, remainder = name[:i], name[i+1:]
	}
	if domain == legacyDefaultDomain {
		domain = defaultDomain
	}
	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
		remainder = officialRepoName + "/" + remainder
	}
	return
}

// familiarizeName returns a shortened version of the name familiar
// to to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized named only reference.
func familiarizeName(named namedRepository) repository {
	repo := repository{
		domain: named.Domain(),
		path:   named.Path(),
	}

	if repo.domain == defaultDomain {
		repo.domain = ""
		// Handle official repositories which have the pattern "library/<official repo name>"
		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
			repo.path = split[1]
		}
	}
	return repo
}

func (r reference) Familiar() Named {
	return reference{
		namedRepository: familiarizeName(r.namedRepository),
		tag:             r.tag,
		digest:          r.digest,
	}
}

func (r repository) Familiar() Named {
	return familiarizeName(r)
}

func (t taggedReference) Familiar() Named {
	return taggedReference{
		namedRepository: familiarizeName(t.namedRepository),
		tag:             t.tag,
	}
}

func (c canonicalReference) Familiar() Named {
	return canonicalReference{
		namedRepository: familiarizeName(c.namedRepository),
		digest:          c.digest,
	}
}

// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
func TagNameOnly(ref Named) Named {
	if IsNameOnly(ref) {
		namedTagged, err := WithTag(ref, defaultTag)
		if err != nil {
			// Default tag must be valid, to create a NamedTagged
			// type with non-validated input the WithTag function
			// should be used instead
			panic(err)
		}
		return namedTagged
	}
	return ref
}

// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
		return digestReference("sha256:" + ref), nil
	}
	if dgst, err := digest.Parse(ref); err == nil {
		return digestReference(dgst), nil
	}

	return ParseNormalizedNamed(ref)
}

// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
		dgst, err := ds.Lookup(ref)
		if err == nil {
			return digestReference(dgst), nil
		}
	} else {
		if dgst, err := digest.Parse(ref); err == nil {
			return digestReference(dgst), nil
		}
	}

	return ParseNormalizedNamed(ref)
}
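For illustration only (not part of the vendored file): a minimal sketch of how the normalization helpers above behave, assuming the package is imported from github.com/docker/distribution/reference.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A bare name gets the default domain and the "library/" prefix.
	named, err := reference.ParseNormalizedNamed("redis")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/redis

	// TagNameOnly appends ":latest" when the reference has no tag or digest.
	fmt.Println(reference.TagNameOnly(named).String()) // docker.io/library/redis:latest

	// A name with a registry host keeps its domain and path as written.
	named, _ = reference.ParseNormalizedNamed("registry.example.com:5000/team/app:v1")
	fmt.Println(reference.Domain(named), reference.Path(named)) // registry.example.com:5000 team/app
}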
433
vendor/github.com/docker/distribution/reference/reference.go
generated
vendored
Normal file
@@ -0,0 +1,433 @@
|
||||
// Package reference provides a general type to represent any way of referencing images within the registry.
|
||||
// Its main purpose is to abstract tags and digests (content-addressable hash).
|
||||
//
|
||||
// Grammar
|
||||
//
|
||||
// reference := name [ ":" tag ] [ "@" digest ]
|
||||
// name := [domain '/'] path-component ['/' path-component]*
|
||||
// domain := domain-component ['.' domain-component]* [':' port-number]
|
||||
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
// port-number := /[0-9]+/
|
||||
// path-component := alpha-numeric [separator alpha-numeric]*
|
||||
// alpha-numeric := /[a-z0-9]+/
|
||||
// separator := /[_.]|__|[-]*/
|
||||
//
|
||||
// tag := /[\w][\w.-]{0,127}/
|
||||
//
|
||||
// digest := digest-algorithm ":" digest-hex
|
||||
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
|
||||
// digest-algorithm-separator := /[+.-_]/
|
||||
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
|
||||
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
|
||||
//
|
||||
// identifier := /[a-f0-9]{64}/
|
||||
// short-identifier := /[a-f0-9]{6,64}/
|
||||
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
// NameTotalLengthMax is the maximum total number of characters in a repository name.
|
||||
NameTotalLengthMax = 255
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
|
||||
ErrReferenceInvalidFormat = errors.New("invalid reference format")
|
||||
|
||||
// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
|
||||
ErrTagInvalidFormat = errors.New("invalid tag format")
|
||||
|
||||
// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
|
||||
ErrDigestInvalidFormat = errors.New("invalid digest format")
|
||||
|
||||
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
|
||||
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
|
||||
|
||||
// ErrNameEmpty is returned for empty, invalid repository names.
|
||||
ErrNameEmpty = errors.New("repository name must have at least one component")
|
||||
|
||||
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
|
||||
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
|
||||
|
||||
// ErrNameNotCanonical is returned when a name is not canonical.
|
||||
ErrNameNotCanonical = errors.New("repository name must be canonical")
|
||||
)
|
||||
|
||||
// Reference is an opaque object reference identifier that may include
|
||||
// modifiers such as a hostname, name, tag, and digest.
|
||||
type Reference interface {
|
||||
// String returns the full reference
|
||||
String() string
|
||||
}
|
||||
|
||||
// Field provides a wrapper type for resolving correct reference types when
|
||||
// working with encoding.
|
||||
type Field struct {
|
||||
reference Reference
|
||||
}
|
||||
|
||||
// AsField wraps a reference in a Field for encoding.
|
||||
func AsField(reference Reference) Field {
|
||||
return Field{reference}
|
||||
}
|
||||
|
||||
// Reference unwraps the reference type from the field to
|
||||
// return the Reference object. This object should be
|
||||
// of the appropriate type to further check for different
|
||||
// reference types.
|
||||
func (f Field) Reference() Reference {
|
||||
return f.reference
|
||||
}
|
||||
|
||||
// MarshalText serializes the field to byte text which
|
||||
// is the string of the reference.
|
||||
func (f Field) MarshalText() (p []byte, err error) {
|
||||
return []byte(f.reference.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses text bytes by invoking the
|
||||
// reference parser to ensure the appropriately
|
||||
// typed reference object is wrapped by field.
|
||||
func (f *Field) UnmarshalText(p []byte) error {
|
||||
r, err := Parse(string(p))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.reference = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// Named is an object with a full name
|
||||
type Named interface {
|
||||
Reference
|
||||
Name() string
|
||||
}
|
||||
|
||||
// Tagged is an object which has a tag
|
||||
type Tagged interface {
|
||||
Reference
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// NamedTagged is an object including a name and tag.
|
||||
type NamedTagged interface {
|
||||
Named
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// Digested is an object which has a digest
|
||||
// in which it can be referenced by
|
||||
type Digested interface {
|
||||
Reference
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// Canonical reference is an object with a fully unique
|
||||
// name including a name with domain and digest
|
||||
type Canonical interface {
|
||||
Named
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// namedRepository is a reference to a repository with a name.
|
||||
// A namedRepository has both domain and path components.
|
||||
type namedRepository interface {
|
||||
Named
|
||||
Domain() string
|
||||
Path() string
|
||||
}
|
||||
|
||||
// Domain returns the domain part of the Named reference
|
||||
func Domain(named Named) string {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain()
|
||||
}
|
||||
domain, _ := splitDomain(named.Name())
|
||||
return domain
|
||||
}
|
||||
|
||||
// Path returns the name without the domain part of the Named reference
|
||||
func Path(named Named) (name string) {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Path()
|
||||
}
|
||||
_, path := splitDomain(named.Name())
|
||||
return path
|
||||
}
|
||||
|
||||
func splitDomain(name string) (string, string) {
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if len(match) != 3 {
|
||||
return "", name
|
||||
}
|
||||
return match[1], match[2]
|
||||
}
|
||||
|
||||
// SplitHostname splits a named reference into a
|
||||
// hostname and name string. If no valid hostname is
|
||||
// found, the hostname is empty and the full value
|
||||
// is returned as name
|
||||
// DEPRECATED: Use Domain or Path
|
||||
func SplitHostname(named Named) (string, string) {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain(), r.Path()
|
||||
}
|
||||
return splitDomain(named.Name())
|
||||
}
|
||||
|
||||
// Parse parses s and returns a syntactically valid Reference.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: Parse will not handle short digests.
|
||||
func Parse(s string) (Reference, error) {
|
||||
matches := ReferenceRegexp.FindStringSubmatch(s)
|
||||
if matches == nil {
|
||||
if s == "" {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
|
||||
return nil, ErrNameContainsUppercase
|
||||
}
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
|
||||
if len(matches[1]) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
|
||||
var repo repository
|
||||
|
||||
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
|
||||
if nameMatch != nil && len(nameMatch) == 3 {
|
||||
repo.domain = nameMatch[1]
|
||||
repo.path = nameMatch[2]
|
||||
} else {
|
||||
repo.domain = ""
|
||||
repo.path = matches[1]
|
||||
}
|
||||
|
||||
ref := reference{
|
||||
namedRepository: repo,
|
||||
tag: matches[2],
|
||||
}
|
||||
if matches[3] != "" {
|
||||
var err error
|
||||
ref.digest, err = digest.Parse(matches[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
r := getBestReferenceType(ref)
|
||||
if r == nil {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// ParseNamed parses s and returns a syntactically valid reference implementing
|
||||
// the Named interface. The reference must have a name and be in the canonical
|
||||
// form, otherwise an error is returned.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: ParseNamed will not handle short digests.
|
||||
func ParseNamed(s string) (Named, error) {
|
||||
named, err := ParseNormalizedNamed(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if named.String() != s {
|
||||
return nil, ErrNameNotCanonical
|
||||
}
|
||||
return named, nil
|
||||
}
|
||||
|
||||
// WithName returns a named object representing the given string. If the input
|
||||
// is invalid ErrReferenceInvalidFormat will be returned.
|
||||
func WithName(name string) (Named, error) {
|
||||
if len(name) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if match == nil || len(match) != 3 {
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
return repository{
|
||||
domain: match[1],
|
||||
path: match[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WithTag combines the name from "name" and the tag from "tag" to form a
|
||||
// reference incorporating both the name and the tag.
|
||||
func WithTag(name Named, tag string) (NamedTagged, error) {
|
||||
if !anchoredTagRegexp.MatchString(tag) {
|
||||
return nil, ErrTagInvalidFormat
|
||||
}
|
||||
var repo repository
|
||||
if r, ok := name.(namedRepository); ok {
|
||||
repo.domain = r.Domain()
|
||||
repo.path = r.Path()
|
||||
} else {
|
||||
repo.path = name.Name()
|
||||
}
|
||||
if canonical, ok := name.(Canonical); ok {
|
||||
return reference{
|
||||
namedRepository: repo,
|
||||
tag: tag,
|
||||
digest: canonical.Digest(),
|
||||
}, nil
|
||||
}
|
||||
return taggedReference{
|
||||
namedRepository: repo,
|
||||
tag: tag,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WithDigest combines the name from "name" and the digest from "digest" to form
|
||||
// a reference incorporating both the name and the digest.
|
||||
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
|
||||
if !anchoredDigestRegexp.MatchString(digest.String()) {
|
||||
return nil, ErrDigestInvalidFormat
|
||||
}
|
||||
var repo repository
|
||||
if r, ok := name.(namedRepository); ok {
|
||||
repo.domain = r.Domain()
|
||||
repo.path = r.Path()
|
||||
} else {
|
||||
repo.path = name.Name()
|
||||
}
|
||||
if tagged, ok := name.(Tagged); ok {
|
||||
return reference{
|
||||
namedRepository: repo,
|
||||
tag: tagged.Tag(),
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
return canonicalReference{
|
||||
namedRepository: repo,
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TrimNamed removes any tag or digest from the named reference.
|
||||
func TrimNamed(ref Named) Named {
|
||||
domain, path := SplitHostname(ref)
|
||||
return repository{
|
||||
domain: domain,
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
func getBestReferenceType(ref reference) Reference {
|
||||
if ref.Name() == "" {
|
||||
// Allow digest only references
|
||||
if ref.digest != "" {
|
||||
return digestReference(ref.digest)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if ref.tag == "" {
|
||||
if ref.digest != "" {
|
||||
return canonicalReference{
|
||||
namedRepository: ref.namedRepository,
|
||||
digest: ref.digest,
|
||||
}
|
||||
}
|
||||
return ref.namedRepository
|
||||
}
|
||||
if ref.digest == "" {
|
||||
return taggedReference{
|
||||
namedRepository: ref.namedRepository,
|
||||
tag: ref.tag,
|
||||
}
|
||||
}
|
||||
|
||||
return ref
|
||||
}
|
||||
|
||||
type reference struct {
|
||||
namedRepository
|
||||
tag string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (r reference) String() string {
|
||||
return r.Name() + ":" + r.tag + "@" + r.digest.String()
|
||||
}
|
||||
|
||||
func (r reference) Tag() string {
|
||||
return r.tag
|
||||
}
|
||||
|
||||
func (r reference) Digest() digest.Digest {
|
||||
return r.digest
|
||||
}
|
||||
|
||||
type repository struct {
|
||||
domain string
|
||||
path string
|
||||
}
|
||||
|
||||
func (r repository) String() string {
|
||||
return r.Name()
|
||||
}
|
||||
|
||||
func (r repository) Name() string {
|
||||
if r.domain == "" {
|
||||
return r.path
|
||||
}
|
||||
return r.domain + "/" + r.path
|
||||
}
|
||||
|
||||
func (r repository) Domain() string {
|
||||
return r.domain
|
||||
}
|
||||
|
||||
func (r repository) Path() string {
|
||||
return r.path
|
||||
}
|
||||
|
||||
type digestReference digest.Digest
|
||||
|
||||
func (d digestReference) String() string {
|
||||
return digest.Digest(d).String()
|
||||
}
|
||||
|
||||
func (d digestReference) Digest() digest.Digest {
|
||||
return digest.Digest(d)
|
||||
}
|
||||
|
||||
type taggedReference struct {
|
||||
namedRepository
|
||||
tag string
|
||||
}
|
||||
|
||||
func (t taggedReference) String() string {
|
||||
return t.Name() + ":" + t.tag
|
||||
}
|
||||
|
||||
func (t taggedReference) Tag() string {
|
||||
return t.tag
|
||||
}
|
||||
|
||||
type canonicalReference struct {
|
||||
namedRepository
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (c canonicalReference) String() string {
|
||||
return c.Name() + "@" + c.digest.String()
|
||||
}
|
||||
|
||||
func (c canonicalReference) Digest() digest.Digest {
|
||||
return c.digest
|
||||
}
|
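For illustration only (not part of the vendored file): a short sketch of how the constructors above compose, assuming the vendored reference package and github.com/opencontainers/go-digest are importable; the digest value is a placeholder.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	// WithName validates the repository name only.
	name, err := reference.WithName("example.com/myteam/myapp")
	if err != nil {
		panic(err)
	}

	// WithTag and WithDigest layer a tag or a digest on top of the name.
	tagged, _ := reference.WithTag(name, "v1.2.3")
	dgst := digest.Digest("sha256:" + strings.Repeat("a", 64)) // placeholder digest value
	canonical, _ := reference.WithDigest(name, dgst)

	fmt.Println(tagged.String())    // example.com/myteam/myapp:v1.2.3
	fmt.Println(canonical.String()) // example.com/myteam/myapp@sha256:aaaa...

	// Parse returns the most specific type; a type switch recovers it.
	ref, _ := reference.Parse("example.com/myteam/myapp:v1.2.3")
	switch r := ref.(type) {
	case reference.NamedTagged:
		fmt.Println("tagged:", r.Tag())
	case reference.Canonical:
		fmt.Println("canonical:", r.Digest())
	}
}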
143
vendor/github.com/docker/distribution/reference/regexp.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
package reference

import "regexp"

var (
	// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allow one period, one or two underscore and multiple
	// dashes.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscore and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// domainComponentRegexp restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp
	// and followed by an optional port.
	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// DomainRegexp defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	DomainRegexp = expression(
		domainComponentRegexp,
		optional(repeated(literal(`.`), domainComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = anchored(TagRegexp)

	// DigestRegexp matches valid digests.
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = anchored(DigestRegexp)

	// NameRegexp is the format for the name component of references. The
	// regexp has capturing groups for the domain and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(DomainRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(DomainRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))

	// IdentifierRegexp is the format for string identifier used as a
	// content addressable identifier using sha256. These identifiers
	// are like digests without the algorithm, since sha256 is used.
	IdentifierRegexp = match(`([a-f0-9]{64})`)

	// ShortIdentifierRegexp is the format used to represent a prefix
	// of an identifier. A prefix may be used to match a sha256 identifier
	// within a list of trusted identifiers.
	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

	// anchoredIdentifierRegexp is used to check or match an
	// identifier value, anchored at start and end of string.
	anchoredIdentifierRegexp = anchored(IdentifierRegexp)

	// anchoredShortIdentifierRegexp is used to check if a value
	// is a possible identifier prefix, anchored at start and end
	// of string.
	anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)

// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
	re := match(regexp.QuoteMeta(s))

	if _, complete := re.LiteralPrefix(); !complete {
		panic("must be a literal")
	}

	return re
}

// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	var s string
	for _, re := range res {
		s += re.String()
	}

	return match(s)
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `?`)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `+`)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(?:` + expression(res...).String() + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(` + expression(res...).String() + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`^` + expression(res...).String() + `$`)
}
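For illustration only (not part of the vendored file): the exported regexps above can be used directly to split a reference string into its capture groups; a minimal sketch, assuming the vendored import path.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	s := "registry.example.com:5000/team/app:v1@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	// ReferenceRegexp is anchored and has three capture groups: name, tag, digest.
	m := reference.ReferenceRegexp.FindStringSubmatch(s)
	if m == nil {
		panic("not a valid reference")
	}
	fmt.Println("name:  ", m[1]) // registry.example.com:5000/team/app
	fmt.Println("tag:   ", m[2]) // v1
	fmt.Println("digest:", m[3]) // sha256:e3b0...b855
}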
191
vendor/github.com/golang/glog/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
44
vendor/github.com/golang/glog/README
generated
vendored
Normal file
@@ -0,0 +1,44 @@
glog
====

Leveled execution logs for Go.

This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
	https://github.com/google/glog

By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.
Through the -vmodule flag, the package also provides fine-grained
control over logging at the file level.

The comment from glog.go introduces the ideas:

	Package glog implements logging analogous to the Google-internal
	C++ INFO/ERROR/V setup. It provides functions Info, Warning,
	Error, Fatal, plus formatting variants such as Infof. It
	also provides V-style logging controlled by the -v and
	-vmodule=file=2 flags.

	Basic examples:

		glog.Info("Prepare to repel boarders")

		glog.Fatalf("Initialization failed: %s", err)

	See the documentation for the V function for an explanation
	of these examples:

		if glog.V(2) {
			glog.Info("Starting transaction...")
		}

		glog.V(2).Infoln("Processed", nItems, "elements")

The repository contains an open source version of the log package
used inside Google. The master copy of the source lives inside
Google, not here. The code in this repo is for export only and is not itself
under development. Feature requests will be ignored.

Send bug reports to golang-nuts@googlegroups.com.
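For illustration only (not part of the vendored file): a runnable variant of the README's examples, assuming the vendored github.com/golang/glog package. glog reads its -v, -vmodule, -logtostderr and related settings from the standard flag package.

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// glog registers -v, -vmodule, -logtostderr, -log_dir, etc. on the default flag set.
	flag.Parse()
	defer glog.Flush() // output is buffered; flush before exiting

	glog.Info("Prepare to repel boarders")

	// V-style logging: the body only runs when -v=2 (or higher) is set.
	if glog.V(2) {
		glog.Info("Starting transaction...")
	}
	glog.V(2).Infoln("Processed", 42, "elements")
}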
1180
vendor/github.com/golang/glog/glog.go
generated
vendored
Normal file
File diff suppressed because it is too large
124
vendor/github.com/golang/glog/glog_file.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
|
||||
//
|
||||
// Copyright 2013 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// File I/O for logs.
|
||||
|
||||
package glog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MaxSize is the maximum size of a log file in bytes.
|
||||
var MaxSize uint64 = 1024 * 1024 * 1800
|
||||
|
||||
// logDirs lists the candidate directories for new log files.
|
||||
var logDirs []string
|
||||
|
||||
// If non-empty, overrides the choice of directory in which to write logs.
|
||||
// See createLogDirs for the full list of possible destinations.
|
||||
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
|
||||
|
||||
func createLogDirs() {
|
||||
if *logDir != "" {
|
||||
logDirs = append(logDirs, *logDir)
|
||||
}
|
||||
logDirs = append(logDirs, os.TempDir())
|
||||
}
|
||||
|
||||
var (
|
||||
pid = os.Getpid()
|
||||
program = filepath.Base(os.Args[0])
|
||||
host = "unknownhost"
|
||||
userName = "unknownuser"
|
||||
)
|
||||
|
||||
func init() {
|
||||
h, err := os.Hostname()
|
||||
if err == nil {
|
||||
host = shortHostname(h)
|
||||
}
|
||||
|
||||
current, err := user.Current()
|
||||
if err == nil {
|
||||
userName = current.Username
|
||||
}
|
||||
|
||||
// Sanitize userName since it may contain filepath separators on Windows.
|
||||
userName = strings.Replace(userName, `\`, "_", -1)
|
||||
}
|
||||
|
||||
// shortHostname returns its argument, truncating at the first period.
|
||||
// For instance, given "www.google.com" it returns "www".
|
||||
func shortHostname(hostname string) string {
|
||||
if i := strings.Index(hostname, "."); i >= 0 {
|
||||
return hostname[:i]
|
||||
}
|
||||
return hostname
|
||||
}
|
||||
|
||||
// logName returns a new log file name containing tag, with start time t, and
|
||||
// the name for the symlink for tag.
|
||||
func logName(tag string, t time.Time) (name, link string) {
|
||||
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
|
||||
program,
|
||||
host,
|
||||
userName,
|
||||
tag,
|
||||
t.Year(),
|
||||
t.Month(),
|
||||
t.Day(),
|
||||
t.Hour(),
|
||||
t.Minute(),
|
||||
t.Second(),
|
||||
pid)
|
||||
return name, program + "." + tag
|
||||
}
|
||||
|
||||
var onceLogDirs sync.Once
|
||||
|
||||
// create creates a new log file and returns the file and its filename, which
|
||||
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
|
||||
// successfully, create also attempts to update the symlink for that tag, ignoring
|
||||
// errors.
|
||||
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
|
||||
onceLogDirs.Do(createLogDirs)
|
||||
if len(logDirs) == 0 {
|
||||
return nil, "", errors.New("log: no log dirs")
|
||||
}
|
||||
name, link := logName(tag, t)
|
||||
var lastErr error
|
||||
for _, dir := range logDirs {
|
||||
fname := filepath.Join(dir, name)
|
||||
f, err := os.Create(fname)
|
||||
if err == nil {
|
||||
symlink := filepath.Join(dir, link)
|
||||
os.Remove(symlink) // ignore err
|
||||
os.Symlink(name, symlink) // ignore err
|
||||
return f, fname, nil
|
||||
}
|
||||
lastErr = err
|
||||
}
|
||||
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
|
||||
}
|
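For illustration only (not part of the vendored file): with the file sink above, log files land in os.TempDir() unless -log_dir is set, and their names follow the logName pattern. A hedged sketch; the directory and the sample filename are only examples.

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// Equivalent to passing -log_dir=/var/log/myapp on the command line
	// (example path; any writable directory works).
	_ = flag.Set("log_dir", "/var/log/myapp")
	flag.Parse()
	defer glog.Flush()

	// Produces a file such as
	//   /var/log/myapp/myapp.myhost.bob.log.INFO.20211005-143000.4242
	// plus a "myapp.INFO" symlink pointing at it (see logName and create above).
	glog.Info("writing to files instead of stderr")
}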
4
vendor/github.com/google/uuid/hash.go
generated
vendored
@@ -26,8 +26,8 @@ var (
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
	h.Reset()
	h.Write(space[:])
	h.Write(data)
	h.Write(space[:]) //nolint:errcheck
	h.Write(data) //nolint:errcheck
	s := h.Sum(nil)
	var uuid UUID
	copy(uuid[:], s)
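For illustration only (not part of the vendored diff): NewHash is the core of the package's name-based UUIDs; NewMD5 and NewSHA1, declared in this same file, wrap it. A small sketch of deterministic, name-based IDs.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Version 5 (SHA-1) UUID derived from a namespace and a name:
	// the same inputs always produce the same UUID.
	u := uuid.NewSHA1(uuid.NameSpaceURL, []byte("https://example.com/resource"))
	fmt.Println(u, u.Version()) // prints the UUID and VERSION_5

	// Version 3 (MD5) works the same way via NewMD5.
	fmt.Println(uuid.NewMD5(uuid.NameSpaceDNS, []byte("example.com")))
}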
118
vendor/github.com/google/uuid/null.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
// Copyright 2021 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var jsonNull = []byte("null")
|
||||
|
||||
// NullUUID represents a UUID that may be null.
|
||||
// NullUUID implements the SQL driver.Scanner interface so
|
||||
// it can be used as a scan destination:
|
||||
//
|
||||
// var u uuid.NullUUID
|
||||
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
|
||||
// ...
|
||||
// if u.Valid {
|
||||
// // use u.UUID
|
||||
// } else {
|
||||
// // NULL value
|
||||
// }
|
||||
//
|
||||
type NullUUID struct {
|
||||
UUID UUID
|
||||
Valid bool // Valid is true if UUID is not NULL
|
||||
}
|
||||
|
||||
// Scan implements the SQL driver.Scanner interface.
|
||||
func (nu *NullUUID) Scan(value interface{}) error {
|
||||
if value == nil {
|
||||
nu.UUID, nu.Valid = Nil, false
|
||||
return nil
|
||||
}
|
||||
|
||||
err := nu.UUID.Scan(value)
|
||||
if err != nil {
|
||||
nu.Valid = false
|
||||
return err
|
||||
}
|
||||
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver Valuer interface.
|
||||
func (nu NullUUID) Value() (driver.Value, error) {
|
||||
if !nu.Valid {
|
||||
return nil, nil
|
||||
}
|
||||
// Delegate to UUID Value function
|
||||
return nu.UUID.Value()
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
||||
func (nu NullUUID) MarshalBinary() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return nu.UUID[:], nil
|
||||
}
|
||||
|
||||
return []byte(nil), nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
|
||||
if len(data) != 16 {
|
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
||||
}
|
||||
copy(nu.UUID[:], data)
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (nu NullUUID) MarshalText() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return nu.UUID.MarshalText()
|
||||
}
|
||||
|
||||
return jsonNull, nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (nu *NullUUID) UnmarshalText(data []byte) error {
|
||||
id, err := ParseBytes(data)
|
||||
if err != nil {
|
||||
nu.Valid = false
|
||||
return err
|
||||
}
|
||||
nu.UUID = id
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (nu NullUUID) MarshalJSON() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return json.Marshal(nu.UUID)
|
||||
}
|
||||
|
||||
return jsonNull, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
|
||||
if bytes.Equal(data, jsonNull) {
|
||||
*nu = NullUUID{}
|
||||
return nil // valid null UUID
|
||||
}
|
||||
err := json.Unmarshal(data, &nu.UUID)
|
||||
nu.Valid = err == nil
|
||||
return err
|
||||
}
|
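For illustration only (not part of the vendored file): beyond the SQL scanning shown in the doc comment above, NullUUID also round-trips through JSON, mapping the invalid (NULL) state to a JSON null; a brief sketch.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type record struct {
	ID     uuid.UUID     `json:"id"`
	Parent uuid.NullUUID `json:"parent"` // may be null
}

func main() {
	r := record{ID: uuid.New()} // Parent left invalid, so it encodes as null
	b, _ := json.Marshal(r)
	fmt.Println(string(b)) // {"id":"...","parent":null}

	var out record
	_ = json.Unmarshal([]byte(`{"id":"`+r.ID.String()+`","parent":null}`), &out)
	fmt.Println(out.Parent.Valid) // false
}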
2
vendor/github.com/google/uuid/sql.go
generated
vendored
@@ -9,7 +9,7 @@ import (
	"fmt"
)

// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
55
vendor/github.com/google/uuid/uuid.go
generated
vendored
@@ -12,6 +12,7 @@ import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
@@ -33,7 +34,27 @@ const (
	Future // Reserved for future definition.
)

var rander = rand.Reader // random function
const randPoolSize = 16 * 16

var (
	rander = rand.Reader // random function
	poolEnabled = false
	poolMu sync.Mutex
	poolPos = randPoolSize // protected with poolMu
	pool [randPoolSize]byte // protected with poolMu
)

type invalidLengthError struct{ len int }

func (err invalidLengthError) Error() string {
	return fmt.Sprintf("invalid UUID length: %d", err.len)
}

// IsInvalidLengthError is matcher function for custom error invalidLengthError
func IsInvalidLengthError(err error) bool {
	_, ok := err.(invalidLengthError)
	return ok
}

// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
@@ -68,7 +89,7 @@ func Parse(s string) (UUID, error) {
		}
		return uuid, nil
	default:
		return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
		return uuid, invalidLengthError{len(s)}
	}
	// s is now at least 36 bytes long
	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -112,7 +133,7 @@ func ParseBytes(b []byte) (UUID, error) {
		}
		return uuid, nil
	default:
		return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
		return uuid, invalidLengthError{len(b)}
	}
	// s is now at least 36 bytes long
	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -243,3 +264,31 @@ func SetRand(r io.Reader) {
	}
	rander = r
}

// EnableRandPool enables internal randomness pool used for Random
// (Version 4) UUID generation. The pool contains random bytes read from
// the random number generator on demand in batches. Enabling the pool
// may improve the UUID generation throughput significantly.
//
// Since the pool is stored on the Go heap, this feature may be a bad fit
// for security sensitive applications.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func EnableRandPool() {
	poolEnabled = true
}

// DisableRandPool disables the randomness pool if it was previously
// enabled with EnableRandPool.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func DisableRandPool() {
	poolEnabled = false
	defer poolMu.Unlock()
	poolMu.Lock()
	poolPos = randPoolSize
}
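For illustration only (not part of the vendored diff): the new invalidLengthError type keeps the original error message but lets callers test for it without string matching; a minimal sketch.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	_, err := uuid.Parse("not-a-uuid") // wrong length (10 characters)
	if uuid.IsInvalidLengthError(err) {
		fmt.Println("length problem:", err) // invalid UUID length: 10
	}
}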
33
vendor/github.com/google/uuid/version4.go
generated
vendored
@@ -14,11 +14,21 @@ func New() UUID {
	return Must(NewRandom())
}

// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
// uuid.New().String()
func NewString() string {
	return Must(NewRandom()).String()
}

// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// Uses the randomness pool if it was enabled with EnableRandPool.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
@@ -27,7 +37,10 @@ func New() UUID {
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
	if !poolEnabled {
		return NewRandomFromReader(rander)
	}
	return newRandomFromPool()
}

// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
@@ -41,3 +54,23 @@ func NewRandomFromReader(r io.Reader) (UUID, error) {
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}

func newRandomFromPool() (UUID, error) {
	var uuid UUID
	poolMu.Lock()
	if poolPos == randPoolSize {
		_, err := io.ReadFull(rander, pool[:])
		if err != nil {
			poolMu.Unlock()
			return Nil, err
		}
		poolPos = 0
	}
	copy(uuid[:], pool[poolPos:(poolPos+16)])
	poolPos += 16
	poolMu.Unlock()

	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}
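For illustration only (not part of the vendored diff): the pool is opt-in, so by default NewRandom still reads crypto/rand directly. A brief sketch of the trade-off described above.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Default path: every New/NewRandom call reads crypto/rand via rander.
	fmt.Println(uuid.NewString())

	// Opt in to the 256-byte pool before any concurrent generation starts;
	// subsequent v4 UUIDs are sliced off the pool under poolMu.
	uuid.EnableRandPool()
	defer uuid.DisableRandPool()

	id, err := uuid.NewRandom()
	if err != nil {
		panic(err)
	}
	fmt.Println(id)
}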
25
vendor/github.com/gorilla/websocket/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

.idea/
*.iml
9
vendor/github.com/gorilla/websocket/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,9 @@
# This is the official list of Gorilla WebSocket authors for copyright
# purposes.
#
# Please keep the list sorted.

Gary Burd <gary@beagledreams.com>
Google LLC (https://opensource.google.com/)
Joachim Bauch <mail@joachim-bauch.de>
22
vendor/github.com/gorilla/websocket/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
64
vendor/github.com/gorilla/websocket/README.md
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
# Gorilla WebSocket
|
||||
|
||||
[](https://godoc.org/github.com/gorilla/websocket)
|
||||
[](https://circleci.com/gh/gorilla/websocket)
|
||||
|
||||
Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
|
||||
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
|
||||
|
||||
### Documentation
|
||||
|
||||
* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
|
||||
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
|
||||
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
|
||||
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
|
||||
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
|
||||
|
||||
### Status
|
||||
|
||||
The Gorilla WebSocket package provides a complete and tested implementation of
|
||||
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
|
||||
package API is stable.
|
||||
|
||||
### Installation
|
||||
|
||||
go get github.com/gorilla/websocket
|
||||
|
||||
### Protocol Compliance
|
||||
|
||||
The Gorilla WebSocket package passes the server tests in the [Autobahn Test
|
||||
Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
|
||||
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
|
||||
|
||||
### Gorilla WebSocket compared with other packages
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
|
||||
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
|
||||
</tr>
|
||||
<tr>
|
||||
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
|
||||
<tr><td>Passes <a href="https://github.com/crossbario/autobahn-testsuite">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
|
||||
<tr><td colspan="3">Other Features</tr></td>
|
||||
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
|
||||
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
|
||||
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
|
||||
</table>
|
||||
|
||||
Notes:
|
||||
|
||||
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
|
||||
2. The application can get the type of a received data message by implementing
|
||||
a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
|
||||
function.
|
||||
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
|
||||
Read returns when the input buffer is full or a frame boundary is
|
||||
encountered. Each call to Write sends a single frame message. The Gorilla
|
||||
io.Reader and io.WriteCloser operate on a single WebSocket message.
|
||||
|
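For illustration only (not part of the vendored file): a minimal client sketch tying the README together, assuming the package's DefaultDialer (declared elsewhere in client.go); the ws://localhost:8080/echo endpoint is only a placeholder and error handling is abbreviated.

package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Dialer performs the HTTP upgrade handshake; DefaultDialer uses sensible defaults.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer conn.Close()

	// Each WriteMessage/ReadMessage call handles one complete WebSocket message.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal("write:", err)
	}
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatal("read:", err)
	}
	log.Printf("received: %s", msg)
}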
395
vendor/github.com/gorilla/websocket/client.go
generated
vendored
Normal file
@@ -0,0 +1,395 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to opening handshake is
|
||||
// invalid.
|
||||
var ErrBadHandshake = errors.New("websocket: bad handshake")
|
||||
|
||||
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
|
||||
|
||||
// NewClient creates a new client connection using the given net connection.
|
||||
// The URL u specifies the host and request URI. Use requestHeader to specify
|
||||
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
|
||||
// (Cookie). Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etc.
|
||||
//
|
||||
// Deprecated: Use Dialer instead.
|
||||
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
|
||||
d := Dialer{
|
||||
ReadBufferSize: readBufSize,
|
||||
WriteBufferSize: writeBufSize,
|
||||
NetDial: func(net, addr string) (net.Conn, error) {
|
||||
return netConn, nil
|
||||
},
|
||||
}
|
||||
return d.Dial(u.String(), requestHeader)
|
||||
}
|
||||
|
||||
// A Dialer contains options for connecting to WebSocket server.
|
||||
type Dialer struct {
|
||||
// NetDial specifies the dial function for creating TCP connections. If
|
||||
// NetDial is nil, net.Dial is used.
|
||||
NetDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// NetDialContext specifies the dial function for creating TCP connections. If
|
||||
// NetDialContext is nil, net.DialContext is used.
|
||||
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
// If Proxy is nil or returns a nil *URL, no proxy is used.
|
||||
Proxy func(*http.Request) (*url.URL, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
|
||||
// If nil, the default configuration is used.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then a useful default size is used. The I/O buffer sizes
|
||||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the client's requested subprotocols.
|
||||
Subprotocols []string
|
||||
|
||||
// EnableCompression specifies if the client should attempt to negotiate
|
||||
// per message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
|
||||
// Jar specifies the cookie jar.
|
||||
// If Jar is nil, cookies are not sent in requests and ignored
|
||||
// in responses.
|
||||
Jar http.CookieJar
|
||||
}
|
||||
|
||||
// Dial creates a new client connection by calling DialContext with a background context.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
return d.DialContext(context.Background(), urlStr, requestHeader)
|
||||
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
switch u.Scheme {
|
||||
case "wss":
|
||||
hostPort += ":443"
|
||||
case "https":
|
||||
hostPort += ":443"
|
||||
default:
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default values.
|
||||
var DefaultDialer = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: 45 * time.Second,
|
||||
}
|
||||
|
||||
// nilDialer is the dialer to use when the receiver is nil.
|
||||
var nilDialer = *DefaultDialer
|
||||
|
||||
// DialContext creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// The context will be used in the request and in the Dialer.
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
if d == nil {
|
||||
d = &nilDialer
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "ws":
|
||||
u.Scheme = "http"
|
||||
case "wss":
|
||||
u.Scheme = "https"
|
||||
default:
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
// User name and password are not allowed in websocket URIs.
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
req := &http.Request{
|
||||
Method: "GET",
|
||||
URL: u,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: u.Host,
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
// Set the cookies present in the cookie jar of the dialer
|
||||
if d.Jar != nil {
|
||||
for _, cookie := range d.Jar.Cookies(u) {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the request headers using the capitalization for names and values in
|
||||
// RFC examples. Although the capitalization shouldn't matter, there are
|
||||
// servers that depend on it. The Header.Set method is not used because the
|
||||
// method canonicalizes the header names.
|
||||
req.Header["Upgrade"] = []string{"websocket"}
|
||||
req.Header["Connection"] = []string{"Upgrade"}
|
||||
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
|
||||
req.Header["Sec-WebSocket-Version"] = []string{"13"}
|
||||
if len(d.Subprotocols) > 0 {
|
||||
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
|
||||
}
|
||||
for k, vs := range requestHeader {
|
||||
switch {
|
||||
case k == "Host":
|
||||
if len(vs) > 0 {
|
||||
req.Host = vs[0]
|
||||
}
|
||||
case k == "Upgrade" ||
|
||||
k == "Connection" ||
|
||||
k == "Sec-Websocket-Key" ||
|
||||
k == "Sec-Websocket-Version" ||
|
||||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
case k == "Sec-Websocket-Protocol":
|
||||
req.Header["Sec-WebSocket-Protocol"] = vs
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
|
||||
}
|
||||
|
||||
if d.EnableCompression {
|
||||
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
|
||||
}
|
||||
|
||||
if d.HandshakeTimeout != 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Get network dial function.
|
||||
var netDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
if d.NetDialContext != nil {
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return d.NetDialContext(ctx, network, addr)
|
||||
}
|
||||
} else if d.NetDial != nil {
|
||||
netDial = d.NetDial
|
||||
} else {
|
||||
netDialer := &net.Dialer{}
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return netDialer.DialContext(ctx, network, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to set the connection deadline.
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
forwardDial := netDial
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
c, err := forwardDial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = c.SetDeadline(deadline)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to connect through a proxy.
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err := d.Proxy(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if proxyURL != nil {
|
||||
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
netDial = dialer.Dial
|
||||
}
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
trace := httptrace.ContextClientTrace(ctx)
|
||||
if trace != nil && trace.GetConn != nil {
|
||||
trace.GetConn(hostPort)
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", hostPort)
|
||||
if trace != nil && trace.GotConn != nil {
|
||||
trace.GotConn(httptrace.GotConnInfo{
|
||||
Conn: netConn,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if netConn != nil {
|
||||
netConn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if u.Scheme == "https" {
|
||||
cfg := cloneTLSConfig(d.TLSClientConfig)
|
||||
if cfg.ServerName == "" {
|
||||
cfg.ServerName = hostNoPort
|
||||
}
|
||||
tlsConn := tls.Client(netConn, cfg)
|
||||
netConn = tlsConn
|
||||
|
||||
var err error
|
||||
if trace != nil {
|
||||
err = doHandshakeWithTrace(trace, tlsConn, cfg)
|
||||
} else {
|
||||
err = doHandshake(tlsConn, cfg)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
|
||||
|
||||
if err := req.Write(netConn); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if trace != nil && trace.GotFirstResponseByte != nil {
|
||||
if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
|
||||
trace.GotFirstResponseByte()
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(conn.br, req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if d.Jar != nil {
|
||||
if rc := resp.Cookies(); len(rc) > 0 {
|
||||
d.Jar.SetCookies(u, rc)
|
||||
}
|
||||
}
|
||||
|
||||
if resp.StatusCode != 101 ||
|
||||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
|
||||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
|
||||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
|
||||
// Before closing the network connection on return from this
|
||||
// function, slurp up some of the response to aid application
|
||||
// debugging.
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := io.ReadFull(resp.Body, buf)
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
|
||||
return nil, resp, ErrBadHandshake
|
||||
}
|
||||
|
||||
for _, ext := range parseExtensions(resp.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
_, snct := ext["server_no_context_takeover"]
|
||||
_, cnct := ext["client_no_context_takeover"]
|
||||
if !snct || !cnct {
|
||||
return nil, resp, errInvalidCompression
|
||||
}
|
||||
conn.newCompressionWriter = compressNoContextTakeover
|
||||
conn.newDecompressionReader = decompressNoContextTakeover
|
||||
break
|
||||
}
|
||||
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
|
||||
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
|
||||
|
||||
netConn.SetDeadline(time.Time{})
|
||||
netConn = nil // to avoid close in defer.
|
||||
return conn, resp, nil
|
||||
}
|
||||
|
||||
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
return err
|
||||
}
|
||||
if !cfg.InsecureSkipVerify {
|
||||
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
16 vendor/github.com/gorilla/websocket/client_clone.go (generated, vendored, new file)
@@ -0,0 +1,16 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "crypto/tls"

func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return cfg.Clone()
}
38 vendor/github.com/gorilla/websocket/client_clone_legacy.go (generated, vendored, new file)
@@ -0,0 +1,38 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

import "crypto/tls"

// cloneTLSConfig clones all public fields except the fields
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
// config in active use.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return &tls.Config{
		Rand:                     cfg.Rand,
		Time:                     cfg.Time,
		Certificates:             cfg.Certificates,
		NameToCertificate:        cfg.NameToCertificate,
		GetCertificate:           cfg.GetCertificate,
		RootCAs:                  cfg.RootCAs,
		NextProtos:               cfg.NextProtos,
		ServerName:               cfg.ServerName,
		ClientAuth:               cfg.ClientAuth,
		ClientCAs:                cfg.ClientCAs,
		InsecureSkipVerify:       cfg.InsecureSkipVerify,
		CipherSuites:             cfg.CipherSuites,
		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
		ClientSessionCache:       cfg.ClientSessionCache,
		MinVersion:               cfg.MinVersion,
		MaxVersion:               cfg.MaxVersion,
		CurvePreferences:         cfg.CurvePreferences,
	}
}
148 vendor/github.com/gorilla/websocket/compression.go (generated, vendored, new file)
@@ -0,0 +1,148 @@
|
||||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
|
||||
maxCompressionLevel = flate.BestCompression
|
||||
defaultCompressionLevel = 1
|
||||
)
|
||||
|
||||
var (
|
||||
flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
|
||||
flateReaderPool = sync.Pool{New: func() interface{} {
|
||||
return flate.NewReader(nil)
|
||||
}}
|
||||
)
|
||||
|
||||
func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
|
||||
const tail =
|
||||
// Add four bytes as specified in RFC
|
||||
"\x00\x00\xff\xff" +
|
||||
// Add final block to squelch unexpected EOF error from flate reader.
|
||||
"\x01\x00\x00\xff\xff"
|
||||
|
||||
fr, _ := flateReaderPool.Get().(io.ReadCloser)
|
||||
fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
|
||||
return &flateReadWrapper{fr}
|
||||
}
|
||||
|
||||
func isValidCompressionLevel(level int) bool {
|
||||
return minCompressionLevel <= level && level <= maxCompressionLevel
|
||||
}
|
||||
|
||||
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
|
||||
p := &flateWriterPools[level-minCompressionLevel]
|
||||
tw := &truncWriter{w: w}
|
||||
fw, _ := p.Get().(*flate.Writer)
|
||||
if fw == nil {
|
||||
fw, _ = flate.NewWriter(tw, level)
|
||||
} else {
|
||||
fw.Reset(tw)
|
||||
}
|
||||
return &flateWriteWrapper{fw: fw, tw: tw, p: p}
|
||||
}
|
||||
|
||||
// truncWriter is an io.Writer that writes all but the last four bytes of the
|
||||
// stream to another io.Writer.
|
||||
type truncWriter struct {
|
||||
w io.WriteCloser
|
||||
n int
|
||||
p [4]byte
|
||||
}
|
||||
|
||||
func (w *truncWriter) Write(p []byte) (int, error) {
|
||||
n := 0
|
||||
|
||||
// fill buffer first for simplicity.
|
||||
if w.n < len(w.p) {
|
||||
n = copy(w.p[w.n:], p)
|
||||
p = p[n:]
|
||||
w.n += n
|
||||
if len(p) == 0 {
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
m := len(p)
|
||||
if m > len(w.p) {
|
||||
m = len(w.p)
|
||||
}
|
||||
|
||||
if nn, err := w.w.Write(w.p[:m]); err != nil {
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
copy(w.p[:], w.p[m:])
|
||||
copy(w.p[len(w.p)-m:], p[len(p)-m:])
|
||||
nn, err := w.w.Write(p[:len(p)-m])
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
type flateWriteWrapper struct {
|
||||
fw *flate.Writer
|
||||
tw *truncWriter
|
||||
p *sync.Pool
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Write(p []byte) (int, error) {
|
||||
if w.fw == nil {
|
||||
return 0, errWriteClosed
|
||||
}
|
||||
return w.fw.Write(p)
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Close() error {
|
||||
if w.fw == nil {
|
||||
return errWriteClosed
|
||||
}
|
||||
err1 := w.fw.Flush()
|
||||
w.p.Put(w.fw)
|
||||
w.fw = nil
|
||||
if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
|
||||
return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
|
||||
}
|
||||
err2 := w.tw.w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
type flateReadWrapper struct {
|
||||
fr io.ReadCloser
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Read(p []byte) (int, error) {
|
||||
if r.fr == nil {
|
||||
return 0, io.ErrClosedPipe
|
||||
}
|
||||
n, err := r.fr.Read(p)
|
||||
if err == io.EOF {
|
||||
// Preemptively place the reader back in the pool. This helps with
|
||||
// scenarios where the application does not call NextReader() soon after
|
||||
// this final read.
|
||||
r.Close()
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Close() error {
|
||||
if r.fr == nil {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
err := r.fr.Close()
|
||||
flateReaderPool.Put(r.fr)
|
||||
r.fr = nil
|
||||
return err
|
||||
}
|
1201 vendor/github.com/gorilla/websocket/conn.go (generated, vendored, new file)
File diff suppressed because it is too large.
15 vendor/github.com/gorilla/websocket/conn_write.go (generated, vendored, new file)
@@ -0,0 +1,15 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "net"

func (c *Conn) writeBufs(bufs ...[]byte) error {
	b := net.Buffers(bufs)
	_, err := b.WriteTo(c.conn)
	return err
}
18 vendor/github.com/gorilla/websocket/conn_write_legacy.go (generated, vendored, new file)
@@ -0,0 +1,18 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

func (c *Conn) writeBufs(bufs ...[]byte) error {
	for _, buf := range bufs {
		if len(buf) > 0 {
			if _, err := c.conn.Write(buf); err != nil {
				return err
			}
		}
	}
	return nil
}
227 vendor/github.com/gorilla/websocket/doc.go (generated, vendored, new file)
@@ -0,0 +1,227 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements the WebSocket protocol defined in RFC 6455.
|
||||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application calls
|
||||
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
// WriteBufferSize: 1024,
|
||||
// }
|
||||
//
|
||||
// func handler(w http.ResponseWriter, r *http.Request) {
|
||||
// conn, err := upgrader.Upgrade(w, r, nil)
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// ... Use conn to send and receive messages.
|
||||
// }
|
||||
//
|
||||
// Call the connection's WriteMessage and ReadMessage methods to send and
|
||||
// receive messages as a slice of bytes. This snippet of code shows how to echo
|
||||
// messages using these methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// if err := conn.WriteMessage(messageType, p); err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// In above snippet of code, p is a []byte and messageType is an int with value
|
||||
// websocket.BinaryMessage or websocket.TextMessage.
|
||||
//
|
||||
// An application can also send and receive messages using the io.WriteCloser
|
||||
// and io.Reader interfaces. To send a message, call the connection NextWriter
|
||||
// method to get an io.WriteCloser, write the message to the writer and close
|
||||
// the writer when done. To receive a message, call the connection NextReader
|
||||
// method to get an io.Reader and read until io.EOF is returned. This snippet
|
||||
// shows how to echo messages using the NextWriter and NextReader methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, r, err := conn.NextReader()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// w, err := conn.NextWriter(messageType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if _, err := io.Copy(w, r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := w.Close(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Data Messages
|
||||
//
|
||||
// The WebSocket protocol distinguishes between text and binary data messages.
|
||||
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
|
||||
// binary messages is left to the application.
|
||||
//
|
||||
// This package uses the TextMessage and BinaryMessage integer constants to
|
||||
// identify the two data message types. The ReadMessage and NextReader methods
|
||||
// return the type of the received message. The messageType argument to the
|
||||
// WriteMessage and NextWriter methods specifies the type of a sent message.
|
||||
//
|
||||
// It is the application's responsibility to ensure that text messages are
|
||||
// valid UTF-8 encoded text.
|
||||
//
|
||||
// Control Messages
|
||||
//
|
||||
// The WebSocket protocol defines three types of control messages: close, ping
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received close messages by calling the handler function
|
||||
// set with the SetCloseHandler method and by returning a *CloseError from the
|
||||
// NextReader, ReadMessage or the message Read method. The default close
|
||||
// handler sends a close message to the peer.
|
||||
//
|
||||
// Connections handle received ping messages by calling the handler function
|
||||
// set with the SetPingHandler method. The default ping handler sends a pong
|
||||
// message to the peer.
|
||||
//
|
||||
// Connections handle received pong messages by calling the handler function
|
||||
// set with the SetPongHandler method. The default pong handler does nothing.
|
||||
// If an application sends ping messages, then the application should set a
|
||||
// pong handler to receive the corresponding pong.
|
||||
//
|
||||
// The control message handler functions are called from the NextReader,
|
||||
// ReadMessage and message reader Read methods. The default close and ping
|
||||
// handlers can block these methods for a short time when the handler writes to
|
||||
// the connection.
|
||||
//
|
||||
// The application must read the connection to process close, ping and pong
|
||||
// messages sent from the peer. If the application is not otherwise interested
|
||||
// in messages from the peer, then the application should start a goroutine to
|
||||
// read and discard messages from the peer. A simple example is:
|
||||
//
|
||||
// func readLoop(c *websocket.Conn) {
|
||||
// for {
|
||||
// if _, _, err := c.NextReader(); err != nil {
|
||||
// c.Close()
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Concurrency
|
||||
//
|
||||
// Connections support one concurrent reader and one concurrent writer.
|
||||
//
|
||||
// Applications are responsible for ensuring that no more than one goroutine
|
||||
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
|
||||
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
|
||||
// that no more than one goroutine calls the read methods (NextReader,
|
||||
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
|
||||
// concurrently.
|
||||
//
|
||||
// The Close and WriteControl methods can be called concurrently with all other
|
||||
// methods.
|
||||
//
|
||||
// Origin Considerations
|
||||
//
|
||||
// Web browsers allow Javascript applications to open a WebSocket connection to
|
||||
// any host. It's up to the server to enforce an origin policy using the Origin
|
||||
// request header sent by the browser.
|
||||
//
|
||||
// The Upgrader calls the function specified in the CheckOrigin field to check
|
||||
// the origin. If the CheckOrigin function returns false, then the Upgrade
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and the Origin host is
|
||||
// not equal to the Host request header.
|
||||
//
|
||||
// The deprecated package-level Upgrade function does not perform origin
|
||||
// checking. The application is responsible for checking the Origin header
|
||||
// before calling the Upgrade function.
|
||||
//
|
||||
// Buffers
|
||||
//
|
||||
// Connections buffer network input and output to reduce the number
|
||||
// of system calls when reading or writing messages.
|
||||
//
|
||||
// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
|
||||
// Section 5 for a discussion of message framing. A WebSocket frame header is
|
||||
// written to the network each time a write buffer is flushed to the network.
|
||||
// Decreasing the size of the write buffer can increase the amount of framing
|
||||
// overhead on the connection.
|
||||
//
|
||||
// The buffer sizes in bytes are specified by the ReadBufferSize and
|
||||
// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
|
||||
// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
|
||||
// buffers created by the HTTP server when a buffer size field is set to zero.
|
||||
// The HTTP server buffers have a size of 4096 at the time of this writing.
|
||||
//
|
||||
// The buffer sizes do not limit the size of a message that can be read or
|
||||
// written by a connection.
|
||||
//
|
||||
// Buffers are held for the lifetime of the connection by default. If the
|
||||
// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
|
||||
// write buffer only when writing a message.
|
||||
//
|
||||
// Applications should tune the buffer sizes to balance memory use and
|
||||
// performance. Increasing the buffer size uses more memory, but can reduce the
|
||||
// number of system calls to read or write the network. In the case of writing,
|
||||
// increasing the buffer size can reduce the number of frame headers written to
|
||||
// the network.
|
||||
//
|
||||
// Some guidelines for setting buffer parameters are:
|
||||
//
|
||||
// Limit the buffer sizes to the maximum expected message size. Buffers larger
|
||||
// than the largest message do not provide any benefit.
|
||||
//
|
||||
// Depending on the distribution of message sizes, setting the buffer size to
|
||||
// a value less than the maximum expected message size can greatly reduce memory
|
||||
// use with a small impact on performance. Here's an example: If 99% of the
|
||||
// messages are smaller than 256 bytes and the maximum message size is 512
|
||||
// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
|
||||
// than a buffer size of 512 bytes. The memory savings is 50%.
|
||||
//
|
||||
// A write buffer pool is useful when the application has a modest number of
// writes over a large number of connections. When buffers are pooled, a larger
|
||||
// buffer size has a reduced impact on total memory use and has the benefit of
|
||||
// reducing system calls and frame overhead.
|
||||
//
|
||||
// Compression EXPERIMENTAL
|
||||
//
|
||||
// Per message compression extensions (RFC 7692) are experimentally supported
|
||||
// by this package in a limited capacity. Setting the EnableCompression option
|
||||
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
|
||||
// support.
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// EnableCompression: true,
|
||||
// }
|
||||
//
|
||||
// If compression was successfully negotiated with the connection's peer, any
|
||||
// message received in compressed form will be automatically decompressed.
|
||||
// All Read methods will return uncompressed bytes.
|
||||
//
|
||||
// Per message compression of messages written to a connection can be enabled
|
||||
// or disabled by calling the corresponding Conn method:
|
||||
//
|
||||
// conn.EnableWriteCompression(false)
|
||||
//
|
||||
// Currently this package does not support compression with "context takeover".
|
||||
// This means that messages must be compressed and decompressed in isolation,
|
||||
// without retaining sliding window or dictionary state across messages. For
|
||||
// more details refer to RFC 7692.
|
||||
//
|
||||
// Use of compression is experimental and may result in decreased performance.
|
||||
package websocket
|
3 vendor/github.com/gorilla/websocket/go.mod (generated, vendored, new file)
@@ -0,0 +1,3 @@
module github.com/gorilla/websocket

go 1.12
0 vendor/github.com/gorilla/websocket/go.sum (generated, vendored, new file, empty)
42 vendor/github.com/gorilla/websocket/join.go (generated, vendored, new file)
@@ -0,0 +1,42 @@
// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"io"
	"strings"
)

// JoinMessages concatenates received messages to create a single io.Reader.
// The string term is appended to each message. The returned reader does not
// support concurrent calls to the Read method.
func JoinMessages(c *Conn, term string) io.Reader {
	return &joinReader{c: c, term: term}
}

type joinReader struct {
	c    *Conn
	term string
	r    io.Reader
}

func (r *joinReader) Read(p []byte) (int, error) {
	if r.r == nil {
		var err error
		_, r.r, err = r.c.NextReader()
		if err != nil {
			return 0, err
		}
		if r.term != "" {
			r.r = io.MultiReader(r.r, strings.NewReader(r.term))
		}
	}
	n, err := r.r.Read(p)
	if err == io.EOF {
		err = nil
		r.r = nil
	}
	return n, err
}
60 vendor/github.com/gorilla/websocket/json.go (generated, vendored, new file)
@@ -0,0 +1,60 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"encoding/json"
	"io"
)

// WriteJSON writes the JSON encoding of v as a message.
//
// Deprecated: Use c.WriteJSON instead.
func WriteJSON(c *Conn, v interface{}) error {
	return c.WriteJSON(v)
}

// WriteJSON writes the JSON encoding of v as a message.
//
// See the documentation for encoding/json Marshal for details about the
// conversion of Go values to JSON.
func (c *Conn) WriteJSON(v interface{}) error {
	w, err := c.NextWriter(TextMessage)
	if err != nil {
		return err
	}
	err1 := json.NewEncoder(w).Encode(v)
	err2 := w.Close()
	if err1 != nil {
		return err1
	}
	return err2
}

// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// Deprecated: Use c.ReadJSON instead.
func ReadJSON(c *Conn, v interface{}) error {
	return c.ReadJSON(v)
}

// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// See the documentation for the encoding/json Unmarshal function for details
// about the conversion of JSON to a Go value.
func (c *Conn) ReadJSON(v interface{}) error {
	_, r, err := c.NextReader()
	if err != nil {
		return err
	}
	err = json.NewDecoder(r).Decode(v)
	if err == io.EOF {
		// One value is expected in the message.
		err = io.ErrUnexpectedEOF
	}
	return err
}
54 vendor/github.com/gorilla/websocket/mask.go (generated, vendored, new file)
@@ -0,0 +1,54 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

// +build !appengine

package websocket

import "unsafe"

const wordSize = int(unsafe.Sizeof(uintptr(0)))

func maskBytes(key [4]byte, pos int, b []byte) int {
	// Mask one byte at a time for small buffers.
	if len(b) < 2*wordSize {
		for i := range b {
			b[i] ^= key[pos&3]
			pos++
		}
		return pos & 3
	}

	// Mask one byte at a time to word boundary.
	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
		n = wordSize - n
		for i := range b[:n] {
			b[i] ^= key[pos&3]
			pos++
		}
		b = b[n:]
	}

	// Create aligned word size key.
	var k [wordSize]byte
	for i := range k {
		k[i] = key[(pos+i)&3]
	}
	kw := *(*uintptr)(unsafe.Pointer(&k))

	// Mask one word at a time.
	n := (len(b) / wordSize) * wordSize
	for i := 0; i < n; i += wordSize {
		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
	}

	// Mask one byte at a time for remaining bytes.
	b = b[n:]
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}

	return pos & 3
}
15 vendor/github.com/gorilla/websocket/mask_safe.go (generated, vendored, new file)
@@ -0,0 +1,15 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

// +build appengine

package websocket

func maskBytes(key [4]byte, pos int, b []byte) int {
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}
102 vendor/github.com/gorilla/websocket/prepared.go (generated, vendored, new file)
@@ -0,0 +1,102 @@
|
||||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PreparedMessage caches on the wire representations of a message payload.
|
||||
// Use PreparedMessage to efficiently send a message payload to multiple
|
||||
// connections. PreparedMessage is especially useful when compression is used
|
||||
// because the CPU and memory expensive compression operation can be executed
|
||||
// once for a given set of compression options.
|
||||
type PreparedMessage struct {
|
||||
messageType int
|
||||
data []byte
|
||||
mu sync.Mutex
|
||||
frames map[prepareKey]*preparedFrame
|
||||
}
|
||||
|
||||
// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
|
||||
type prepareKey struct {
|
||||
isServer bool
|
||||
compress bool
|
||||
compressionLevel int
|
||||
}
|
||||
|
||||
// preparedFrame contains data in wire representation.
|
||||
type preparedFrame struct {
|
||||
once sync.Once
|
||||
data []byte
|
||||
}
|
||||
|
||||
// NewPreparedMessage returns an initialized PreparedMessage. You can then send
|
||||
// it to connection using WritePreparedMessage method. Valid wire
|
||||
// representation will be calculated lazily only once for a set of current
|
||||
// connection options.
|
||||
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
|
||||
pm := &PreparedMessage{
|
||||
messageType: messageType,
|
||||
frames: make(map[prepareKey]*preparedFrame),
|
||||
data: data,
|
||||
}
|
||||
|
||||
// Prepare a plain server frame.
|
||||
_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// To protect against caller modifying the data argument, remember the data
|
||||
// copied to the plain server frame.
|
||||
pm.data = frameData[len(frameData)-len(data):]
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
|
||||
pm.mu.Lock()
|
||||
frame, ok := pm.frames[key]
|
||||
if !ok {
|
||||
frame = &preparedFrame{}
|
||||
pm.frames[key] = frame
|
||||
}
|
||||
pm.mu.Unlock()
|
||||
|
||||
var err error
|
||||
frame.once.Do(func() {
|
||||
// Prepare a frame using a 'fake' connection.
|
||||
// TODO: Refactor code in conn.go to allow more direct construction of
|
||||
// the frame.
|
||||
mu := make(chan struct{}, 1)
|
||||
mu <- struct{}{}
|
||||
var nc prepareConn
|
||||
c := &Conn{
|
||||
conn: &nc,
|
||||
mu: mu,
|
||||
isServer: key.isServer,
|
||||
compressionLevel: key.compressionLevel,
|
||||
enableWriteCompression: true,
|
||||
writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
|
||||
}
|
||||
if key.compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
}
|
||||
err = c.WriteMessage(pm.messageType, pm.data)
|
||||
frame.data = nc.buf.Bytes()
|
||||
})
|
||||
return pm.messageType, frame.data, err
|
||||
}
|
||||
|
||||
type prepareConn struct {
|
||||
buf bytes.Buffer
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
|
||||
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
|
77 vendor/github.com/gorilla/websocket/proxy.go (generated, vendored, new file)
@@ -0,0 +1,77 @@
|
||||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type netDialerFunc func(network, addr string) (net.Conn, error)
|
||||
|
||||
func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
|
||||
return fn(network, addr)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
|
||||
return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
|
||||
})
|
||||
}
|
||||
|
||||
type httpProxyDialer struct {
|
||||
proxyURL *url.URL
|
||||
forwardDial func(network, addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
|
||||
hostPort, _ := hostPortNoPort(hpd.proxyURL)
|
||||
conn, err := hpd.forwardDial(network, hostPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connectHeader := make(http.Header)
|
||||
if user := hpd.proxyURL.User; user != nil {
|
||||
proxyUser := user.Username()
|
||||
if proxyPassword, passwordSet := user.Password(); passwordSet {
|
||||
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
|
||||
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
|
||||
}
|
||||
}
|
||||
|
||||
connectReq := &http.Request{
|
||||
Method: "CONNECT",
|
||||
URL: &url.URL{Opaque: addr},
|
||||
Host: addr,
|
||||
Header: connectHeader,
|
||||
}
|
||||
|
||||
if err := connectReq.Write(conn); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read response. It's OK to use and discard buffered reader here because
|
||||
// the remote server does not speak until spoken to.
|
||||
br := bufio.NewReader(conn)
|
||||
resp, err := http.ReadResponse(br, connectReq)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
conn.Close()
|
||||
f := strings.SplitN(resp.Status, " ", 2)
|
||||
return nil, errors.New(f[1])
|
||||
}
|
||||
return conn, nil
|
||||
}
|
363 vendor/github.com/gorilla/websocket/server.go (generated, vendored, new file)
@@ -0,0 +1,363 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HandshakeError describes an error with the handshake from the peer.
|
||||
type HandshakeError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e HandshakeError) Error() string { return e.message }
|
||||
|
||||
// Upgrader specifies parameters for upgrading an HTTP connection to a
|
||||
// WebSocket connection.
|
||||
type Upgrader struct {
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then buffers allocated by the HTTP server are used. The
|
||||
// I/O buffer sizes do not limit the size of the messages that can be sent
|
||||
// or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is not nil, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client. If there's no match, then no protocol is
|
||||
// negotiated (the Sec-Websocket-Protocol header is not included in the
|
||||
// handshake response).
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
// is nil, then http.Error is used to generate the HTTP response.
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, then a safe default is used: return false if the
|
||||
// Origin request header is present and the origin host is not equal to
|
||||
// request Host header.
|
||||
//
|
||||
// A CheckOrigin function should carefully validate the request origin to
|
||||
// prevent cross-site request forgery.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
|
||||
// EnableCompression specify if the server should attempt to negotiate per
|
||||
// message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
}
|
||||
|
||||
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
|
||||
err := HandshakeError{reason}
|
||||
if u.Error != nil {
|
||||
u.Error(w, r, status, err)
|
||||
} else {
|
||||
w.Header().Set("Sec-Websocket-Version", "13")
|
||||
http.Error(w, http.StatusText(status), status)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
|
||||
func checkSameOrigin(r *http.Request) bool {
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(origin[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return equalASCIIFold(u.Host, r.Host)
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
if u.Subprotocols != nil {
|
||||
clientProtocols := Subprotocols(r)
|
||||
for _, serverProtocol := range u.Subprotocols {
|
||||
for _, clientProtocol := range clientProtocols {
|
||||
if clientProtocol == serverProtocol {
|
||||
return clientProtocol
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if responseHeader != nil {
|
||||
return responseHeader.Get("Sec-Websocket-Protocol")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// application negotiated subprotocol (Sec-WebSocket-Protocol).
|
||||
//
|
||||
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
|
||||
// response.
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
const badHandshake = "websocket: the client is not using the websocket protocol: "
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
||||
// Negotiate PMCE
|
||||
var compress bool
|
||||
if u.EnableCompression {
|
||||
for _, ext := range parseExtensions(r.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
compress = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var brw *bufio.ReadWriter
|
||||
netConn, brw, err := h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
if brw.Reader.Buffered() > 0 {
|
||||
netConn.Close()
|
||||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
var br *bufio.Reader
|
||||
if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
|
||||
// Reuse hijacked buffered reader as connection reader.
|
||||
br = brw.Reader
|
||||
}
|
||||
|
||||
buf := bufioWriterBuffer(netConn, brw.Writer)
|
||||
|
||||
var writeBuf []byte
|
||||
if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
|
||||
// Reuse hijacked write buffer as connection buffer.
|
||||
writeBuf = buf
|
||||
}
|
||||
|
||||
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
if compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
c.newDecompressionReader = decompressNoContextTakeover
|
||||
}
|
||||
|
||||
// Use larger of hijacked buffer and connection write buffer for header.
|
||||
p := buf
|
||||
if len(c.writeBuf) > len(p) {
|
||||
p = c.writeBuf
|
||||
}
|
||||
p = p[:0]
|
||||
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-WebSocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
if compress {
|
||||
p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if b <= 31 {
|
||||
// prevent response splitting.
|
||||
b = ' '
|
||||
}
|
||||
p = append(p, b)
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
// Clear deadlines set by HTTP server.
|
||||
netConn.SetDeadline(time.Time{})
|
||||
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
|
||||
}
|
||||
if _, err = netConn.Write(p); err != nil {
|
||||
netConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// Deprecated: Use websocket.Upgrader instead.
|
||||
//
|
||||
// Upgrade does not perform origin checking. The application is responsible for
|
||||
// checking the Origin header before calling Upgrade. An example implementation
|
||||
// of the same origin policy check is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", http.StatusForbidden)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// If the endpoint supports subprotocols, then the application is responsible
|
||||
// for negotiating the protocol used on the connection. Use the Subprotocols()
|
||||
// function to get the subprotocols requested by the client. Use the
|
||||
// Sec-Websocket-Protocol response header to specify the subprotocol selected
|
||||
// by the application.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// The connection buffers IO to the underlying network connection. The
|
||||
// readBufSize and writeBufSize parameters specify the size of the buffers to
|
||||
// use. Messages can be larger than the buffers.
|
||||
//
|
||||
// If the request is not a valid WebSocket handshake, then Upgrade returns an
|
||||
// error of type HandshakeError. Applications should handle this error by
|
||||
// replying to the client with an HTTP error response.
|
||||
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
|
||||
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
|
||||
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
// don't return errors to maintain backwards compatibility
|
||||
}
|
||||
u.CheckOrigin = func(r *http.Request) bool {
|
||||
// allow all connections by default
|
||||
return true
|
||||
}
|
||||
return u.Upgrade(w, r, responseHeader)
|
||||
}
|
||||
|
||||
// Subprotocols returns the subprotocols requested by the client in the
|
||||
// Sec-Websocket-Protocol header.
|
||||
func Subprotocols(r *http.Request) []string {
|
||||
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
|
||||
if h == "" {
|
||||
return nil
|
||||
}
|
||||
protocols := strings.Split(h, ",")
|
||||
for i := range protocols {
|
||||
protocols[i] = strings.TrimSpace(protocols[i])
|
||||
}
|
||||
return protocols
|
||||
}
|
||||
|
||||
// IsWebSocketUpgrade returns true if the client requested upgrade to the
|
||||
// WebSocket protocol.
|
||||
func IsWebSocketUpgrade(r *http.Request) bool {
|
||||
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
|
||||
tokenListContainsValue(r.Header, "Upgrade", "websocket")
|
||||
}
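
`IsWebSocketUpgrade` makes it straightforward to serve WebSocket and ordinary HTTP clients on the same path. A small sketch (handler name and response text are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{}

// serve answers plain HTTP requests normally and upgrades WebSocket clients.
func serve(w http.ResponseWriter, r *http.Request) {
	if !websocket.IsWebSocketUpgrade(r) {
		fmt.Fprintln(w, "plain HTTP response; connect with a WebSocket client to upgrade")
		return
	}
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}
	conn.Close()
}

func main() {
	http.HandleFunc("/", serve)
	http.ListenAndServe(":8080", nil)
}
```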
|
||||
|
||||
// bufioReaderSize returns the size of a bufio.Reader.
|
||||
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
|
||||
// This code assumes that peek on a reset reader returns
|
||||
// bufio.Reader.buf[:0].
|
||||
// TODO: Use bufio.Reader.Size() after Go 1.10
|
||||
br.Reset(originalReader)
|
||||
if p, err := br.Peek(0); err == nil {
|
||||
return cap(p)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// writeHook is an io.Writer that records the last slice passed to it via
|
||||
// io.Writer.Write.
|
||||
type writeHook struct {
|
||||
p []byte
|
||||
}
|
||||
|
||||
func (wh *writeHook) Write(p []byte) (int, error) {
|
||||
wh.p = p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// bufioWriterBuffer grabs the buffer from a bufio.Writer.
|
||||
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
|
||||
// This code assumes that bufio.Writer.buf[:1] is passed to the
|
||||
// bufio.Writer's underlying writer.
|
||||
var wh writeHook
|
||||
bw.Reset(&wh)
|
||||
bw.WriteByte(0)
|
||||
bw.Flush()
|
||||
|
||||
bw.Reset(originalWriter)
|
||||
|
||||
return wh.p[:cap(wh.p)]
|
||||
}
|
19
vendor/github.com/gorilla/websocket/trace.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
// +build go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http/httptrace"
|
||||
)
|
||||
|
||||
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if trace.TLSHandshakeStart != nil {
|
||||
trace.TLSHandshakeStart()
|
||||
}
|
||||
err := doHandshake(tlsConn, cfg)
|
||||
if trace.TLSHandshakeDone != nil {
|
||||
trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
|
||||
}
|
||||
return err
|
||||
}
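
This build-tagged helper fires the `TLSHandshakeStart`/`TLSHandshakeDone` hooks of an `httptrace.ClientTrace`. On the client side the trace is attached to the dial context; a sketch, assuming a `Dialer.DialContext` client and a placeholder `wss://` endpoint:

```go
package main

import (
	"context"
	"crypto/tls"
	"log"
	"net/http/httptrace"

	"github.com/gorilla/websocket"
)

func main() {
	trace := &httptrace.ClientTrace{
		TLSHandshakeStart: func() { log.Println("TLS handshake started") },
		TLSHandshakeDone: func(cs tls.ConnectionState, err error) {
			log.Printf("TLS handshake done (version %#x): %v", cs.Version, err)
		},
	}
	ctx := httptrace.WithClientTrace(context.Background(), trace)

	// Placeholder endpoint; replace with a real wss:// URL.
	conn, _, err := websocket.DefaultDialer.DialContext(ctx, "wss://echo.example.com/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}
```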
|
12
vendor/github.com/gorilla/websocket/trace_17.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// +build !go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http/httptrace"
|
||||
)
|
||||
|
||||
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
return doHandshake(tlsConn, cfg)
|
||||
}
|
283
vendor/github.com/gorilla/websocket/util.go
generated
vendored
Normal file
@@ -0,0 +1,283 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
||||
func computeAcceptKey(challengeKey string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(challengeKey))
|
||||
h.Write(keyGUID)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func generateChallengeKey() (string, error) {
|
||||
p := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(p), nil
|
||||
}
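
`computeAcceptKey` is the RFC 6455 accept-key derivation: SHA-1 over the client challenge plus the fixed GUID, base64-encoded. Since the function is unexported, the sketch below reproduces the computation independently and checks it against the example handshake in RFC 6455 section 1.3:

```go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// Same derivation as computeAcceptKey above: SHA-1(challenge + GUID), base64.
func acceptKey(challenge string) string {
	const keyGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
	h := sha1.New()
	h.Write([]byte(challenge))
	h.Write([]byte(keyGUID))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

func main() {
	// Challenge key from the example handshake in RFC 6455, section 1.3.
	fmt.Println(acceptKey("dGhlIHNhbXBsZSBub25jZQ=="))
	// Expected output: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
}
```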
|
||||
|
||||
// Token octets per RFC 2616.
|
||||
var isTokenOctet = [256]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'W': true,
|
||||
'V': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}
|
||||
|
||||
// skipSpace returns a slice of the string s with all leading RFC 2616 linear
|
||||
// whitespace removed.
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if b := s[i]; b != ' ' && b != '\t' {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
// nextToken returns the leading RFC 2616 token of s and the string following
|
||||
// the token.
|
||||
func nextToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if !isTokenOctet[s[i]] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
|
||||
// and the string following the token or quoted string.
|
||||
func nextTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return nextToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// equalASCIIFold returns true if s is equal to t with ASCII case folding as
|
||||
// defined in RFC 4790.
|
||||
func equalASCIIFold(s, t string) bool {
|
||||
for s != "" && t != "" {
|
||||
sr, size := utf8.DecodeRuneInString(s)
|
||||
s = s[size:]
|
||||
tr, size := utf8.DecodeRuneInString(t)
|
||||
t = t[size:]
|
||||
if sr == tr {
|
||||
continue
|
||||
}
|
||||
if 'A' <= sr && sr <= 'Z' {
|
||||
sr = sr + 'a' - 'A'
|
||||
}
|
||||
if 'A' <= tr && tr <= 'Z' {
|
||||
tr = tr + 'a' - 'A'
|
||||
}
|
||||
if sr != tr {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s == t
|
||||
}
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains a token equal to value with ASCII case folding.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
headers:
|
||||
for _, s := range header[name] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
if equalASCIIFold(t, value) {
|
||||
return true
|
||||
}
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
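
A hedged sketch of how `tokenListContainsValue` behaves on a comma-separated `Connection` header, written as an in-package test (it would have to live in the `websocket` package because the function is unexported; the cases are illustrative):

```go
package websocket

import (
	"net/http"
	"testing"
)

func TestTokenListContainsValueSketch(t *testing.T) {
	h := http.Header{}
	h.Add("Connection", "keep-alive, Upgrade")
	h.Add("Connection", "close")

	if !tokenListContainsValue(h, "Connection", "upgrade") {
		t.Error(`expected "upgrade" to match with ASCII case folding`)
	}
	if tokenListContainsValue(h, "Connection", "websocket") {
		t.Error(`"websocket" is not present in the Connection header`)
	}
}
```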
|
||||
|
||||
// parseExtensions parses WebSocket extensions from a header.
|
||||
func parseExtensions(header http.Header) []map[string]string {
|
||||
// From RFC 6455:
|
||||
//
|
||||
// Sec-WebSocket-Extensions = extension-list
|
||||
// extension-list = 1#extension
|
||||
// extension = extension-token *( ";" extension-param )
|
||||
// extension-token = registered-token
|
||||
// registered-token = token
|
||||
// extension-param = token [ "=" (token | quoted-string) ]
|
||||
// ;When using the quoted-string syntax variant, the value
|
||||
// ;after quoted-string unescaping MUST conform to the
|
||||
// ;'token' ABNF.
|
||||
|
||||
var result []map[string]string
|
||||
headers:
|
||||
for _, s := range header["Sec-Websocket-Extensions"] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
ext := map[string]string{"": t}
|
||||
for {
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ";") {
|
||||
break
|
||||
}
|
||||
var k string
|
||||
k, s = nextToken(skipSpace(s[1:]))
|
||||
if k == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
var v string
|
||||
if strings.HasPrefix(s, "=") {
|
||||
v, s = nextTokenOrQuoted(skipSpace(s[1:]))
|
||||
s = skipSpace(s)
|
||||
}
|
||||
if s != "" && s[0] != ',' && s[0] != ';' {
|
||||
continue headers
|
||||
}
|
||||
ext[k] = v
|
||||
}
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
result = append(result, ext)
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
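
A sketch of the shape `parseExtensions` produces for a typical `permessage-deflate` offer: each extension becomes a map whose empty-string key holds the extension token and whose other keys hold the parameters. Again written as an in-package test because the function is unexported:

```go
package websocket

import (
	"net/http"
	"reflect"
	"testing"
)

func TestParseExtensionsSketch(t *testing.T) {
	h := http.Header{}
	h.Add("Sec-Websocket-Extensions", "permessage-deflate; client_max_window_bits=15; server_no_context_takeover")

	got := parseExtensions(h)
	want := []map[string]string{{
		"": "permessage-deflate",
		"client_max_window_bits": "15",
		"server_no_context_takeover": "",
	}}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("parseExtensions = %v, want %v", got, want)
	}
}
```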
|
473
vendor/github.com/gorilla/websocket/x_net_proxy.go
generated
vendored
Normal file
@@ -0,0 +1,473 @@
|
||||
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
|
||||
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
|
||||
|
||||
// Package proxy provides support for a variety of protocols to proxy network
|
||||
// data.
|
||||
//
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type proxy_direct struct{}
|
||||
|
||||
// Direct is a direct proxy: one that makes network connections directly.
|
||||
var proxy_Direct = proxy_direct{}
|
||||
|
||||
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, addr)
|
||||
}
|
||||
|
||||
// A PerHost directs connections to a default Dialer unless the host name
|
||||
// requested matches one of a number of exceptions.
|
||||
type proxy_PerHost struct {
|
||||
def, bypass proxy_Dialer
|
||||
|
||||
bypassNetworks []*net.IPNet
|
||||
bypassIPs []net.IP
|
||||
bypassZones []string
|
||||
bypassHosts []string
|
||||
}
|
||||
|
||||
// NewPerHost returns a PerHost Dialer that directs connections to either
|
||||
// defaultDialer or bypass, depending on whether the connection matches one of
|
||||
// the configured rules.
|
||||
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
|
||||
return &proxy_PerHost{
|
||||
def: defaultDialer,
|
||||
bypass: bypass,
|
||||
}
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network through either
|
||||
// defaultDialer or bypass.
|
||||
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.dialerForRequest(host).Dial(network, addr)
|
||||
}
|
||||
|
||||
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
for _, net := range p.bypassNetworks {
|
||||
if net.Contains(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassIP := range p.bypassIPs {
|
||||
if bypassIP.Equal(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
for _, zone := range p.bypassZones {
|
||||
if strings.HasSuffix(host, zone) {
|
||||
return p.bypass
|
||||
}
|
||||
if host == zone[1:] {
|
||||
// For a zone ".example.com", we match "example.com"
|
||||
// too.
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassHost := range p.bypassHosts {
|
||||
if bypassHost == host {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
// AddFromString parses a string that contains comma-separated values
|
||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||
// IP address, a CIDR range, a zone (*.example.com) or a host name
|
||||
// (localhost). A best effort is made to parse the string and errors are
|
||||
// ignored.
|
||||
func (p *proxy_PerHost) AddFromString(s string) {
|
||||
hosts := strings.Split(s, ",")
|
||||
for _, host := range hosts {
|
||||
host = strings.TrimSpace(host)
|
||||
if len(host) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(host, "/") {
|
||||
// We assume that it's a CIDR address like 127.0.0.0/8
|
||||
if _, net, err := net.ParseCIDR(host); err == nil {
|
||||
p.AddNetwork(net)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
p.AddIP(ip)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(host, "*.") {
|
||||
p.AddZone(host[1:])
|
||||
continue
|
||||
}
|
||||
p.AddHost(host)
|
||||
}
|
||||
}
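
The bundled `proxy_*` identifiers are unexported, but they were generated from `golang.org/x/net/proxy`, which exposes the same `PerHost` behaviour. A sketch of `AddFromString` with a NO_PROXY-style rule list (the proxy address and hosts are illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/net/proxy"
)

func main() {
	// Route most traffic through a SOCKS5 proxy, but bypass it for the
	// entries below: an IP, a CIDR range, a zone, and a host name.
	socks, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		panic(err)
	}
	perHost := proxy.NewPerHost(socks, proxy.Direct)
	perHost.AddFromString("10.0.0.1,192.168.0.0/16,*.internal.example.com,localhost")

	conn, err := perHost.Dial("tcp", "localhost:6379")
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	conn.Close()
}
```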
|
||||
|
||||
// AddIP specifies an IP address that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match an IP.
|
||||
func (p *proxy_PerHost) AddIP(ip net.IP) {
|
||||
p.bypassIPs = append(p.bypassIPs, ip)
|
||||
}
|
||||
|
||||
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match.
|
||||
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
|
||||
p.bypassNetworks = append(p.bypassNetworks, net)
|
||||
}
|
||||
|
||||
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
|
||||
// "example.com" matches "example.com" and all of its subdomains.
|
||||
func (p *proxy_PerHost) AddZone(zone string) {
|
||||
if strings.HasSuffix(zone, ".") {
|
||||
zone = zone[:len(zone)-1]
|
||||
}
|
||||
if !strings.HasPrefix(zone, ".") {
|
||||
zone = "." + zone
|
||||
}
|
||||
p.bypassZones = append(p.bypassZones, zone)
|
||||
}
|
||||
|
||||
// AddHost specifies a host name that will use the bypass proxy.
|
||||
func (p *proxy_PerHost) AddHost(host string) {
|
||||
if strings.HasSuffix(host, ".") {
|
||||
host = host[:len(host)-1]
|
||||
}
|
||||
p.bypassHosts = append(p.bypassHosts, host)
|
||||
}
|
||||
|
||||
// A Dialer is a means to establish a connection.
|
||||
type proxy_Dialer interface {
|
||||
// Dial connects to the given address via the proxy.
|
||||
Dial(network, addr string) (c net.Conn, err error)
|
||||
}
|
||||
|
||||
// Auth contains authentication parameters that specific Dialers may require.
|
||||
type proxy_Auth struct {
|
||||
User, Password string
|
||||
}
|
||||
|
||||
// FromEnvironment returns the dialer specified by the proxy related variables in
|
||||
// the environment.
|
||||
func proxy_FromEnvironment() proxy_Dialer {
|
||||
allProxy := proxy_allProxyEnv.Get()
|
||||
if len(allProxy) == 0 {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
proxyURL, err := url.Parse(allProxy)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
noProxy := proxy_noProxyEnv.Get()
|
||||
if len(noProxy) == 0 {
|
||||
return proxy
|
||||
}
|
||||
|
||||
perHost := proxy_NewPerHost(proxy, proxy_Direct)
|
||||
perHost.AddFromString(noProxy)
|
||||
return perHost
|
||||
}
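
The same logic is available upstream as `proxy.FromEnvironment`. A sketch of the `ALL_PROXY`/`NO_PROXY`-driven selection (values are illustrative; note that, as with the `envOnce` cache above, the variables must be set before the first lookup):

```go
package main

import (
	"log"
	"os"

	"golang.org/x/net/proxy"
)

func main() {
	// ALL_PROXY picks the dialer, NO_PROXY lists the bypass rules.
	os.Setenv("ALL_PROXY", "socks5://127.0.0.1:1080") // illustrative values
	os.Setenv("NO_PROXY", "localhost,*.corp.example.com")

	dialer := proxy.FromEnvironment()
	conn, err := dialer.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}
```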
|
||||
|
||||
// proxySchemes is a map from URL schemes to a function that creates a Dialer
|
||||
// from a URL with such a scheme.
|
||||
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
|
||||
|
||||
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
|
||||
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
|
||||
// by FromURL.
|
||||
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
|
||||
if proxy_proxySchemes == nil {
|
||||
proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
|
||||
}
|
||||
proxy_proxySchemes[scheme] = f
|
||||
}
|
||||
|
||||
// FromURL returns a Dialer given a URL specification and an underlying
|
||||
// Dialer for it to make network requests.
|
||||
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
var auth *proxy_Auth
|
||||
if u.User != nil {
|
||||
auth = new(proxy_Auth)
|
||||
auth.User = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
auth.Password = p
|
||||
}
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "socks5":
|
||||
return proxy_SOCKS5("tcp", u.Host, auth, forward)
|
||||
}
|
||||
|
||||
// If the scheme doesn't match any of the built-in schemes, see if it
|
||||
// was registered by another package.
|
||||
if proxy_proxySchemes != nil {
|
||||
if f, ok := proxy_proxySchemes[u.Scheme]; ok {
|
||||
return f(u, forward)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
||||
}
|
||||
|
||||
var (
|
||||
proxy_allProxyEnv = &proxy_envOnce{
|
||||
names: []string{"ALL_PROXY", "all_proxy"},
|
||||
}
|
||||
proxy_noProxyEnv = &proxy_envOnce{
|
||||
names: []string{"NO_PROXY", "no_proxy"},
|
||||
}
|
||||
)
|
||||
|
||||
// envOnce looks up an environment variable (optionally by multiple
|
||||
// names) once. It mitigates expensive lookups on some platforms
|
||||
// (e.g. Windows).
|
||||
// (Borrowed from net/http/transport.go)
|
||||
type proxy_envOnce struct {
|
||||
names []string
|
||||
once sync.Once
|
||||
val string
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) Get() string {
|
||||
e.once.Do(e.init)
|
||||
return e.val
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) init() {
|
||||
for _, n := range e.names {
|
||||
e.val = os.Getenv(n)
|
||||
if e.val != "" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
|
||||
// with an optional username and password. See RFC 1928 and RFC 1929.
|
||||
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
s := &proxy_socks5{
|
||||
network: network,
|
||||
addr: addr,
|
||||
forward: forward,
|
||||
}
|
||||
if auth != nil {
|
||||
s.user = auth.User
|
||||
s.password = auth.Password
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
type proxy_socks5 struct {
|
||||
user, password string
|
||||
network, addr string
|
||||
forward proxy_Dialer
|
||||
}
|
||||
|
||||
const proxy_socks5Version = 5
|
||||
|
||||
const (
|
||||
proxy_socks5AuthNone = 0
|
||||
proxy_socks5AuthPassword = 2
|
||||
)
|
||||
|
||||
const proxy_socks5Connect = 1
|
||||
|
||||
const (
|
||||
proxy_socks5IP4 = 1
|
||||
proxy_socks5Domain = 3
|
||||
proxy_socks5IP6 = 4
|
||||
)
|
||||
|
||||
var proxy_socks5Errors = []string{
|
||||
"",
|
||||
"general failure",
|
||||
"connection forbidden",
|
||||
"network unreachable",
|
||||
"host unreachable",
|
||||
"connection refused",
|
||||
"TTL expired",
|
||||
"command not supported",
|
||||
"address type not supported",
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
|
||||
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp6", "tcp4":
|
||||
default:
|
||||
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
|
||||
}
|
||||
|
||||
conn, err := s.forward.Dial(s.network, s.addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.connect(conn, addr); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// connect takes an existing connection to a socks5 proxy server,
|
||||
// and commands the server to extend that connection to target,
|
||||
// which must be a canonical address with a host and port.
|
||||
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
|
||||
host, portStr, err := net.SplitHostPort(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to parse port number: " + portStr)
|
||||
}
|
||||
if port < 1 || port > 0xffff {
|
||||
return errors.New("proxy: port number out of range: " + portStr)
|
||||
}
|
||||
|
||||
// the size here is just an estimate
|
||||
buf := make([]byte, 0, 6+len(host))
|
||||
|
||||
buf = append(buf, proxy_socks5Version)
|
||||
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
|
||||
buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
|
||||
} else {
|
||||
buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
|
||||
}
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
if buf[0] != 5 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
if buf[1] == 0xff {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
||||
}
|
||||
|
||||
// See RFC 1929
|
||||
if buf[1] == proxy_socks5AuthPassword {
|
||||
buf = buf[:0]
|
||||
buf = append(buf, 1 /* password protocol version */)
|
||||
buf = append(buf, uint8(len(s.user)))
|
||||
buf = append(buf, s.user...)
|
||||
buf = append(buf, uint8(len(s.password)))
|
||||
buf = append(buf, s.password...)
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if buf[1] != 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf[:0]
|
||||
buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
buf = append(buf, proxy_socks5IP4)
|
||||
ip = ip4
|
||||
} else {
|
||||
buf = append(buf, proxy_socks5IP6)
|
||||
}
|
||||
buf = append(buf, ip...)
|
||||
} else {
|
||||
if len(host) > 255 {
|
||||
return errors.New("proxy: destination host name too long: " + host)
|
||||
}
|
||||
buf = append(buf, proxy_socks5Domain)
|
||||
buf = append(buf, byte(len(host)))
|
||||
buf = append(buf, host...)
|
||||
}
|
||||
buf = append(buf, byte(port>>8), byte(port))
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
failure := "unknown error"
|
||||
if int(buf[1]) < len(proxy_socks5Errors) {
|
||||
failure = proxy_socks5Errors[buf[1]]
|
||||
}
|
||||
|
||||
if len(failure) > 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
||||
}
|
||||
|
||||
bytesToDiscard := 0
|
||||
switch buf[3] {
|
||||
case proxy_socks5IP4:
|
||||
bytesToDiscard = net.IPv4len
|
||||
case proxy_socks5IP6:
|
||||
bytesToDiscard = net.IPv6len
|
||||
case proxy_socks5Domain:
|
||||
_, err := io.ReadFull(conn, buf[:1])
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
bytesToDiscard = int(buf[0])
|
||||
default:
|
||||
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
||||
}
|
||||
|
||||
if cap(buf) < bytesToDiscard {
|
||||
buf = make([]byte, bytesToDiscard)
|
||||
} else {
|
||||
buf = buf[:bytesToDiscard]
|
||||
}
|
||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
// Also need to discard the port number
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
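
For reference when reading `connect` above or a packet capture, this sketch builds the same RFC 1928 CONNECT request for a domain-name target and prints its byte layout:

```go
package main

import "fmt"

// connectRequest mirrors the request that connect() above sends for a
// domain-name target (address type 3), per RFC 1928.
func connectRequest(host string, port int) []byte {
	buf := []byte{5 /* version */, 1 /* CONNECT */, 0 /* reserved */, 3 /* domain */}
	buf = append(buf, byte(len(host)))
	buf = append(buf, host...)
	buf = append(buf, byte(port>>8), byte(port))
	return buf
}

func main() {
	fmt.Printf("% x\n", connectRequest("example.com", 443))
	// Output: 05 01 00 03 0b 65 78 61 6d 70 6c 65 2e 63 6f 6d 01 bb
}
```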
|
1
vendor/github.com/opencontainers/go-digest/.mailmap
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
Stephen J Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
|
12
vendor/github.com/opencontainers/go-digest/.pullapprove.yml
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
approve_by_comment: true
|
||||
approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
|
||||
reject_regex: ^Rejected
|
||||
reset_on_push: true
|
||||
author_approval: ignored
|
||||
signed_off_by:
|
||||
required: true
|
||||
reviewers:
|
||||
teams:
|
||||
- go-digest-maintainers
|
||||
name: default
|
||||
required: 2
|
4
vendor/github.com/opencontainers/go-digest/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.7
|
||||
- master
|
72
vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
# Contributing to Docker open source projects
|
||||
|
||||
Want to hack on this project? Awesome! Here are instructions to get you started.
|
||||
|
||||
This project is a part of the [Docker](https://www.docker.com) project, and follows
|
||||
the same rules and principles. If you're already familiar with the way
|
||||
Docker does things, you'll feel right at home.
|
||||
|
||||
Otherwise, go read Docker's
|
||||
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
|
||||
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
|
||||
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
|
||||
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
|
||||
|
||||
For an in-depth description of our contribution process, visit the
|
||||
contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
|
||||
|
||||
### Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the patch. Your
|
||||
signature certifies that you wrote the patch or otherwise have the right to pass
|
||||
it on as an open-source patch. The rules are pretty simple: if you can certify
|
||||
the below (from [developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
1 Letterman Drive
|
||||
Suite D4700
|
||||
San Francisco, CA, 94129
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
Then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
|
||||
Use your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||
|
||||
If you set your `user.name` and `user.email` git configs, you can sign your
|
||||
commit automatically with `git commit -s`.
|
191
vendor/github.com/opencontainers/go-digest/LICENSE.code
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2016 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
425
vendor/github.com/opencontainers/go-digest/LICENSE.docs
generated
vendored
Normal file
@@ -0,0 +1,425 @@
|
||||
Attribution-ShareAlike 4.0 International
|
||||
|
||||
=======================================================================
|
||||
|
||||
Creative Commons Corporation ("Creative Commons") is not a law firm and
|
||||
does not provide legal services or legal advice. Distribution of
|
||||
Creative Commons public licenses does not create a lawyer-client or
|
||||
other relationship. Creative Commons makes its licenses and related
|
||||
information available on an "as-is" basis. Creative Commons gives no
|
||||
warranties regarding its licenses, any material licensed under their
|
||||
terms and conditions, or any related information. Creative Commons
|
||||
disclaims all liability for damages resulting from their use to the
|
||||
fullest extent possible.
|
||||
|
||||
Using Creative Commons Public Licenses
|
||||
|
||||
Creative Commons public licenses provide a standard set of terms and
|
||||
conditions that creators and other rights holders may use to share
|
||||
original works of authorship and other material subject to copyright
|
||||
and certain other rights specified in the public license below. The
|
||||
following considerations are for informational purposes only, are not
|
||||
exhaustive, and do not form part of our licenses.
|
||||
|
||||
Considerations for licensors: Our public licenses are
|
||||
intended for use by those authorized to give the public
|
||||
permission to use material in ways otherwise restricted by
|
||||
copyright and certain other rights. Our licenses are
|
||||
irrevocable. Licensors should read and understand the terms
|
||||
and conditions of the license they choose before applying it.
|
||||
Licensors should also secure all rights necessary before
|
||||
applying our licenses so that the public can reuse the
|
||||
material as expected. Licensors should clearly mark any
|
||||
material not subject to the license. This includes other CC-
|
||||
licensed material, or material used under an exception or
|
||||
limitation to copyright. More considerations for licensors:
|
||||
wiki.creativecommons.org/Considerations_for_licensors
|
||||
|
||||
Considerations for the public: By using one of our public
|
||||
licenses, a licensor grants the public permission to use the
|
||||
licensed material under specified terms and conditions. If
|
||||
the licensor's permission is not necessary for any reason--for
|
||||
example, because of any applicable exception or limitation to
|
||||
copyright--then that use is not regulated by the license. Our
|
||||
licenses grant only permissions under copyright and certain
|
||||
other rights that a licensor has authority to grant. Use of
|
||||
the licensed material may still be restricted for other
|
||||
reasons, including because others have copyright or other
|
||||
rights in the material. A licensor may make special requests,
|
||||
such as asking that all changes be marked or described.
|
||||
Although not required by our licenses, you are encouraged to
|
||||
respect those requests where reasonable. More_considerations
|
||||
for the public:
|
||||
wiki.creativecommons.org/Considerations_for_licensees
|
||||
|
||||
=======================================================================
|
||||
|
||||
Creative Commons Attribution-ShareAlike 4.0 International Public
|
||||
License
|
||||
|
||||
By exercising the Licensed Rights (defined below), You accept and agree
|
||||
to be bound by the terms and conditions of this Creative Commons
|
||||
Attribution-ShareAlike 4.0 International Public License ("Public
|
||||
License"). To the extent this Public License may be interpreted as a
|
||||
contract, You are granted the Licensed Rights in consideration of Your
|
||||
acceptance of these terms and conditions, and the Licensor grants You
|
||||
such rights in consideration of benefits the Licensor receives from
|
||||
making the Licensed Material available under these terms and
|
||||
conditions.
|
||||
|
||||
|
||||
Section 1 -- Definitions.
|
||||
|
||||
a. Adapted Material means material subject to Copyright and Similar
|
||||
Rights that is derived from or based upon the Licensed Material
|
||||
and in which the Licensed Material is translated, altered,
|
||||
arranged, transformed, or otherwise modified in a manner requiring
|
||||
permission under the Copyright and Similar Rights held by the
|
||||
Licensor. For purposes of this Public License, where the Licensed
|
||||
Material is a musical work, performance, or sound recording,
|
||||
Adapted Material is always produced where the Licensed Material is
|
||||
synched in timed relation with a moving image.
|
||||
|
||||
b. Adapter's License means the license You apply to Your Copyright
|
||||
and Similar Rights in Your contributions to Adapted Material in
|
||||
accordance with the terms and conditions of this Public License.
|
||||
|
||||
c. BY-SA Compatible License means a license listed at
|
||||
creativecommons.org/compatiblelicenses, approved by Creative
|
||||
Commons as essentially the equivalent of this Public License.
|
||||
|
||||
d. Copyright and Similar Rights means copyright and/or similar rights
|
||||
closely related to copyright including, without limitation,
|
||||
performance, broadcast, sound recording, and Sui Generis Database
|
||||
Rights, without regard to how the rights are labeled or
|
||||
categorized. For purposes of this Public License, the rights
|
||||
specified in Section 2(b)(1)-(2) are not Copyright and Similar
|
||||
Rights.
|
||||
|
||||
e. Effective Technological Measures means those measures that, in the
|
||||
absence of proper authority, may not be circumvented under laws
|
||||
fulfilling obligations under Article 11 of the WIPO Copyright
|
||||
Treaty adopted on December 20, 1996, and/or similar international
|
||||
agreements.
|
||||
|
||||
f. Exceptions and Limitations means fair use, fair dealing, and/or
|
||||
any other exception or limitation to Copyright and Similar Rights
|
||||
that applies to Your use of the Licensed Material.
|
||||
|
||||
g. License Elements means the license attributes listed in the name
|
||||
of a Creative Commons Public License. The License Elements of this
|
||||
Public License are Attribution and ShareAlike.
|
||||
|
||||
h. Licensed Material means the artistic or literary work, database,
|
||||
or other material to which the Licensor applied this Public
|
||||
License.
|
||||
|
||||
i. Licensed Rights means the rights granted to You subject to the
|
||||
terms and conditions of this Public License, which are limited to
|
||||
all Copyright and Similar Rights that apply to Your use of the
|
||||
Licensed Material and that the Licensor has authority to license.
|
||||
|
||||
j. Licensor means the individual(s) or entity(ies) granting rights
|
||||
under this Public License.
|
||||
|
||||
k. Share means to provide material to the public by any means or
|
||||
process that requires permission under the Licensed Rights, such
|
||||
as reproduction, public display, public performance, distribution,
|
||||
dissemination, communication, or importation, and to make material
|
||||
available to the public including in ways that members of the
|
||||
public may access the material from a place and at a time
|
||||
individually chosen by them.
|
||||
|
||||
l. Sui Generis Database Rights means rights other than copyright
|
||||
resulting from Directive 96/9/EC of the European Parliament and of
|
||||
the Council of 11 March 1996 on the legal protection of databases,
|
||||
as amended and/or succeeded, as well as other essentially
|
||||
equivalent rights anywhere in the world.
|
||||
|
||||
m. You means the individual or entity exercising the Licensed Rights
|
||||
under this Public License. Your has a corresponding meaning.
|
||||
|
||||
|
||||
Section 2 -- Scope.
|
||||
|
||||
a. License grant.
|
||||
|
||||
1. Subject to the terms and conditions of this Public License,
|
||||
the Licensor hereby grants You a worldwide, royalty-free,
|
||||
non-sublicensable, non-exclusive, irrevocable license to
|
||||
exercise the Licensed Rights in the Licensed Material to:
|
||||
|
||||
a. reproduce and Share the Licensed Material, in whole or
|
||||
in part; and
|
||||
|
||||
b. produce, reproduce, and Share Adapted Material.
|
||||
|
||||
2. Exceptions and Limitations. For the avoidance of doubt, where
|
||||
Exceptions and Limitations apply to Your use, this Public
|
||||
License does not apply, and You do not need to comply with
|
||||
its terms and conditions.
|
||||
|
||||
3. Term. The term of this Public License is specified in Section
|
||||
6(a).
|
||||
|
||||
4. Media and formats; technical modifications allowed. The
|
||||
Licensor authorizes You to exercise the Licensed Rights in
|
||||
all media and formats whether now known or hereafter created,
|
||||
and to make technical modifications necessary to do so. The
|
||||
Licensor waives and/or agrees not to assert any right or
|
||||
authority to forbid You from making technical modifications
|
||||
necessary to exercise the Licensed Rights, including
|
||||
technical modifications necessary to circumvent Effective
|
||||
Technological Measures. For purposes of this Public License,
|
||||
simply making modifications authorized by this Section 2(a)
|
||||
(4) never produces Adapted Material.
|
||||
|
||||
5. Downstream recipients.
|
||||
|
||||
a. Offer from the Licensor -- Licensed Material. Every
|
||||
recipient of the Licensed Material automatically
|
||||
receives an offer from the Licensor to exercise the
|
||||
Licensed Rights under the terms and conditions of this
|
||||
Public License.
|
||||
|
||||
b. Additional offer from the Licensor -- Adapted Material.
|
||||
Every recipient of Adapted Material from You
|
||||
automatically receives an offer from the Licensor to
|
||||
exercise the Licensed Rights in the Adapted Material
|
||||
under the conditions of the Adapter's License You apply.
|
||||
|
||||
c. No downstream restrictions. You may not offer or impose
|
||||
any additional or different terms or conditions on, or
|
||||
apply any Effective Technological Measures to, the
|
||||
Licensed Material if doing so restricts exercise of the
|
||||
Licensed Rights by any recipient of the Licensed
|
||||
Material.
|
||||
|
||||
6. No endorsement. Nothing in this Public License constitutes or
|
||||
may be construed as permission to assert or imply that You
|
||||
are, or that Your use of the Licensed Material is, connected
|
||||
with, or sponsored, endorsed, or granted official status by,
|
||||
the Licensor or others designated to receive attribution as
|
||||
provided in Section 3(a)(1)(A)(i).
|
||||
|
||||
b. Other rights.
|
||||
|
||||
1. Moral rights, such as the right of integrity, are not
|
||||
licensed under this Public License, nor are publicity,
|
||||
privacy, and/or other similar personality rights; however, to
|
||||
the extent possible, the Licensor waives and/or agrees not to
|
||||
assert any such rights held by the Licensor to the limited
|
||||
extent necessary to allow You to exercise the Licensed
|
||||
Rights, but not otherwise.
|
||||
|
||||
2. Patent and trademark rights are not licensed under this
|
||||
Public License.
|
||||
|
||||
3. To the extent possible, the Licensor waives any right to
|
||||
collect royalties from You for the exercise of the Licensed
|
||||
Rights, whether directly or through a collecting society
|
||||
under any voluntary or waivable statutory or compulsory
|
||||
licensing scheme. In all other cases the Licensor expressly
|
||||
reserves any right to collect such royalties.
|
||||
|
||||
|
||||
Section 3 -- License Conditions.
|
||||
|
||||
Your exercise of the Licensed Rights is expressly made subject to the
|
||||
following conditions.
|
||||
|
||||
a. Attribution.
|
||||
|
||||
1. If You Share the Licensed Material (including in modified
|
||||
form), You must:
|
||||
|
||||
a. retain the following if it is supplied by the Licensor
|
||||
with the Licensed Material:
|
||||
|
||||
i. identification of the creator(s) of the Licensed
|
||||
Material and any others designated to receive
|
||||
attribution, in any reasonable manner requested by
|
||||
the Licensor (including by pseudonym if
|
||||
designated);
|
||||
|
||||
ii. a copyright notice;
|
||||
|
||||
iii. a notice that refers to this Public License;
|
||||
|
||||
iv. a notice that refers to the disclaimer of
|
||||
warranties;
|
||||
|
||||
v. a URI or hyperlink to the Licensed Material to the
|
||||
extent reasonably practicable;
|
||||
|
||||
b. indicate if You modified the Licensed Material and
|
||||
retain an indication of any previous modifications; and
|
||||
|
||||
c. indicate the Licensed Material is licensed under this
|
||||
Public License, and include the text of, or the URI or
|
||||
hyperlink to, this Public License.
|
||||
|
||||
2. You may satisfy the conditions in Section 3(a)(1) in any
|
||||
reasonable manner based on the medium, means, and context in
|
||||
which You Share the Licensed Material. For example, it may be
|
||||
reasonable to satisfy the conditions by providing a URI or
|
||||
hyperlink to a resource that includes the required
|
||||
information.
|
||||
|
||||
3. If requested by the Licensor, You must remove any of the
|
||||
information required by Section 3(a)(1)(A) to the extent
|
||||
reasonably practicable.
|
||||
|
||||
b. ShareAlike.
|
||||
|
||||
In addition to the conditions in Section 3(a), if You Share
|
||||
Adapted Material You produce, the following conditions also apply.
|
||||
|
||||
1. The Adapter's License You apply must be a Creative Commons
|
||||
license with the same License Elements, this version or
|
||||
later, or a BY-SA Compatible License.
|
||||
|
||||
2. You must include the text of, or the URI or hyperlink to, the
|
||||
Adapter's License You apply. You may satisfy this condition
|
||||
in any reasonable manner based on the medium, means, and
|
||||
context in which You Share Adapted Material.
|
||||
|
||||
3. You may not offer or impose any additional or different terms
|
||||
or conditions on, or apply any Effective Technological
|
||||
Measures to, Adapted Material that restrict exercise of the
|
||||
rights granted under the Adapter's License You apply.
|
||||
|
||||
|
||||
Section 4 -- Sui Generis Database Rights.
|
||||
|
||||
Where the Licensed Rights include Sui Generis Database Rights that
|
||||
apply to Your use of the Licensed Material:
|
||||
|
||||
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
|
||||
to extract, reuse, reproduce, and Share all or a substantial
|
||||
portion of the contents of the database;
|
||||
|
||||
b. if You include all or a substantial portion of the database
|
||||
contents in a database in which You have Sui Generis Database
|
||||
Rights, then the database in which You have Sui Generis Database
|
||||
Rights (but not its individual contents) is Adapted Material,
|
||||
|
||||
including for purposes of Section 3(b); and
|
||||
c. You must comply with the conditions in Section 3(a) if You Share
|
||||
all or a substantial portion of the contents of the database.
|
||||
|
||||
For the avoidance of doubt, this Section 4 supplements and does not
|
||||
replace Your obligations under this Public License where the Licensed
|
||||
Rights include other Copyright and Similar Rights.
|
||||
|
||||
|
||||
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
|
||||
|
||||
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
|
||||
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
|
||||
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
|
||||
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
|
||||
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
|
||||
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
||||
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
|
||||
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
|
||||
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
|
||||
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
|
||||
|
||||
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
|
||||
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
|
||||
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
|
||||
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
|
||||
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
|
||||
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
|
||||
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
|
||||
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
|
||||
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
|
||||
|
||||
c. The disclaimer of warranties and limitation of liability provided
|
||||
above shall be interpreted in a manner that, to the extent
|
||||
possible, most closely approximates an absolute disclaimer and
|
||||
waiver of all liability.
|
||||
|
||||
|
||||
Section 6 -- Term and Termination.
|
||||
|
||||
a. This Public License applies for the term of the Copyright and
|
||||
Similar Rights licensed here. However, if You fail to comply with
|
||||
this Public License, then Your rights under this Public License
|
||||
terminate automatically.
|
||||
|
||||
b. Where Your right to use the Licensed Material has terminated under
|
||||
Section 6(a), it reinstates:
|
||||
|
||||
1. automatically as of the date the violation is cured, provided
|
||||
it is cured within 30 days of Your discovery of the
|
||||
violation; or
|
||||
|
||||
2. upon express reinstatement by the Licensor.
|
||||
|
||||
For the avoidance of doubt, this Section 6(b) does not affect any
|
||||
right the Licensor may have to seek remedies for Your violations
|
||||
of this Public License.
|
||||
|
||||
c. For the avoidance of doubt, the Licensor may also offer the
|
||||
Licensed Material under separate terms or conditions or stop
|
||||
distributing the Licensed Material at any time; however, doing so
|
||||
will not terminate this Public License.
|
||||
|
||||
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
|
||||
License.
|
||||
|
||||
|
||||
Section 7 -- Other Terms and Conditions.
|
||||
|
||||
a. The Licensor shall not be bound by any additional or different
|
||||
terms or conditions communicated by You unless expressly agreed.
|
||||
|
||||
b. Any arrangements, understandings, or agreements regarding the
|
||||
Licensed Material not stated herein are separate from and
|
||||
independent of the terms and conditions of this Public License.
|
||||
|
||||
|
||||
Section 8 -- Interpretation.
|
||||
|
||||
a. For the avoidance of doubt, this Public License does not, and
|
||||
shall not be interpreted to, reduce, limit, restrict, or impose
|
||||
conditions on any use of the Licensed Material that could lawfully
|
||||
be made without permission under this Public License.
|
||||
|
||||
b. To the extent possible, if any provision of this Public License is
|
||||
deemed unenforceable, it shall be automatically reformed to the
|
||||
minimum extent necessary to make it enforceable. If the provision
|
||||
cannot be reformed, it shall be severed from this Public License
|
||||
without affecting the enforceability of the remaining terms and
|
||||
conditions.
|
||||
|
||||
c. No term or condition of this Public License will be waived and no
|
||||
failure to comply consented to unless expressly agreed to by the
|
||||
Licensor.
|
||||
|
||||
d. Nothing in this Public License constitutes or may be interpreted
|
||||
as a limitation upon, or waiver of, any privileges and immunities
|
||||
that apply to the Licensor or You, including from the legal
|
||||
processes of any jurisdiction or authority.
|
||||
|
||||
|
||||
=======================================================================
|
||||
|
||||
Creative Commons is not a party to its public licenses.
|
||||
Notwithstanding, Creative Commons may elect to apply one of its public
|
||||
licenses to material it publishes and in those instances will be
|
||||
considered the "Licensor." Except for the limited purpose of indicating
|
||||
that material is shared under a Creative Commons public license or as
|
||||
otherwise permitted by the Creative Commons policies published at
|
||||
creativecommons.org/policies, Creative Commons does not authorize the
|
||||
use of the trademark "Creative Commons" or any other trademark or logo
|
||||
of Creative Commons without its prior written consent including,
|
||||
without limitation, in connection with any unauthorized modifications
|
||||
to any of its public licenses or any other arrangements,
|
||||
understandings, or agreements concerning use of licensed material. For
|
||||
the avoidance of doubt, this paragraph does not form part of the public
|
||||
licenses.
|
||||
|
||||
Creative Commons may be contacted at creativecommons.org.
|
9
vendor/github.com/opencontainers/go-digest/MAINTAINERS
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)
|
||||
Brandon Philips <brandon.philips@coreos.com> (@philips)
|
||||
Brendan Burns <bburns@microsoft.com> (@brendandburns)
|
||||
Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
|
||||
Jason Bouzane <jbouzane@google.com> (@jbouzane)
|
||||
John Starks <jostarks@microsoft.com> (@jstarks)
|
||||
Jonathan Boulle <jon.boulle@coreos.com> (@jonboulle)
|
||||
Stephen Day <stephen.day@docker.com> (@stevvooe)
|
||||
Vincent Batts <vbatts@redhat.com> (@vbatts)
|
104
vendor/github.com/opencontainers/go-digest/README.md
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
# go-digest
|
||||
|
||||
[](https://godoc.org/github.com/opencontainers/go-digest) [](https://goreportcard.com/report/github.com/opencontainers/go-digest) [](https://travis-ci.org/opencontainers/go-digest)
|
||||
|
||||
Common digest package used across the container ecosystem.
|
||||
|
||||
Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information.
|
||||
|
||||
# What is a digest?
|
||||
|
||||
A digest is just a hash.
|
||||
|
||||
The most common use case for a digest is to create a content
|
||||
identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage)
|
||||
systems:
|
||||
|
||||
```go
|
||||
id := digest.FromBytes([]byte("my content"))
|
||||
```
|
||||
|
||||
In the example above, the id can be used to uniquely identify
|
||||
the byte slice "my content". This allows two disparate applications
|
||||
to agree on a verifiable identifier without having to trust one
|
||||
another.
|
||||
|
||||
An identifying digest can be verified, as follows:
|
||||
|
||||
```go
|
||||
if id != digest.FromBytes([]byte("my content")) {
|
||||
return errors.New("the content has changed!")
|
||||
}
|
||||
```
|
||||
|
||||
A `Verifier` type can be used to handle cases where an `io.Reader`
|
||||
makes more sense:
|
||||
|
||||
```go
|
||||
rd := getContent()
|
||||
verifier := id.Verifier()
|
||||
io.Copy(verifier, rd)
|
||||
|
||||
if !verifier.Verified() {
|
||||
return errors.New("the content has changed!")
|
||||
}
|
||||
```
|
||||
|
||||
Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this
|
||||
can power a rich, safe, content distribution system.
|
||||
|
||||
# Usage
|
||||
|
||||
While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is
|
||||
considered the best resource, a few important items need to be called
|
||||
out when using this package.
|
||||
|
||||
1. Make sure to import the hash implementations into your application
|
||||
or the package will panic. You should have something like the
|
||||
following in the main (or other entrypoint) of your application:
|
||||
|
||||
```go
|
||||
import (
|
||||
_ "crypto/sha256"
|
||||
_ "crypto/sha512"
|
||||
)
|
||||
```
|
||||
This may seem inconvenient, but it allows you to replace the hash
|
||||
implementations with others, such as https://github.com/stevvooe/resumable.
|
||||
|
||||
2. Even though `digest.Digest` may be assembled as a string, _always_
|
||||
verify your input with `digest.Parse` or use `Digest.Validate`
|
||||
when accepting untrusted input. While there are measures to
|
||||
avoid common problems, this will ensure you have valid digests
|
||||
in the rest of your application.
|
||||
|
||||
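For illustration, here is a minimal sketch of that kind of validation. It is not part of the upstream docs; the digest literal is just the example value from the package documentation, and error handling is left to the caller:

```go
package main

import (
	// Register sha256 so digest.Parse/Validate can recognize it (see item 1 above).
	_ "crypto/sha256"
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Pretend this value arrived from untrusted input, e.g. an API request.
	userInput := "sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"

	d, err := digest.Parse(userInput)
	if err != nil {
		fmt.Println("rejecting input:", err)
		return
	}
	fmt.Println("algorithm:", d.Algorithm(), "encoded:", d.Encoded())
}
```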
# Stability
|
||||
|
||||
The Go API, at this stage, is considered stable, unless otherwise noted.
|
||||
|
||||
As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).
|
||||
|
||||
# Contributing
|
||||
|
||||
This package is considered fairly complete. It has been in production
|
||||
in thousands (millions?) of deployments and is fairly battle-hardened.
|
||||
New additions will be met with skepticism. If you think there is a
|
||||
missing feature, please file a bug clearly describing the problem and
|
||||
the alternatives you tried before submitting a PR.
|
||||
|
||||
# Reporting security issues
|
||||
|
||||
Please DO NOT file a public issue, instead send your report privately to
|
||||
security@opencontainers.org.
|
||||
|
||||
The maintainers take security seriously. If you discover a security issue,
|
||||
please bring it to their attention right away!
|
||||
|
||||
If you are reporting a security issue, do not create an issue or file a pull
|
||||
request on GitHub. Instead, disclose the issue responsibly by sending an email
|
||||
to security@opencontainers.org (which is inhabited only by the maintainers of
|
||||
the various OCI projects).
|
||||
|
||||
# Copyright and license
|
||||
|
||||
Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
|
192
vendor/github.com/opencontainers/go-digest/algorithm.go
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
// Copyright 2017 Docker, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package digest
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// Algorithm identifies an implementation of a digester by an identifier.
|
||||
// Note that this defines both the hash algorithm used and the string
|
||||
// encoding.
|
||||
type Algorithm string
|
||||
|
||||
// supported digest types
|
||||
const (
|
||||
SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
|
||||
SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
|
||||
SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
|
||||
|
||||
// Canonical is the primary digest algorithm used with the distribution
|
||||
// project. Other digests may be used but this one is the primary storage
|
||||
// digest.
|
||||
Canonical = SHA256
|
||||
)
|
||||
|
||||
var (
|
||||
// TODO(stevvooe): Follow the pattern of the standard crypto package for
|
||||
// registration of digests. Effectively, we are a registerable set and
|
||||
// common symbol access.
|
||||
|
||||
// algorithms maps values to hash.Hash implementations. Other algorithms
|
||||
// may be available but they cannot be calculated by the digest package.
|
||||
algorithms = map[Algorithm]crypto.Hash{
|
||||
SHA256: crypto.SHA256,
|
||||
SHA384: crypto.SHA384,
|
||||
SHA512: crypto.SHA512,
|
||||
}
|
||||
|
||||
// anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
|
||||
// Note that uppercase hex characters (A-F) are disallowed.
|
||||
anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
|
||||
SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
|
||||
SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
|
||||
SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
|
||||
}
|
||||
)
|
||||
|
||||
// Available returns true if the digest type is available for use. If this
|
||||
// returns false, Digester and Hash will return nil.
|
||||
func (a Algorithm) Available() bool {
|
||||
h, ok := algorithms[a]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// check availability of the hash, as well
|
||||
return h.Available()
|
||||
}
|
||||
|
||||
func (a Algorithm) String() string {
|
||||
return string(a)
|
||||
}
|
||||
|
||||
// Size returns number of bytes returned by the hash.
|
||||
func (a Algorithm) Size() int {
|
||||
h, ok := algorithms[a]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
return h.Size()
|
||||
}
|
||||
|
||||
// Set implemented to allow use of Algorithm as a command line flag.
|
||||
func (a *Algorithm) Set(value string) error {
|
||||
if value == "" {
|
||||
*a = Canonical
|
||||
} else {
|
||||
// just do a type conversion, support is queried with Available.
|
||||
*a = Algorithm(value)
|
||||
}
|
||||
|
||||
if !a.Available() {
|
||||
return ErrDigestUnsupported
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Digester returns a new digester for the specified algorithm. If the algorithm
|
||||
// does not have a digester implementation, nil will be returned. This can be
|
||||
// checked by calling Available before calling Digester.
|
||||
func (a Algorithm) Digester() Digester {
|
||||
return &digester{
|
||||
alg: a,
|
||||
hash: a.Hash(),
|
||||
}
|
||||
}
|
||||
|
||||
// Hash returns a new hash as used by the algorithm. If not available, the
|
||||
// method will panic. Check Algorithm.Available() before calling.
|
||||
func (a Algorithm) Hash() hash.Hash {
|
||||
if !a.Available() {
|
||||
// Empty algorithm string is invalid
|
||||
if a == "" {
|
||||
panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()"))
|
||||
}
|
||||
|
||||
// NOTE(stevvooe): A missing hash is usually a programming error that
|
||||
// must be resolved at compile time. We don't import in the digest
|
||||
// package to allow users to choose their hash implementation (such as
|
||||
// when using stevvooe/resumable or a hardware accelerated package).
|
||||
//
|
||||
// Applications that may want to resolve the hash at runtime should
|
||||
// call Algorithm.Available before calling Algorithm.Hash().
|
||||
panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
|
||||
}
|
||||
|
||||
return algorithms[a].New()
|
||||
}
|
||||
|
||||
// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
|
||||
// the encoded portion of the digest.
|
||||
func (a Algorithm) Encode(d []byte) string {
|
||||
// TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
|
||||
// add support for back registration, we can modify this accordingly.
|
||||
return fmt.Sprintf("%x", d)
|
||||
}
|
||||
|
||||
// FromReader returns the digest of the reader using the algorithm.
|
||||
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
|
||||
digester := a.Digester()
|
||||
|
||||
if _, err := io.Copy(digester.Hash(), rd); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return digester.Digest(), nil
|
||||
}
|
||||
|
||||
// FromBytes digests the input and returns a Digest.
|
||||
func (a Algorithm) FromBytes(p []byte) Digest {
|
||||
digester := a.Digester()
|
||||
|
||||
if _, err := digester.Hash().Write(p); err != nil {
|
||||
// Writes to a Hash should never fail. None of the existing
|
||||
// hash implementations in the stdlib or hashes vendored
|
||||
// here can return errors from Write. Having a panic in this
|
||||
// condition instead of having FromBytes return an error value
|
||||
// avoids unnecessary error handling paths in all callers.
|
||||
panic("write to hash function returned error: " + err.Error())
|
||||
}
|
||||
|
||||
return digester.Digest()
|
||||
}
|
||||
|
||||
// FromString digests the string input and returns a Digest.
|
||||
func (a Algorithm) FromString(s string) Digest {
|
||||
return a.FromBytes([]byte(s))
|
||||
}
|
||||
|
||||
// Validate validates the encoded portion string
|
||||
func (a Algorithm) Validate(encoded string) error {
|
||||
r, ok := anchoredEncodedRegexps[a]
|
||||
if !ok {
|
||||
return ErrDigestUnsupported
|
||||
}
|
||||
// Digests must always be hex-encoded, ensuring that their hex portion will
|
||||
// always be size*2
|
||||
if a.Size()*2 != len(encoded) {
|
||||
return ErrDigestInvalidLength
|
||||
}
|
||||
if r.MatchString(encoded) {
|
||||
return nil
|
||||
}
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
156
vendor/github.com/opencontainers/go-digest/digest.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
|
||||
// Copyright 2017 Docker, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package digest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Digest allows simple protection of hex formatted digest strings, prefixed
|
||||
// by their algorithm. Strings of type Digest have some guarantee of being in
|
||||
// the correct format and it provides quick access to the components of a
|
||||
// digest string.
|
||||
//
|
||||
// The following is an example of the contents of Digest types:
|
||||
//
|
||||
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
|
||||
//
|
||||
// This allows us to abstract the digest behind this type and work only in those
|
||||
// terms.
|
||||
type Digest string
|
||||
|
||||
// NewDigest returns a Digest from alg and a hash.Hash object.
|
||||
func NewDigest(alg Algorithm, h hash.Hash) Digest {
|
||||
return NewDigestFromBytes(alg, h.Sum(nil))
|
||||
}
|
||||
|
||||
// NewDigestFromBytes returns a new digest from the byte contents of p.
|
||||
// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
|
||||
// functions. This is also useful for rebuilding digests from binary
|
||||
// serializations.
|
||||
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
|
||||
return NewDigestFromEncoded(alg, alg.Encode(p))
|
||||
}
|
||||
|
||||
// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded.
|
||||
func NewDigestFromHex(alg, hex string) Digest {
|
||||
return NewDigestFromEncoded(Algorithm(alg), hex)
|
||||
}
|
||||
|
||||
// NewDigestFromEncoded returns a Digest from alg and the encoded digest.
|
||||
func NewDigestFromEncoded(alg Algorithm, encoded string) Digest {
|
||||
return Digest(fmt.Sprintf("%s:%s", alg, encoded))
|
||||
}
|
||||
|
||||
// DigestRegexp matches valid digest types.
|
||||
var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`)
|
||||
|
||||
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
|
||||
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
|
||||
|
||||
var (
|
||||
// ErrDigestInvalidFormat returned when digest format invalid.
|
||||
ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
|
||||
|
||||
// ErrDigestInvalidLength returned when digest has invalid length.
|
||||
ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
|
||||
|
||||
// ErrDigestUnsupported returned when the digest algorithm is unsupported.
|
||||
ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
|
||||
)
|
||||
|
||||
// Parse parses s and returns the validated digest object. An error will
|
||||
// be returned if the format is invalid.
|
||||
func Parse(s string) (Digest, error) {
|
||||
d := Digest(s)
|
||||
return d, d.Validate()
|
||||
}
|
||||
|
||||
// FromReader consumes the content of rd until io.EOF, returning canonical digest.
|
||||
func FromReader(rd io.Reader) (Digest, error) {
|
||||
return Canonical.FromReader(rd)
|
||||
}
|
||||
|
||||
// FromBytes digests the input and returns a Digest.
|
||||
func FromBytes(p []byte) Digest {
|
||||
return Canonical.FromBytes(p)
|
||||
}
|
||||
|
||||
// FromString digests the input and returns a Digest.
|
||||
func FromString(s string) Digest {
|
||||
return Canonical.FromString(s)
|
||||
}
|
||||
|
||||
// Validate checks that the contents of d is a valid digest, returning an
|
||||
// error if not.
|
||||
func (d Digest) Validate() error {
|
||||
s := string(d)
|
||||
i := strings.Index(s, ":")
|
||||
if i <= 0 || i+1 == len(s) {
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
algorithm, encoded := Algorithm(s[:i]), s[i+1:]
|
||||
if !algorithm.Available() {
|
||||
if !DigestRegexpAnchored.MatchString(s) {
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
return ErrDigestUnsupported
|
||||
}
|
||||
return algorithm.Validate(encoded)
|
||||
}
|
||||
|
||||
// Algorithm returns the algorithm portion of the digest. This will panic if
|
||||
// the underlying digest is not in a valid format.
|
||||
func (d Digest) Algorithm() Algorithm {
|
||||
return Algorithm(d[:d.sepIndex()])
|
||||
}
|
||||
|
||||
// Verifier returns a writer object that can be used to verify a stream of
|
||||
// content against the digest. If the digest is invalid, the method will panic.
|
||||
func (d Digest) Verifier() Verifier {
|
||||
return hashVerifier{
|
||||
hash: d.Algorithm().Hash(),
|
||||
digest: d,
|
||||
}
|
||||
}
|
||||
|
||||
// Encoded returns the encoded portion of the digest. This will panic if the
|
||||
// underlying digest is not in a valid format.
|
||||
func (d Digest) Encoded() string {
|
||||
return string(d[d.sepIndex()+1:])
|
||||
}
|
||||
|
||||
// Hex is deprecated. Please use Digest.Encoded.
|
||||
func (d Digest) Hex() string {
|
||||
return d.Encoded()
|
||||
}
|
||||
|
||||
func (d Digest) String() string {
|
||||
return string(d)
|
||||
}
|
||||
|
||||
func (d Digest) sepIndex() int {
|
||||
i := strings.Index(string(d), ":")
|
||||
|
||||
if i < 0 {
|
||||
panic(fmt.Sprintf("no ':' separator in digest %q", d))
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
39
vendor/github.com/opencontainers/go-digest/digester.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
// Copyright 2017 Docker, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package digest
|
||||
|
||||
import "hash"
|
||||
|
||||
// Digester calculates the digest of written data. Writes should go directly
|
||||
// to the return value of Hash, while calling Digest will return the current
|
||||
// value of the digest.
|
||||
type Digester interface {
|
||||
Hash() hash.Hash // provides direct access to underlying hash instance.
|
||||
Digest() Digest
|
||||
}
|
||||
|
||||
// digester provides a simple digester definition that embeds a hasher.
|
||||
type digester struct {
|
||||
alg Algorithm
|
||||
hash hash.Hash
|
||||
}
|
||||
|
||||
func (d *digester) Hash() hash.Hash {
|
||||
return d.hash
|
||||
}
|
||||
|
||||
func (d *digester) Digest() Digest {
|
||||
return NewDigest(d.alg, d.hash)
|
||||
}
|
56
vendor/github.com/opencontainers/go-digest/doc.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
// Copyright 2017 Docker, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package digest provides a generalized type to opaquely represent message
|
||||
// digests and their operations within the registry. The Digest type is
|
||||
// designed to serve as a flexible identifier in a content-addressable system.
|
||||
// More importantly, it provides tools and wrappers to work with
|
||||
// hash.Hash-based digests with little effort.
|
||||
//
|
||||
// Basics
|
||||
//
|
||||
// The format of a digest is simply a string with two parts, dubbed the
|
||||
// "algorithm" and the "digest", separated by a colon:
|
||||
//
|
||||
// <algorithm>:<digest>
|
||||
//
|
||||
// An example of a sha256 digest representation follows:
|
||||
//
|
||||
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
|
||||
//
|
||||
// In this case, the string "sha256" is the algorithm and the hex bytes are
|
||||
// the "digest".
|
||||
//
|
||||
// Because the Digest type is simply a string, once a valid Digest is
|
||||
// obtained, comparisons are cheap, quick and simple to express with the
|
||||
// standard equality operator.
|
||||
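//
// As a minimal sketch (not part of the original doc comment), using the
// FromString and FromBytes helpers defined in digest.go:
//
//	d1 := digest.FromString("my content")
//	d2 := digest.FromBytes([]byte("my content"))
//	same := d1 == d2 // true: same canonical algorithm, same content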
//
|
||||
// Verification
|
||||
//
|
||||
// The main benefit of using the Digest type is simple verification against a
|
||||
// given digest. The Verifier interface, modeled after the stdlib hash.Hash
|
||||
// interface, provides a common write sink for digest verification. After
|
||||
// writing is complete, calling the Verifier.Verified method will indicate
|
||||
// whether or not the stream of bytes matches the target digest.
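//
// A minimal sketch of that flow (added for illustration; rd is any io.Reader
// supplying the content under test, and d is a valid Digest):
//
//	verifier := d.Verifier()
//	if _, err := io.Copy(verifier, rd); err != nil {
//		// handle the read error
//	}
//	ok := verifier.Verified()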
|
||||
//
|
||||
// Missing Features
|
||||
//
|
||||
// In addition to the above, we intend to add the following features to this
|
||||
// package:
|
||||
//
|
||||
// 1. A Digester type that supports write sink digest calculation.
|
||||
//
|
||||
// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
|
||||
//
|
||||
package digest
|
45
vendor/github.com/opencontainers/go-digest/verifiers.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright 2017 Docker, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package digest
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Verifier presents a general verification interface to be used with message
|
||||
// digests and other byte stream verifications. Users instantiate a Verifier
|
||||
// from one of the various methods, write the data under test to it then check
|
||||
// the result with the Verified method.
|
||||
type Verifier interface {
|
||||
io.Writer
|
||||
|
||||
// Verified will return true if the content written to Verifier matches
|
||||
// the digest.
|
||||
Verified() bool
|
||||
}
|
||||
|
||||
type hashVerifier struct {
|
||||
digest Digest
|
||||
hash hash.Hash
|
||||
}
|
||||
|
||||
func (hv hashVerifier) Write(p []byte) (n int, err error) {
|
||||
return hv.hash.Write(p)
|
||||
}
|
||||
|
||||
func (hv hashVerifier) Verified() bool {
|
||||
return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
|
||||
}
|
20
vendor/github.com/schollz/peerdiscovery/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
|
||||
.glide/
|
||||
|
||||
examples/examples
|
||||
examples/ipv4/go.sum
|
||||
examples/ipv4/ipv4
|
||||
examples/ipv6/go.sum
|
||||
examples/ipv6/ipv6
|
7
vendor/github.com/schollz/peerdiscovery/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
FROM golang
|
||||
|
||||
WORKDIR /peerdiscovery
|
||||
COPY . .
|
||||
RUN go build ./examples/ipv4/main.go
|
||||
|
||||
CMD ["/peerdiscovery/main"]
|
21
vendor/github.com/schollz/peerdiscovery/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2018 Zack
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
72
vendor/github.com/schollz/peerdiscovery/README.md
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
# peerdiscovery
|
||||
|
||||
<img src="https://img.shields.io/badge/coverage-89%25-brightgreen.svg?style=flat-square" alt="Code coverage"> <a href="https://goreportcard.com/report/github.com/schollz/peerdiscovery"><img src="https://goreportcard.com/badge/github.com/schollz/peerdiscovery?style=flat-square" alt="Go Report"></a> <a href="https://godoc.org/github.com/schollz/peerdiscovery"><img src="http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square" alt="Go Doc"></a>
|
||||
|
||||
Pure-go library for cross-platform thread-safe local peer discovery using UDP multicast. I needed to use peer discovery for [croc](https://github.com/schollz/croc) and everything I tried had problems, so I made another one.
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
Make sure you have Go 1.5+.
|
||||
|
||||
```
|
||||
go get -u github.com/schollz/peerdiscovery
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
The following code finds the first peer on the local network and prints it out.
|
||||
|
||||
```golang
|
||||
discoveries, _ := peerdiscovery.Discover(peerdiscovery.Settings{Limit: 1})
|
||||
for _, d := range discoveries {
|
||||
fmt.Printf("discovered '%s'\n", d.Address)
|
||||
}
|
||||
```
|
||||
|
||||
Here's the output when running on two computers. (*Run these gifs in sync by hitting Ctrl + F5*).
|
||||
|
||||
**Computer 1:**
|
||||
|
||||

|
||||
|
||||
**Computer 2:**
|
||||
|
||||

|
||||
|
||||
For more examples, see the scanning examples ([ipv4](https://github.com/schollz/peerdiscovery/blob/master/examples/ipv4/main.go) and [ipv6](https://github.com/schollz/peerdiscovery/blob/master/examples/ipv6/main.go)) or [the docs](https://pkg.go.dev/github.com/schollz/peerdiscovery).
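As an additional, hypothetical sketch (not from the upstream README), the `Settings` struct in `peerdiscovery.go` exposes more knobs; the example below uses only fields defined there, such as an unlimited peer count, a shorter broadcast delay, and a `Notify` callback:

```golang
package main

import (
	"fmt"
	"time"

	"github.com/schollz/peerdiscovery"
)

func main() {
	discoveries, err := peerdiscovery.Discover(peerdiscovery.Settings{
		Limit:     -1,                     // < 1 means no limit on discovered peers
		TimeLimit: 5 * time.Second,        // stop scanning after 5 seconds
		Delay:     500 * time.Millisecond, // broadcast twice per second
		Notify: func(d peerdiscovery.Discovered) {
			fmt.Printf("found %s with payload %q\n", d.Address, d.Payload)
		},
	})
	if err != nil {
		fmt.Println("discovery failed:", err)
		return
	}
	fmt.Printf("discovered %d peers in total\n", len(discoveries))
}
```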
|
||||
|
||||
|
||||
## Testing
|
||||
|
||||
To test the peer discovery with just one host, one can launch multiple containers. The provided `Dockerfile` will run the example code.
|
||||
Please make sure to enable [Docker's IPv6 support](https://docs.docker.com/v17.09/engine/userguide/networking/default_network/ipv6/) if you are using IPv6 for peer discovery.
|
||||
|
||||
```console
|
||||
# Build the container, named peertest
|
||||
$ docker build -t peertest .
|
||||
|
||||
# Execute the following command in multiple terminals
|
||||
$ docker run -t --rm peertest
|
||||
Scanning for 10 seconds to find LAN peers
|
||||
100% |████████████████████████████████████████| [9s:0s]Found 1 other computers
|
||||
0) '172.17.0.2' with payload 'zqrecHipCO'
|
||||
```
|
||||
|
||||
|
||||
## Contributing
|
||||
|
||||
Pull requests are welcome. Feel free to...
|
||||
|
||||
- Revise documentation
|
||||
- Add new features
|
||||
- Fix bugs
|
||||
- Suggest improvements
|
||||
|
||||
## Thanks
|
||||
|
||||
Thanks [@geistesk](https://github.com/geistesk) for adding IPv6 support and a `Notify` func, and helping maintain! Thanks [@Kunde21](https://github.com/Kunde21) for providing a bug fix and massively refactoring the code in a much better way. Thanks [@robpre](https://github.com/robpre) for finding and fixing bugs. Thanks [@shvydky](https://github.com/shvydky) for adding dynamic payloads.
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
9
vendor/github.com/schollz/peerdiscovery/go.mod
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
module github.com/schollz/peerdiscovery
|
||||
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/stretchr/testify v1.6.1
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
|
||||
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02 // indirect
|
||||
)
|
24
vendor/github.com/schollz/peerdiscovery/go.sum
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02 h1:5Ftd3YbC/kANXWCBjvppvUmv1BMakgFcBKA7MpYYp4M=
|
||||
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
414
vendor/github.com/schollz/peerdiscovery/peerdiscovery.go
generated
vendored
Normal file
@@ -0,0 +1,414 @@
|
||||
package peerdiscovery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
// IPVersion specifies the version of the Internet Protocol to be used.
|
||||
type IPVersion uint
|
||||
|
||||
const (
|
||||
IPv4 IPVersion = 4
|
||||
IPv6 IPVersion = 6
|
||||
)
|
||||
|
||||
// Discovered is the structure of the discovered peers,
|
||||
// which holds their local address (port removed) and
|
||||
// a payload if there is one.
|
||||
type Discovered struct {
|
||||
// Address is the local address of a discovered peer.
|
||||
Address string
|
||||
// Payload is the associated payload from discovered peer.
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
func (d Discovered) String() string {
|
||||
return fmt.Sprintf("address: %s, payload: %s", d.Address, d.Payload)
|
||||
}
|
||||
|
||||
// Settings are the settings that can be specified for
|
||||
// doing peer discovery.
|
||||
type Settings struct {
|
||||
// Limit is the number of peers to discover, use < 1 for unlimited.
|
||||
Limit int
|
||||
// Port is the port to broadcast on (the peers must also broadcast using the same port).
|
||||
// The default port is 9999.
|
||||
Port string
|
||||
// MulticastAddress specifies the multicast address.
|
||||
// You should be able to use any of 224.0.0.0/4 or ff00::/8.
|
||||
// By default it uses the Simple Service Discovery Protocol
|
||||
// address (239.255.255.250 for IPv4 or ff02::c for IPv6).
|
||||
MulticastAddress string
|
||||
// Payload is the bytes that are sent out with each broadcast. Must be short.
|
||||
Payload []byte
|
||||
// PayloadFunc is the function that will be called to dynamically generate payload
|
||||
// before every broadcast. If this pointer is nil `Payload` field will be broadcasted instead.
|
||||
PayloadFunc func() []byte
|
||||
// Delay is the amount of time between broadcasts. The default delay is 1 second.
|
||||
Delay time.Duration
|
||||
// TimeLimit is the amount of time to spend discovering, if the limit is not reached.
|
||||
// A negative limit indicates scanning until the limit was reached or, if an
|
||||
// unlimited scanning was requested, no timeout.
|
||||
// The default time limit is 10 seconds.
|
||||
TimeLimit time.Duration
|
||||
// StopChan is a channel to stop the peer discovery immediately after reception.
|
||||
StopChan chan struct{}
|
||||
// AllowSelf will allow discovery of the local machine (default false)
|
||||
AllowSelf bool
|
||||
// DisableBroadcast will not allow sending out a broadcast
|
||||
DisableBroadcast bool
|
||||
// IPVersion specifies the version of the Internet Protocol (default IPv4)
|
||||
IPVersion IPVersion
|
||||
// Notify will be called each time a new peer was discovered.
|
||||
// The default is nil, which means no notification whatsoever.
|
||||
Notify func(Discovered)
|
||||
|
||||
portNum int
|
||||
multicastAddressNumbers net.IP
|
||||
}
|
||||
|
||||
// peerDiscovery is the object that can do the discovery for finding LAN peers.
|
||||
type peerDiscovery struct {
|
||||
settings Settings
|
||||
|
||||
received map[string][]byte
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// initialize returns a new peerDiscovery object which can be used to discover peers.
|
||||
// The settings are optional. If any setting is not supplied, then defaults are used.
|
||||
// See the Settings for more information.
|
||||
func initialize(settings Settings) (p *peerDiscovery, err error) {
|
||||
p = new(peerDiscovery)
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
// initialize settings
|
||||
p.settings = settings
|
||||
|
||||
// defaults
|
||||
if p.settings.Port == "" {
|
||||
p.settings.Port = "9999"
|
||||
}
|
||||
if p.settings.IPVersion == 0 {
|
||||
p.settings.IPVersion = IPv4
|
||||
}
|
||||
if p.settings.MulticastAddress == "" {
|
||||
if p.settings.IPVersion == IPv4 {
|
||||
p.settings.MulticastAddress = "239.255.255.250"
|
||||
} else {
|
||||
p.settings.MulticastAddress = "ff02::c"
|
||||
}
|
||||
}
|
||||
if len(p.settings.Payload) == 0 {
|
||||
p.settings.Payload = []byte("hi")
|
||||
}
|
||||
if p.settings.Delay == 0 {
|
||||
p.settings.Delay = 1 * time.Second
|
||||
}
|
||||
if p.settings.TimeLimit == 0 {
|
||||
p.settings.TimeLimit = 10 * time.Second
|
||||
}
|
||||
if p.settings.StopChan == nil {
|
||||
p.settings.StopChan = make(chan struct{})
|
||||
}
|
||||
p.received = make(map[string][]byte)
|
||||
p.settings.multicastAddressNumbers = net.ParseIP(p.settings.MulticastAddress)
|
||||
if p.settings.multicastAddressNumbers == nil {
|
||||
err = fmt.Errorf("Multicast Address %s could not be converted to an IP",
|
||||
p.settings.MulticastAddress)
|
||||
return
|
||||
}
|
||||
p.settings.portNum, err = strconv.Atoi(p.settings.Port)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type NetPacketConn interface {
|
||||
JoinGroup(ifi *net.Interface, group net.Addr) error
|
||||
SetMulticastInterface(ini *net.Interface) error
|
||||
SetMulticastTTL(int) error
|
||||
ReadFrom(buf []byte) (int, net.Addr, error)
|
||||
WriteTo(buf []byte, dst net.Addr) (int, error)
|
||||
}
|
||||
|
||||
// Discover will use the created settings to scan for LAN peers. It will return
|
||||
// an array of the discovered peers and their associate payloads. It will not
|
||||
// return broadcasts sent to itself.
|
||||
func Discover(settings ...Settings) (discoveries []Discovered, err error) {
|
||||
s := Settings{}
|
||||
if len(settings) > 0 {
|
||||
s = settings[0]
|
||||
}
|
||||
p, err := initialize(s)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
p.RLock()
|
||||
address := net.JoinHostPort(p.settings.MulticastAddress, p.settings.Port)
|
||||
portNum := p.settings.portNum
|
||||
|
||||
tickerDuration := p.settings.Delay
|
||||
timeLimit := p.settings.TimeLimit
|
||||
p.RUnlock()
|
||||
|
||||
// get interfaces
|
||||
ifaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Open up a connection
|
||||
c, err := net.ListenPacket(fmt.Sprintf("udp%d", p.settings.IPVersion), address)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
group := p.settings.multicastAddressNumbers
|
||||
|
||||
// ipv{4,6} each have their own PacketConn, which does not implement net.PacketConn
|
||||
var p2 NetPacketConn
|
||||
if p.settings.IPVersion == IPv4 {
|
||||
p2 = PacketConn4{ipv4.NewPacketConn(c)}
|
||||
} else {
|
||||
p2 = PacketConn6{ipv6.NewPacketConn(c)}
|
||||
}
|
||||
|
||||
for i := range ifaces {
|
||||
p2.JoinGroup(&ifaces[i], &net.UDPAddr{IP: group, Port: portNum})
|
||||
}
|
||||
|
||||
go p.listen()
|
||||
ticker := time.NewTicker(tickerDuration)
|
||||
defer ticker.Stop()
|
||||
start := time.Now()
|
||||
|
||||
for {
|
||||
exit := false
|
||||
|
||||
p.RLock()
|
||||
if len(p.received) >= p.settings.Limit && p.settings.Limit > 0 {
|
||||
exit = true
|
||||
}
|
||||
p.RUnlock()
|
||||
|
||||
if !s.DisableBroadcast {
|
||||
payload := p.settings.Payload
|
||||
if p.settings.PayloadFunc != nil {
|
||||
payload = p.settings.PayloadFunc()
|
||||
}
|
||||
// write to multicast
|
||||
broadcast(p2, payload, ifaces, &net.UDPAddr{IP: group, Port: portNum})
|
||||
}
|
||||
|
||||
select {
|
||||
case <-p.settings.StopChan:
|
||||
exit = true
|
||||
case <-ticker.C:
|
||||
}
|
||||
|
||||
if exit || timeLimit > 0 && time.Since(start) > timeLimit {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !s.DisableBroadcast {
|
||||
payload := p.settings.Payload
|
||||
if p.settings.PayloadFunc != nil {
|
||||
payload = p.settings.PayloadFunc()
|
||||
}
|
||||
// send out broadcast that is finished
|
||||
broadcast(p2, payload, ifaces, &net.UDPAddr{IP: group, Port: portNum})
|
||||
}
|
||||
|
||||
p.RLock()
|
||||
discoveries = make([]Discovered, len(p.received))
|
||||
i := 0
|
||||
for ip, payload := range p.received {
|
||||
discoveries[i] = Discovered{
|
||||
Address: ip,
|
||||
Payload: payload,
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
func broadcast(p2 NetPacketConn, payload []byte, ifaces []net.Interface, dst net.Addr) {
|
||||
for i := range ifaces {
|
||||
if errMulticast := p2.SetMulticastInterface(&ifaces[i]); errMulticast != nil {
|
||||
continue
|
||||
}
|
||||
p2.SetMulticastTTL(2)
|
||||
if _, errMulticast := p2.WriteTo([]byte(payload), dst); errMulticast != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
|
||||
maxDatagramSize = 66507
|
||||
)
|
||||
|
||||
// listen binds to the UDP address and port given and writes packets received
|
||||
// from that address to a buffer which is passed to a handler
|
||||
func (p *peerDiscovery) listen() (receivedBytes []byte, err error) {
|
||||
p.RLock()
|
||||
address := net.JoinHostPort(p.settings.MulticastAddress, p.settings.Port)
|
||||
portNum := p.settings.portNum
|
||||
allowSelf := p.settings.AllowSelf
|
||||
notify := p.settings.Notify
|
||||
p.RUnlock()
|
||||
localIPs := getLocalIPs()
|
||||
|
||||
// get interfaces
|
||||
ifaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// log.Println(ifaces)
|
||||
|
||||
// Open up a connection
|
||||
c, err := net.ListenPacket(fmt.Sprintf("udp%d", p.settings.IPVersion), address)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
group := p.settings.multicastAddressNumbers
|
||||
var p2 NetPacketConn
|
||||
if p.settings.IPVersion == IPv4 {
|
||||
p2 = PacketConn4{ipv4.NewPacketConn(c)}
|
||||
} else {
|
||||
p2 = PacketConn6{ipv6.NewPacketConn(c)}
|
||||
}
|
||||
|
||||
for i := range ifaces {
|
||||
p2.JoinGroup(&ifaces[i], &net.UDPAddr{IP: group, Port: portNum})
|
||||
}
|
||||
|
||||
// Loop forever reading from the socket
|
||||
for {
|
||||
buffer := make([]byte, maxDatagramSize)
|
||||
var (
|
||||
n int
|
||||
src net.Addr
|
||||
errRead error
|
||||
)
|
||||
n, src, errRead = p2.ReadFrom(buffer)
|
||||
if errRead != nil {
|
||||
err = errRead
|
||||
return
|
||||
}
|
||||
|
||||
srcHost, _, _ := net.SplitHostPort(src.String())
|
||||
|
||||
if _, ok := localIPs[srcHost]; ok && !allowSelf {
|
||||
continue
|
||||
}
|
||||
|
||||
// log.Println(src, hex.Dump(buffer[:n]))
|
||||
|
||||
p.Lock()
|
||||
if _, ok := p.received[srcHost]; !ok {
|
||||
p.received[srcHost] = buffer[:n]
|
||||
}
|
||||
p.Unlock()
|
||||
|
||||
if notify != nil {
|
||||
notify(Discovered{
|
||||
Address: srcHost,
|
||||
Payload: buffer[:n],
|
||||
})
|
||||
}
|
||||
|
||||
p.RLock()
|
||||
if len(p.received) >= p.settings.Limit && p.settings.Limit > 0 {
|
||||
p.RUnlock()
|
||||
break
|
||||
}
|
||||
p.RUnlock()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// getLocalIPs returns the set of local IP addresses
|
||||
func getLocalIPs() (ips map[string]struct{}) {
|
||||
ips = make(map[string]struct{})
|
||||
ips["localhost"] = struct{}{}
|
||||
ips["127.0.0.1"] = struct{}{}
|
||||
ips["::1"] = struct{}{}
|
||||
|
||||
ifaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, iface := range ifaces {
|
||||
addrs, err := iface.Addrs()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, address := range addrs {
|
||||
ip, _, err := net.ParseCIDR(address.String())
|
||||
if err != nil {
|
||||
// log.Printf("Failed to parse %s: %v", address.String(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
ips[ip.String()+"%"+iface.Name] = struct{}{}
|
||||
ips[ip.String()] = struct{}{}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type PacketConn4 struct {
|
||||
*ipv4.PacketConn
|
||||
}
|
||||
|
||||
// ReadFrom wraps the ipv4 ReadFrom without a control message
|
||||
func (pc4 PacketConn4) ReadFrom(buf []byte) (int, net.Addr, error) {
|
||||
n, _, addr, err := pc4.PacketConn.ReadFrom(buf)
|
||||
return n, addr, err
|
||||
}
|
||||
|
||||
// WriteTo wraps the ipv4 WriteTo without a control message
|
||||
func (pc4 PacketConn4) WriteTo(buf []byte, dst net.Addr) (int, error) {
|
||||
return pc4.PacketConn.WriteTo(buf, nil, dst)
|
||||
}
|
||||
|
||||
type PacketConn6 struct {
|
||||
*ipv6.PacketConn
|
||||
}
|
||||
|
||||
// ReadFrom wraps the ipv6 ReadFrom without a control message
|
||||
func (pc6 PacketConn6) ReadFrom(buf []byte) (int, net.Addr, error) {
|
||||
n, _, addr, err := pc6.PacketConn.ReadFrom(buf)
|
||||
return n, addr, err
|
||||
}
|
||||
|
||||
// WriteTo wraps the ipv6 WriteTo without a control message
|
||||
func (pc6 PacketConn6) WriteTo(buf []byte, dst net.Addr) (int, error) {
|
||||
return pc6.PacketConn.WriteTo(buf, nil, dst)
|
||||
}
|
||||
|
||||
// SetMulticastTTL wraps the hop limit of ipv6
|
||||
func (pc6 PacketConn6) SetMulticastTTL(i int) error {
|
||||
return pc6.SetMulticastHopLimit(i)
|
||||
}
|
8
vendor/github.com/sethvargo/go-envconfig/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
# This is the list of envconfig authors for copyright purposes.
|
||||
#
|
||||
# This does not necessarily list everyone who has contributed code, since in
|
||||
# some cases, their employer may be the copyright holder. To see the full list
|
||||
# of contributors, see the revision history in source control.
|
||||
|
||||
Google LLC
|
||||
Seth Vargo
|
202
vendor/github.com/sethvargo/go-envconfig/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
(Sections 1-9 of the standard Apache License 2.0, the END OF TERMS AND CONDITIONS marker, and the standard application appendix; full text at http://www.apache.org/licenses/LICENSE-2.0)
59
vendor/github.com/sethvargo/go-envconfig/Makefile
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
# Copyright The envconfig Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
VETTERS = "asmdecl,assign,atomic,bools,buildtag,cgocall,composites,copylocks,errorsas,httpresponse,loopclosure,lostcancel,nilfunc,printf,shift,stdmethods,structtag,tests,unmarshal,unreachable,unsafeptr,unusedresult"
|
||||
GOFMT_FILES = $(shell go list -f '{{.Dir}}' ./...)
|
||||
|
||||
fmtcheck:
|
||||
@command -v goimports > /dev/null 2>&1 || (cd tools && go get golang.org/x/tools/cmd/goimports && cd ..)
|
||||
@CHANGES="$$(goimports -d $(GOFMT_FILES))"; \
|
||||
if [ -n "$${CHANGES}" ]; then \
|
||||
echo "Unformatted (run goimports -w .):\n\n$${CHANGES}\n\n"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@# Annoyingly, goimports does not support the simplify flag.
|
||||
@CHANGES="$$(gofmt -s -d $(GOFMT_FILES))"; \
|
||||
if [ -n "$${CHANGES}" ]; then \
|
||||
echo "Unformatted (run gofmt -s -w .):\n\n$${CHANGES}\n\n"; \
|
||||
exit 1; \
|
||||
fi
|
||||
.PHONY: fmtcheck
|
||||
|
||||
spellcheck:
|
||||
@command -v misspell > /dev/null 2>&1 || (cd tools && go get github.com/client9/misspell/cmd/misspell && cd ..)
|
||||
@misspell -locale="US" -error -source="text" **/*
|
||||
.PHONY: spellcheck
|
||||
|
||||
staticcheck:
|
||||
@command -v staticcheck > /dev/null 2>&1 || (cd tools && go get honnef.co/go/tools/cmd/staticcheck && cd ..)
|
||||
@staticcheck -checks="all" -tests $(GOFMT_FILES)
|
||||
.PHONY: staticcheck
|
||||
|
||||
test:
|
||||
@go test \
|
||||
-count=1 \
|
||||
-short \
|
||||
-timeout=5m \
|
||||
-vet="${VETTERS}" \
|
||||
./...
|
||||
.PHONY: test
|
||||
|
||||
test-acc:
|
||||
@go test \
|
||||
-count=1 \
|
||||
-race \
|
||||
-timeout=10m \
|
||||
-vet="${VETTERS}" \
|
||||
./...
|
||||
.PHONY: test-acc
|
303
vendor/github.com/sethvargo/go-envconfig/README.md
generated
vendored
Normal file
@@ -0,0 +1,303 @@
|
||||
# Envconfig
|
||||
|
||||
[](https://pkg.go.dev/mod/github.com/sethvargo/go-envconfig)
|
||||
[](https://github.com/sethvargo/go-envconfig/actions?query=workflow%3ATest)
|
||||
|
||||
Envconfig populates struct field values based on environment variables or
|
||||
arbitrary lookup functions. It supports pre-setting mutations, which is useful
|
||||
for things like converting values to uppercase, trimming whitespace, or looking
|
||||
up secrets.
|
||||
|
||||
**Note:** Versions prior to v0.2 used a different import path. This README and
|
||||
examples are for v0.2+.
|
||||
|
||||
## Usage
|
||||
|
||||
Define a struct with fields using the `env` tag:
|
||||
|
||||
```go
|
||||
type MyConfig struct {
|
||||
Port int `env:"PORT"`
|
||||
Username string `env:"USERNAME"`
|
||||
}
|
||||
```
|
||||
|
||||
Set some environment variables:
|
||||
|
||||
```sh
|
||||
export PORT=5555
|
||||
export USERNAME=yoyo
|
||||
```
|
||||
|
||||
Process it using envconfig:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/sethvargo/go-envconfig"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
|
||||
var c MyConfig
|
||||
if err := envconfig.Process(ctx, &c); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// c.Port = 5555
|
||||
// c.Username = "yoyo"
|
||||
}
|
||||
```
|
||||
|
||||
You can also use nested structs, just remember that any fields you want to
|
||||
process must be public:
|
||||
|
||||
```go
|
||||
type MyConfig struct {
|
||||
Database *DatabaseConfig
|
||||
}
|
||||
|
||||
type DatabaseConfig struct {
|
||||
Port int `env:"PORT"`
|
||||
Username string `env:"USERNAME"`
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Use the `env` struct tag to define configuration.
|
||||
|
||||
### Required
|
||||
|
||||
If a field is required, processing will error if the environment variable is
|
||||
unset.
|
||||
|
||||
```go
|
||||
type MyStruct struct {
|
||||
Port int `env:"PORT,required"`
|
||||
}
|
||||
```
|
||||
|
||||
It is invalid to have a field as both `required` and `default`.
|
||||
|
||||
### Default
|
||||
|
||||
If an environment variable is not set, the field will be set to the default
|
||||
value. Note that the environment variable must not be set (e.g. `unset PORT`).
|
||||
If the environment variable is the empty string, that counts as a "value" and
|
||||
the default will not be used.
|
||||
|
||||
```go
|
||||
type MyStruct struct {
|
||||
Port int `env:"PORT,default=5555"`
|
||||
}
|
||||
```
|
||||
|
||||
You can also set the default value to another field or value from the
|
||||
environment, for example:
|
||||
|
||||
```go
|
||||
type MyStruct struct {
|
||||
DefaultPort int `env:"DEFAULT_PORT,default=5555"`
|
||||
Port int `env:"OVERRIDE_PORT,default=$DEFAULT_PORT"`
|
||||
}
|
||||
```
|
||||
|
||||
The value for `Port` will default to the value of `DEFAULT_PORT`.
|
||||
|
||||
It is invalid to have a field as both `required` and `default`.
|
||||
|
||||
### Prefix
|
||||
|
||||
For shared, embedded structs, you can define a prefix to use when processing
|
||||
struct values for that embed.
|
||||
|
||||
```go
|
||||
type SharedConfig struct {
|
||||
Port int `env:"PORT,default=5555"`
|
||||
}
|
||||
|
||||
type Server1 struct {
|
||||
// This processes Port from $FOO_PORT.
|
||||
*SharedConfig `env:",prefix=FOO_"`
|
||||
}
|
||||
|
||||
type Server2 struct {
|
||||
// This processes Port from $BAR_PORT.
|
||||
*SharedConfig `env:",prefix=BAR_"`
|
||||
}
|
||||
```
|
||||
|
||||
It is invalid to specify a prefix on non-struct fields.
|
||||
|
||||
## Complex Types
|
||||
|
||||
### Durations
|
||||
|
||||
In the environment, `time.Duration` values are specified as a parsable Go
|
||||
duration:
|
||||
|
||||
```go
|
||||
type MyStruct struct {
|
||||
MyVar time.Duration `env:"MYVAR"`
|
||||
}
|
||||
```
|
||||
|
||||
```bash
|
||||
export MYVAR="10m" # 10 * time.Minute
|
||||
```
|
||||
|
||||
### TextUnmarshaler / BinaryUnmarshaler
|
||||
|
||||
Types that implement `TextUnmarshaler` or `BinaryUnmarshaler` are processed as such.
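
As a minimal sketch (the `LogLevel` type below is illustrative, not part of this library), a field whose type implements `encoding.TextUnmarshaler` is populated by passing the raw environment value to `UnmarshalText`:

```go
type LogLevel string

// UnmarshalText implements encoding.TextUnmarshaler, so envconfig hands the
// raw environment value to this method instead of using the default parsers.
func (l *LogLevel) UnmarshalText(text []byte) error {
	// A real implementation would likely validate the value here.
	*l = LogLevel(text)
	return nil
}

type MyConfig struct {
	Level LogLevel `env:"LOG_LEVEL,default=info"`
}
```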
|
||||
|
||||
### json.Unmarshaler
|
||||
|
||||
Types that implement `json.Unmarshaler` are processed as such.
|
||||
|
||||
### gob.Decoder
|
||||
|
||||
Types that implement `gob.Decoder` are processed as such.
|
||||
|
||||
|
||||
### Slices
|
||||
|
||||
Slices are specified as comma-separated values:
|
||||
|
||||
```go
|
||||
type MyStruct struct {
|
||||
MyVar []string `env:"MYVAR"`
|
||||
}
|
||||
```
|
||||
|
||||
```bash
|
||||
export MYVAR="a,b,c,d" # []string{"a", "b", "c", "d"}
|
||||
```
|
||||
|
||||
Note that byte slices are special cased and interpreted as strings from the
|
||||
environment.
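
For encoded byte data, this package also ships the `Base64Bytes` and `HexBytes` helper types (see `decoding.go` below). A short sketch, with illustrative field names and example values shown in the comments:

```go
type Keys struct {
	// Plain []byte: the environment value is used as-is.
	// export TOKEN="plain-text"
	Token []byte `env:"TOKEN"`

	// Decoded via the EnvDecode implementations in decoding.go.
	// export SIGNING_KEY="aGVsbG8="   -> []byte("hello")
	// export SALT="deadbeef"          -> []byte{0xde, 0xad, 0xbe, 0xef}
	SigningKey envconfig.Base64Bytes `env:"SIGNING_KEY"`
	Salt       envconfig.HexBytes    `env:"SALT"`
}
```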
|
||||
|
||||
### Maps
|
||||
|
||||
Maps are specified as comma-separated key:value pairs:
|
||||
|
||||
```go
|
||||
type MyStruct struct {
|
||||
MyVar map[string]string `env:"MYVAR"`
|
||||
}
|
||||
```
|
||||
|
||||
```bash
|
||||
export MYVAR="a:b,c:d" # map[string]string{"a":"b", "c":"d"}
|
||||
```
|
||||
|
||||
### Structs
|
||||
|
||||
Envconfig walks the entire struct, so deeply-nested fields are also supported. You can also define your own decoder (see below).
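
A small sketch of that (the nesting and field names are illustrative):

```go
type Config struct {
	Server ServerConfig
}

type ServerConfig struct {
	TLS TLSConfig
}

type TLSConfig struct {
	CertFile string `env:"TLS_CERT_FILE"`
	KeyFile  string `env:"TLS_KEY_FILE"`
}
```

Only the leaf fields need `env` tags; the intermediate structs are traversed automatically.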
|
||||
|
||||
|
||||
## Prefixing
|
||||
|
||||
You can define a custom prefix using the `PrefixLookuper`. This will lookup
|
||||
values in the environment by prefixing the keys with the provided value:
|
||||
|
||||
```go
|
||||
type MyStruct struct {
|
||||
MyVar string `env:"MYVAR"`
|
||||
}
|
||||
```
|
||||
|
||||
```go
|
||||
// Process variables, but look for the "APP_" prefix.
|
||||
l := envconfig.PrefixLookuper("APP_", envconfig.OsLookuper())
|
||||
if err := envconfig.ProcessWith(ctx, &c, l); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
```bash
|
||||
export APP_MYVAR="foo"
|
||||
```
|
||||
|
||||
|
||||
## Extension
|
||||
|
||||
All built-in types are supported except Func and Chan. If you need to define a
|
||||
custom decoder, implement `Decoder`:
|
||||
|
||||
```go
|
||||
type MyStruct struct {
|
||||
field string
|
||||
}
|
||||
|
||||
func (v *MyStruct) EnvDecode(val string) error {
|
||||
v.field = fmt.Sprintf("PREFIX-%s", val)
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
If you need to modify environment variable values before processing, you can
|
||||
specify a custom `Mutator`:
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
Password `env:"PASSWORD"`
|
||||
}
|
||||
|
||||
func resolveSecretFunc(ctx context.Context, key, value string) (string, error) {
|
||||
if strings.HasPrefix(key, "secret://") {
|
||||
return secretmanager.Resolve(ctx, value) // example
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
var config Config
|
||||
envconfig.ProcessWith(ctx, &config, envconfig.OsLookuper(), resolveSecretFunc)
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
Relying on the environment in tests can be troublesome because environment
|
||||
variables are global, which makes it difficult to parallelize the tests.
|
||||
Envconfig supports extracting data from anything that returns a value:
|
||||
|
||||
```go
|
||||
lookuper := envconfig.MapLookuper(map[string]string{
|
||||
"FOO": "bar",
|
||||
"ZIP": "zap",
|
||||
})
|
||||
|
||||
var config Config
|
||||
envconfig.ProcessWith(ctx, &config, lookuper)
|
||||
```
|
||||
|
||||
Now you can parallelize all your tests by providing a map for the lookup
|
||||
function. In fact, that's how the tests in this repo work, so check there for an
|
||||
example.
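
As a hedged sketch of that pattern (the test table and `Config` fields are illustrative), each subtest can run in parallel because nothing reads or mutates the process environment:

```go
func TestConfig(t *testing.T) {
	t.Parallel()

	cases := map[string]map[string]string{
		"defaults": {},
		"custom":   {"PORT": "9090"},
	}

	for name, env := range cases {
		env := env // capture range variable for the parallel subtest
		t.Run(name, func(t *testing.T) {
			t.Parallel()

			var c Config
			if err := envconfig.ProcessWith(context.Background(), &c, envconfig.MapLookuper(env)); err != nil {
				t.Fatal(err)
			}
		})
	}
}
```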
|
||||
|
||||
You can also combine multiple lookupers with `MultiLookuper`. See the GoDoc for
|
||||
more information and examples.
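
For example, a sketch that prefers an explicit map and falls back to the process environment (the map contents are illustrative):

```go
lookuper := envconfig.MultiLookuper(
	envconfig.MapLookuper(map[string]string{"PORT": "8080"}),
	envconfig.OsLookuper(),
)

var config Config
if err := envconfig.ProcessWith(ctx, &config, lookuper); err != nil {
	panic(err)
}
```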
|
||||
|
||||
|
||||
## Inspiration
|
||||
|
||||
This library is conceptually similar to [kelseyhightower/envconfig](https://github.com/kelseyhightower/envconfig), with the following
|
||||
major behavioral differences:
|
||||
|
||||
- Adds support for specifying a custom lookup function (such as a map), which
|
||||
is useful for testing.
|
||||
|
||||
- Only populates fields if they contain zero or nil values. This means you can
|
||||
pre-initialize a struct and any pre-populated fields will not be overwritten
|
||||
during processing.
|
||||
|
||||
- Support for interpolation. The default value for a field can be the value of
|
||||
another field.
|
||||
|
||||
- Support for arbitrary mutators that change/resolve data before type
|
||||
conversion.
|
57
vendor/github.com/sethvargo/go-envconfig/decoding.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
// Copyright The envconfig Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package envconfig

import (
	"encoding/base64"
	"encoding/hex"
	"strings"
)

// Base64Bytes is a slice of bytes where the information is base64-encoded in
// the environment variable.
type Base64Bytes []byte

// EnvDecode implements env.Decoder.
func (b *Base64Bytes) EnvDecode(val string) error {
	val = strings.ReplaceAll(val, "+", "-")
	val = strings.ReplaceAll(val, "/", "_")
	val = strings.TrimRight(val, "=")

	var err error
	*b, err = base64.RawURLEncoding.DecodeString(val)
	return err
}

// Bytes returns the underlying bytes.
func (b Base64Bytes) Bytes() []byte {
	return []byte(b)
}

// HexBytes is a slice of bytes where the information is hex-encoded in the
// environment variable.
type HexBytes []byte

// EnvDecode implements env.Decoder.
func (b *HexBytes) EnvDecode(val string) error {
	var err error
	*b, err = hex.DecodeString(val)
	return err
}

// Bytes returns the underlying bytes.
func (b HexBytes) Bytes() []byte {
	return []byte(b)
}
592
vendor/github.com/sethvargo/go-envconfig/envconfig.go
generated
vendored
Normal file
@@ -0,0 +1,592 @@
|
||||
// Copyright The envconfig Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package envconfig populates struct fields based on environment variable
|
||||
// values (or anything that responds to "Lookup"). Structs declare their
|
||||
// environment dependencies using the `env` tag with the key being the name of
|
||||
// the environment variable, case sensitive.
|
||||
//
|
||||
// type MyStruct struct {
|
||||
// A string `env:"A"` // resolves A to $A
|
||||
// B string `env:"B,required"` // resolves B to $B, errors if $B is unset
|
||||
// C string `env:"C,default=foo"` // resolves C to $C, defaults to "foo"
|
||||
//
|
||||
// D string `env:"D,required,default=foo"` // error, cannot be required and default
|
||||
// E string `env:""` // error, must specify key
|
||||
// }
|
||||
//
|
||||
// All built-in types are supported except Func and Chan. If you need to define
|
||||
// a custom decoder, implement Decoder:
|
||||
//
|
||||
// type MyStruct struct {
|
||||
// field string
|
||||
// }
|
||||
//
|
||||
// func (v *MyStruct) EnvDecode(val string) error {
|
||||
// v.field = fmt.Sprintf("PREFIX-%s", val)
|
||||
// return nil
|
||||
// }
|
||||
//
|
||||
// In the environment, slices are specified as comma-separated values:
|
||||
//
|
||||
// export MYVAR="a,b,c,d" // []string{"a", "b", "c", "d"}
|
||||
//
|
||||
// In the environment, maps are specified as comma-separated key:value pairs:
|
||||
//
|
||||
// export MYVAR="a:b,c:d" // map[string]string{"a":"b", "c":"d"}
|
||||
//
|
||||
// If you need to modify environment variable values before processing, you can
|
||||
// specify a custom mutator:
|
||||
//
|
||||
// type Config struct {
|
||||
// Password `env:"PASSWORD_SECRET"`
|
||||
// }
|
||||
//
|
||||
// func resolveSecretFunc(ctx context.Context, key, value string) (string, error) {
|
||||
// if strings.HasPrefix(key, "secret://") {
|
||||
// return secretmanager.Resolve(ctx, value) // example
|
||||
// }
|
||||
// return value, nil
|
||||
// }
|
||||
//
|
||||
// var config Config
|
||||
// ProcessWith(&config, OsLookuper(), resolveSecretFunc)
|
||||
//
|
||||
package envconfig
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding"
|
||||
"encoding/gob"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
envTag = "env"
|
||||
|
||||
optRequired = "required"
|
||||
optDefault = "default="
|
||||
optPrefix = "prefix="
|
||||
)
|
||||
|
||||
// Error is a custom error type for errors returned by envconfig.
|
||||
type Error string
|
||||
|
||||
// Error implements error.
|
||||
func (e Error) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
const (
|
||||
ErrInvalidMapItem = Error("invalid map item")
|
||||
ErrLookuperNil = Error("lookuper cannot be nil")
|
||||
ErrMissingKey = Error("missing key")
|
||||
ErrMissingRequired = Error("missing required value")
|
||||
ErrNotPtr = Error("input must be a pointer")
|
||||
ErrNotStruct = Error("input must be a struct")
|
||||
ErrPrefixNotStruct = Error("prefix is only valid on struct types")
|
||||
ErrPrivateField = Error("cannot parse private fields")
|
||||
ErrRequiredAndDefault = Error("field cannot be required and have a default value")
|
||||
ErrUnknownOption = Error("unknown option")
|
||||
)
|
||||
|
||||
// Lookuper is an interface that provides a lookup for a string-based key.
|
||||
type Lookuper interface {
|
||||
// Lookup searches for the given key and returns the corresponding string
|
||||
// value. If a value is found, it returns the value and true. If a value is
|
||||
// not found, it returns the empty string and false.
|
||||
Lookup(key string) (string, bool)
|
||||
}
|
||||
|
||||
// osLookuper looks up environment configuration from the local environment.
|
||||
type osLookuper struct{}
|
||||
|
||||
// Verify implements interface.
|
||||
var _ Lookuper = (*osLookuper)(nil)
|
||||
|
||||
func (o *osLookuper) Lookup(key string) (string, bool) {
|
||||
return os.LookupEnv(key)
|
||||
}
|
||||
|
||||
// OsLookuper returns a lookuper that uses the environment (os.LookupEnv) to
|
||||
// resolve values.
|
||||
func OsLookuper() Lookuper {
|
||||
return new(osLookuper)
|
||||
}
|
||||
|
||||
type mapLookuper map[string]string
|
||||
|
||||
var _ Lookuper = (*mapLookuper)(nil)
|
||||
|
||||
func (m mapLookuper) Lookup(key string) (string, bool) {
|
||||
v, ok := m[key]
|
||||
return v, ok
|
||||
}
|
||||
|
||||
// MapLookuper looks up environment configuration from a provided map. This is
|
||||
// useful for testing, especially in parallel, since it does not require you to
|
||||
// mutate the parent environment (which is stateful).
|
||||
func MapLookuper(m map[string]string) Lookuper {
|
||||
return mapLookuper(m)
|
||||
}
|
||||
|
||||
type multiLookuper struct {
|
||||
ls []Lookuper
|
||||
}
|
||||
|
||||
var _ Lookuper = (*multiLookuper)(nil)
|
||||
|
||||
func (m *multiLookuper) Lookup(key string) (string, bool) {
|
||||
for _, l := range m.ls {
|
||||
if v, ok := l.Lookup(key); ok {
|
||||
return v, true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// PrefixLookuper looks up environment configuration using the specified prefix.
|
||||
// This is useful if you want all your variables to start with a particular
|
||||
// prefix like "MY_APP_".
|
||||
func PrefixLookuper(prefix string, l Lookuper) Lookuper {
|
||||
if typ, ok := l.(*prefixLookuper); ok {
|
||||
return &prefixLookuper{prefix: typ.prefix + prefix, l: typ.l}
|
||||
}
|
||||
return &prefixLookuper{prefix: prefix, l: l}
|
||||
}
|
||||
|
||||
type prefixLookuper struct {
|
||||
prefix string
|
||||
l Lookuper
|
||||
}
|
||||
|
||||
func (p *prefixLookuper) Lookup(key string) (string, bool) {
|
||||
return p.l.Lookup(p.prefix + key)
|
||||
}
|
||||
|
||||
// MultiLookuper wraps a collection of lookupers. It does not combine them, and
|
||||
// lookups appear in the order in which they are provided to the initializer.
|
||||
func MultiLookuper(lookupers ...Lookuper) Lookuper {
|
||||
return &multiLookuper{ls: lookupers}
|
||||
}
|
||||
|
||||
// Decoder is an interface that custom types/fields can implement to control how
|
||||
// decoding takes place. For example:
|
||||
//
|
||||
// type MyType string
|
||||
//
|
||||
// func (mt MyType) EnvDecode(val string) error {
|
||||
// return "CUSTOM-"+val
|
||||
// }
|
||||
//
|
||||
type Decoder interface {
|
||||
EnvDecode(val string) error
|
||||
}
|
||||
|
||||
// MutatorFunc is a function that mutates a given value before it is passed
|
||||
// along for processing. This is useful if you want to mutate the environment
|
||||
// variable value before it's converted to the proper type.
|
||||
type MutatorFunc func(ctx context.Context, k, v string) (string, error)
|
||||
|
||||
// options are internal options for decoding.
|
||||
type options struct {
|
||||
Default string
|
||||
Required bool
|
||||
Prefix string
|
||||
}
|
||||
|
||||
// Process processes the struct using the environment. See ProcessWith for a
|
||||
// more customizable version.
|
||||
func Process(ctx context.Context, i interface{}) error {
|
||||
return ProcessWith(ctx, i, OsLookuper())
|
||||
}
|
||||
|
||||
// ProcessWith processes the given interface with the given lookuper. See the
|
||||
// package-level documentation for specific examples and behaviors.
|
||||
func ProcessWith(ctx context.Context, i interface{}, l Lookuper, fns ...MutatorFunc) error {
|
||||
if l == nil {
|
||||
return ErrLookuperNil
|
||||
}
|
||||
|
||||
v := reflect.ValueOf(i)
|
||||
if v.Kind() != reflect.Ptr {
|
||||
return ErrNotPtr
|
||||
}
|
||||
|
||||
e := v.Elem()
|
||||
if e.Kind() != reflect.Struct {
|
||||
return ErrNotStruct
|
||||
}
|
||||
|
||||
t := e.Type()
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
ef := e.Field(i)
|
||||
tf := t.Field(i)
|
||||
tag := tf.Tag.Get(envTag)
|
||||
|
||||
if !ef.CanSet() {
|
||||
if tag != "" {
|
||||
// There's an "env" tag on a private field, we can't alter it, and it's
|
||||
// likely a mistake. Return an error so the user can handle.
|
||||
return fmt.Errorf("%s: %w", tf.Name, ErrPrivateField)
|
||||
}
|
||||
|
||||
// Otherwise continue to the next field.
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse the key and options.
|
||||
key, opts, err := keyAndOpts(tag)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", tf.Name, err)
|
||||
}
|
||||
|
||||
// Initialize pointer structs.
|
||||
for ef.Kind() == reflect.Ptr {
|
||||
if ef.IsNil() {
|
||||
if ef.Type().Elem().Kind() != reflect.Struct {
|
||||
// This is a nil pointer to something that isn't a struct, like
|
||||
// *string. Move along.
|
||||
break
|
||||
}
|
||||
|
||||
// Nil pointer to a struct, create so we can traverse.
|
||||
ef.Set(reflect.New(ef.Type().Elem()))
|
||||
}
|
||||
|
||||
ef = ef.Elem()
|
||||
}
|
||||
|
||||
// Special case handle structs. This has to come after the value resolution in
|
||||
// case the struct has a custom decoder.
|
||||
if ef.Kind() == reflect.Struct {
|
||||
for ef.CanAddr() {
|
||||
ef = ef.Addr()
|
||||
}
|
||||
|
||||
// Lookup the value, ignoring an error if the key isn't defined. This is
|
||||
// required for nested structs that don't declare their own `env` keys,
|
||||
// but have internal fields with an `env` defined.
|
||||
val, err := lookup(key, opts, l)
|
||||
if err != nil && !errors.Is(err, ErrMissingKey) {
|
||||
return fmt.Errorf("%s: %w", tf.Name, err)
|
||||
}
|
||||
|
||||
if ok, err := processAsDecoder(val, ef); ok {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
plu := l
|
||||
if opts.Prefix != "" {
|
||||
plu = PrefixLookuper(opts.Prefix, l)
|
||||
}
|
||||
|
||||
if err := ProcessWith(ctx, ef.Interface(), plu, fns...); err != nil {
|
||||
return fmt.Errorf("%s: %w", tf.Name, err)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// It's invalid to have a prefix on a non-struct field.
|
||||
if opts.Prefix != "" {
|
||||
return ErrPrefixNotStruct
|
||||
}
|
||||
|
||||
// Stop processing if there's no env tag (this comes after nested parsing),
|
||||
// in case there's an env tag in an embedded struct.
|
||||
if tag == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// The field already has a non-zero value, do not overwrite.
|
||||
if !ef.IsZero() {
|
||||
continue
|
||||
}
|
||||
|
||||
val, err := lookup(key, opts, l)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", tf.Name, err)
|
||||
}
|
||||
|
||||
// Apply any mutators. Mutators are applied after the lookup, but before any
|
||||
// type conversions. They always resolve to a string (or error)
|
||||
for _, fn := range fns {
|
||||
if fn != nil {
|
||||
val, err = fn(ctx, key, val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", tf.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set value.
|
||||
if err := processField(val, ef); err != nil {
|
||||
return fmt.Errorf("%s(%q): %w", tf.Name, val, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// keyAndOpts parses the given tag value (e.g. env:"foo,required") and
|
||||
// returns the key name and options as a list.
|
||||
func keyAndOpts(tag string) (string, *options, error) {
|
||||
parts := strings.Split(tag, ",")
|
||||
key, tagOpts := strings.TrimSpace(parts[0]), parts[1:]
|
||||
|
||||
var opts options
|
||||
|
||||
LOOP:
|
||||
for i, o := range tagOpts {
|
||||
o = strings.TrimSpace(o)
|
||||
switch {
|
||||
case o == optRequired:
|
||||
opts.Required = true
|
||||
case strings.HasPrefix(o, optPrefix):
|
||||
opts.Prefix = strings.TrimPrefix(o, optPrefix)
|
||||
case strings.HasPrefix(o, optDefault):
|
||||
// If a default value was given, assume everything after is the provided
|
||||
// value, including comma-seprated items.
|
||||
o = strings.TrimLeft(strings.Join(tagOpts[i:], ","), " ")
|
||||
opts.Default = strings.TrimPrefix(o, optDefault)
|
||||
break LOOP
|
||||
default:
|
||||
return "", nil, fmt.Errorf("%q: %w", o, ErrUnknownOption)
|
||||
}
|
||||
}
|
||||
|
||||
return key, &opts, nil
|
||||
}
|
||||
|
||||
// lookup looks up the given key using the provided Lookuper and options.
|
||||
func lookup(key string, opts *options, l Lookuper) (string, error) {
|
||||
if key == "" {
|
||||
// The struct has something like `env:",required"`, which is likely a
|
||||
// mistake. We could try to infer the envvar from the field name, but that
|
||||
// feels too magical.
|
||||
return "", ErrMissingKey
|
||||
}
|
||||
|
||||
if opts.Required && opts.Default != "" {
|
||||
// Having a default value on a required value doesn't make sense.
|
||||
return "", ErrRequiredAndDefault
|
||||
}
|
||||
|
||||
// Lookup value.
|
||||
val, ok := l.Lookup(key)
|
||||
if !ok {
|
||||
if opts.Required {
|
||||
return "", fmt.Errorf("%w: %s", ErrMissingRequired, key)
|
||||
}
|
||||
|
||||
if opts.Default != "" {
|
||||
// Expand the default value. This allows for a default value that maps to
|
||||
// a different variable.
|
||||
val = os.Expand(opts.Default, func(i string) string {
|
||||
s, ok := l.Lookup(i)
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return ""
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// processAsDecoder processes the given value as a decoder or custom
|
||||
// unmarshaller.
|
||||
func processAsDecoder(v string, ef reflect.Value) (bool, error) {
|
||||
// Keep a running error. It's possible that a property might implement
|
||||
// multiple decoders, and we don't know *which* decoder will succeed. If we
|
||||
// get through all of them, we'll return the most recent error.
|
||||
var imp bool
|
||||
var err error
|
||||
|
||||
// Resolve any pointers.
|
||||
for ef.CanAddr() {
|
||||
ef = ef.Addr()
|
||||
}
|
||||
|
||||
if ef.CanInterface() {
|
||||
iface := ef.Interface()
|
||||
|
||||
if dec, ok := iface.(Decoder); ok {
|
||||
imp = true
|
||||
if err = dec.EnvDecode(v); err == nil {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
if tu, ok := iface.(encoding.BinaryUnmarshaler); ok {
|
||||
imp = true
|
||||
if err = tu.UnmarshalBinary([]byte(v)); err == nil {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
if tu, ok := iface.(gob.GobDecoder); ok {
|
||||
imp = true
|
||||
if err = tu.GobDecode([]byte(v)); err == nil {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
if tu, ok := iface.(json.Unmarshaler); ok {
|
||||
imp = true
|
||||
if err = tu.UnmarshalJSON([]byte(v)); err == nil {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
if tu, ok := iface.(encoding.TextUnmarshaler); ok {
|
||||
imp = true
|
||||
if err = tu.UnmarshalText([]byte(v)); err == nil {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return imp, err
|
||||
}
|
||||
|
||||
func processField(v string, ef reflect.Value) error {
|
||||
// Handle pointers and uninitialized pointers.
|
||||
for ef.Type().Kind() == reflect.Ptr {
|
||||
if ef.IsNil() {
|
||||
ef.Set(reflect.New(ef.Type().Elem()))
|
||||
}
|
||||
ef = ef.Elem()
|
||||
}
|
||||
|
||||
tf := ef.Type()
|
||||
tk := tf.Kind()
|
||||
|
||||
// Handle existing decoders.
|
||||
if ok, err := processAsDecoder(v, ef); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
// We don't check if the value is empty earlier, because the user might want
|
||||
// to define a custom decoder and treat the empty variable as a special case.
|
||||
// However, if we got this far, none of the remaining parsers will succeed, so
|
||||
// bail out now.
|
||||
if v == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch tk {
|
||||
case reflect.Bool:
|
||||
b, err := strconv.ParseBool(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ef.SetBool(b)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
f, err := strconv.ParseFloat(v, tf.Bits())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ef.SetFloat(f)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
|
||||
i, err := strconv.ParseInt(v, 0, tf.Bits())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ef.SetInt(i)
|
||||
case reflect.Int64:
|
||||
// Special case time.Duration values.
|
||||
if tf.PkgPath() == "time" && tf.Name() == "Duration" {
|
||||
d, err := time.ParseDuration(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ef.SetInt(int64(d))
|
||||
} else {
|
||||
i, err := strconv.ParseInt(v, 0, tf.Bits())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ef.SetInt(i)
|
||||
}
|
||||
case reflect.String:
|
||||
ef.SetString(v)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
i, err := strconv.ParseUint(v, 0, tf.Bits())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ef.SetUint(i)
|
||||
|
||||
case reflect.Interface:
|
||||
return fmt.Errorf("cannot decode into interfaces")
|
||||
|
||||
// Maps
|
||||
case reflect.Map:
|
||||
vals := strings.Split(v, ",")
|
||||
mp := reflect.MakeMapWithSize(tf, len(vals))
|
||||
for _, val := range vals {
|
||||
pair := strings.SplitN(val, ":", 2)
|
||||
if len(pair) < 2 {
|
||||
return fmt.Errorf("%s: %w", val, ErrInvalidMapItem)
|
||||
}
|
||||
mKey, mVal := strings.TrimSpace(pair[0]), strings.TrimSpace(pair[1])
|
||||
|
||||
k := reflect.New(tf.Key()).Elem()
|
||||
if err := processField(mKey, k); err != nil {
|
||||
return fmt.Errorf("%s: %w", mKey, err)
|
||||
}
|
||||
|
||||
v := reflect.New(tf.Elem()).Elem()
|
||||
if err := processField(mVal, v); err != nil {
|
||||
return fmt.Errorf("%s: %w", mVal, err)
|
||||
}
|
||||
|
||||
mp.SetMapIndex(k, v)
|
||||
}
|
||||
ef.Set(mp)
|
||||
|
||||
// Slices
|
||||
case reflect.Slice:
|
||||
// Special case: []byte
|
||||
if tf.Elem().Kind() == reflect.Uint8 {
|
||||
ef.Set(reflect.ValueOf([]byte(v)))
|
||||
} else {
|
||||
vals := strings.Split(v, ",")
|
||||
s := reflect.MakeSlice(tf, len(vals), len(vals))
|
||||
for i, val := range vals {
|
||||
val = strings.TrimSpace(val)
|
||||
if err := processField(val, s.Index(i)); err != nil {
|
||||
return fmt.Errorf("%s: %w", val, err)
|
||||
}
|
||||
}
|
||||
ef.Set(s)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
5
vendor/github.com/sethvargo/go-envconfig/go.mod
generated
vendored
Normal file
@@ -0,0 +1,5 @@
module github.com/sethvargo/go-envconfig

go 1.14

require github.com/google/go-cmp v0.4.1
4
vendor/github.com/sethvargo/go-envconfig/go.sum
generated
vendored
Normal file
@@ -0,0 +1,4 @@
github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
201
vendor/github.com/suborbital/grav/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

(Standard Apache License 2.0 text, identical to the go-envconfig LICENSE above.)
84
vendor/github.com/suborbital/grav/discovery/local/discovery.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/schollz/peerdiscovery"
|
||||
"github.com/suborbital/grav/grav"
|
||||
"github.com/suborbital/vektor/vlog"
|
||||
)
|
||||
|
||||
// Discovery is a grav Discovery plugin using local network multicast
|
||||
type Discovery struct {
|
||||
opts *grav.DiscoveryOpts
|
||||
log *vlog.Logger
|
||||
|
||||
discoveryFunc grav.DiscoveryFunc
|
||||
}
|
||||
|
||||
// payload is a discovery payload
|
||||
type payload struct {
|
||||
UUID string `json:"uuid"`
|
||||
Port string `json:"port"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
// New creates a new local discovery plugin
|
||||
func New() *Discovery {
|
||||
g := &Discovery{}
|
||||
|
||||
return g
|
||||
}
|
||||
|
||||
// Start starts discovery
|
||||
func (d *Discovery) Start(opts *grav.DiscoveryOpts, discoveryFunc grav.DiscoveryFunc) error {
|
||||
d.opts = opts
|
||||
d.log = opts.Logger
|
||||
d.discoveryFunc = discoveryFunc
|
||||
|
||||
d.log.Info("[discovery-local] starting discovery, advertising endpoint", opts.TransportPort, opts.TransportURI)
|
||||
|
||||
payloadFunc := func() []byte {
|
||||
payload := payload{
|
||||
UUID: d.opts.NodeUUID,
|
||||
Port: opts.TransportPort,
|
||||
Path: opts.TransportURI,
|
||||
}
|
||||
|
||||
payloadBytes, _ := json.Marshal(payload)
|
||||
return payloadBytes
|
||||
}
|
||||
|
||||
notifyFunc := func(peer peerdiscovery.Discovered) {
|
||||
d.log.Debug("[discovery-local] potential peer found:", peer.Address)
|
||||
|
||||
payload := payload{}
|
||||
if err := json.Unmarshal(peer.Payload, &payload); err != nil {
|
||||
d.log.Debug("[discovery-local] peer did not offer correct payload, discarding")
|
||||
return
|
||||
}
|
||||
|
||||
endpoint := fmt.Sprintf("%s:%s%s", peer.Address, payload.Port, payload.Path)
|
||||
|
||||
// send the discovery to Grav. Grav is responsible for ensuring uniqueness of the connections.
|
||||
d.discoveryFunc(endpoint, payload.UUID)
|
||||
}
|
||||
|
||||
_, err := peerdiscovery.Discover(peerdiscovery.Settings{
|
||||
Limit: -1,
|
||||
PayloadFunc: payloadFunc,
|
||||
Delay: 10 * time.Second,
|
||||
TimeLimit: -1,
|
||||
Notify: notifyFunc,
|
||||
AllowSelf: true,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// UseDiscoveryFunc sets the function to be used when a new peer is discovered
|
||||
func (d *Discovery) UseDiscoveryFunc(dFunc func(endpoint string, uuid string)) {
|
||||
d.discoveryFunc = dFunc
|
||||
}
|
83
vendor/github.com/suborbital/grav/grav/bus.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
package grav

const (
	defaultBusChanSize = 256
)

// messageBus is responsible for emitting messages among the connected pods
// and managing the failure cases for those pods
type messageBus struct {
	busChan MsgChan
	pool    *connectionPool
	buffer  *MsgBuffer
}

// newMessageBus creates a new messageBus
func newMessageBus() *messageBus {
	b := &messageBus{
		busChan: make(chan Message, defaultBusChanSize),
		pool:    newConnectionPool(),
		buffer:  NewMsgBuffer(defaultBufferSize),
	}

	b.start()

	return b
}

// addPod adds a pod to the connection pool
func (b *messageBus) addPod(pod *Pod) {
	b.pool.insert(pod)
}

func (b *messageBus) start() {
	go func() {
		// continually take new messages and for each,
		// grab the next active connection from the ring and then
		// start traversing around the ring to emit the message to
		// each connection until landing back at the beginning of the
		// ring, and repeat forever when each new message arrives
		for msg := range b.busChan {
			for {
				// make sure the next pod is ready for messages
				if err := b.pool.prepareNext(b.buffer); err == nil {
					break
				}
			}

			startingConn := b.pool.next()

			b.traverse(msg, startingConn)

			b.buffer.Push(msg)
		}
	}()
}

func (b *messageBus) traverse(msg Message, start *podConnection) {
	startID := start.ID
	conn := start

	for {
		// send the message to the pod
		conn.send(msg)

		// run checks on the next podConnection to see if
		// anything needs to be done (including potentially deleting it)
		next := b.pool.peek()
		if err := b.pool.prepareNext(b.buffer); err != nil {
			if startID == next.ID {
				startID = next.next.ID
			}
		}

		// now advance the ring
		conn = b.pool.next()

		if startID == conn.ID {
			// if we have arrived back at the starting point on the ring
			// we have done our job and are ready for the next message
			break
		}
	}
}
21
vendor/github.com/suborbital/grav/grav/discovery.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
package grav

import "github.com/suborbital/vektor/vlog"

// DiscoveryFunc is a function that allows a plugin to report a newly discovered node
type DiscoveryFunc func(endpoint string, uuid string)

// Discovery represents a discovery plugin
type Discovery interface {
	// Start is called to start the Discovery plugin
	Start(*DiscoveryOpts, DiscoveryFunc) error
}

// DiscoveryOpts is a set of options for transports
type DiscoveryOpts struct {
	NodeUUID      string
	TransportPort string
	TransportURI  string
	Logger        *vlog.Logger
	Custom        interface{}
}
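For clarity, a hedged sketch of the smallest Discovery implementation that satisfies the interface above: Start receives the options plus a DiscoveryFunc and reports peers by invoking that callback. The StaticDiscovery type and its peer data are illustrative, not part of this repository.

package staticdiscovery

import "github.com/suborbital/grav/grav"

// StaticDiscovery "discovers" a fixed, pre-configured set of peers (illustrative only).
type StaticDiscovery struct {
	Peers map[string]string // node UUID -> endpoint
}

// Start satisfies grav.Discovery by reporting each configured peer once.
func (s *StaticDiscovery) Start(opts *grav.DiscoveryOpts, discoveryFunc grav.DiscoveryFunc) error {
	for uuid, endpoint := range s.Peers {
		opts.Logger.Debug("[discovery-static] reporting peer", endpoint)
		discoveryFunc(endpoint, uuid)
	}

	return nil
}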
71
vendor/github.com/suborbital/grav/grav/filter.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
package grav

import (
	"sync"
)

// messageFilter is a series of maps that associate things about a message (its UUID, type, etc) with a boolean value to say if
// it should be allowed or denied. For each of the maps, if an entry is included, the value of the boolean is respected (true = allow, false = deny).
// Maps are either inclusive (meaning that a missing entry defaults to allow), or exclusive (meaning that a missing entry defaults to deny).
// This can be configured per map by modifying the UUIDInclusive, TypeInclusive (etc) fields.
type messageFilter struct {
	UUIDMap       map[string]bool
	UUIDInclusive bool

	TypeMap       map[string]bool
	TypeInclusive bool

	lock sync.RWMutex
}

func newMessageFilter() *messageFilter {
	mf := &messageFilter{
		UUIDMap:       map[string]bool{},
		UUIDInclusive: true,
		TypeMap:       map[string]bool{},
		TypeInclusive: true,
		lock:          sync.RWMutex{},
	}

	return mf
}

func (mf *messageFilter) allow(msg Message) bool {
	mf.lock.RLock()
	defer mf.lock.RUnlock()

	// for each map, deny the message if:
	// - a filter entry exists and its value is false
	// - a filter entry doesn't exist and its inclusive rule is false

	allowType, typeExists := mf.TypeMap[msg.Type()]
	if typeExists && !allowType {
		return false
	} else if !typeExists && !mf.TypeInclusive {
		return false
	}

	allowUUID, uuidExists := mf.UUIDMap[msg.UUID()]
	if uuidExists && !allowUUID {
		return false
	} else if !uuidExists && !mf.UUIDInclusive {
		return false
	}

	return true
}

// FilterUUID likely should not be used in normal cases; it adds a message UUID to the pod's filter.
func (mf *messageFilter) FilterUUID(uuid string, allow bool) {
	mf.lock.Lock()
	defer mf.lock.Unlock()

	mf.UUIDMap[uuid] = allow
}

func (mf *messageFilter) FilterType(msgType string, allow bool) {
	mf.lock.Lock()
	defer mf.lock.Unlock()

	mf.TypeMap[msgType] = allow
}
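Since messageFilter is unexported, its inclusive/exclusive rules can only be exercised from inside package grav; the fragment below is an in-package, test-style sketch of the doc comment's semantics (it assumes fmt is imported and would live in a _test.go file).

func ExampleMessageFilterSemantics() {
	mf := newMessageFilter()

	// TypeInclusive defaults to true, so an unknown type is allowed
	fmt.Println(mf.allow(NewMsg("example.type", nil))) // true

	// an explicit deny entry blocks that type
	mf.FilterType("example.type", false)
	fmt.Println(mf.allow(NewMsg("example.type", nil))) // false

	// switching the map to exclusive denies anything without an entry
	mf.TypeInclusive = false
	fmt.Println(mf.allow(NewMsg("other.type", nil))) // false
}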
72
vendor/github.com/suborbital/grav/grav/grav.go
generated
vendored
Normal file
@@ -0,0 +1,72 @@
package grav

import (
	"github.com/pkg/errors"

	"github.com/google/uuid"
	"github.com/suborbital/vektor/vlog"
)

// ErrTransportNotConfigured represent package-level vars
var (
	ErrTransportNotConfigured = errors.New("transport plugin not configured")
)

// Grav represents a Grav message bus instance
type Grav struct {
	NodeUUID string
	bus      *messageBus
	logger   *vlog.Logger
	hub      *hub
}

// New creates a new Grav with the provided options
func New(opts ...OptionsModifier) *Grav {
	nodeUUID := uuid.New().String()

	options := newOptionsWithModifiers(opts...)

	g := &Grav{
		NodeUUID: nodeUUID,
		bus:      newMessageBus(),
		logger:   options.Logger,
	}

	// the hub handles coordinating the transport and discovery plugins
	g.hub = initHub(nodeUUID, options, options.Transport, options.Discovery, g.Connect)

	return g
}

// Connect creates a new connection (pod) to the bus
func (g *Grav) Connect() *Pod {
	opts := &podOpts{WantsReplay: false}

	return g.connectWithOpts(opts)
}

// ConnectWithReplay creates a new connection (pod) to the bus
// and replays recent messages when the pod sets its onFunc
func (g *Grav) ConnectWithReplay() *Pod {
	opts := &podOpts{WantsReplay: true}

	return g.connectWithOpts(opts)
}

// ConnectEndpoint uses the configured transport to connect the bus to an external endpoint
func (g *Grav) ConnectEndpoint(endpoint string) error {
	return g.hub.connectEndpoint(endpoint, "")
}

// ConnectBridgeTopic connects the Grav instance to a particular topic on the connected bridge
func (g *Grav) ConnectBridgeTopic(topic string) error {
	return g.hub.connectBridgeTopic(topic)
}

func (g *Grav) connectWithOpts(opts *podOpts) *Pod {
	pod := newPod(g.bus.busChan, opts)

	g.bus.addPod(pod)

	return pod
}
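As a usage sketch (not part of the vendored file): the exported surface in this diff is enough to run an in-process bus. New builds the instance, Connect returns a *Pod, and hub.go shows pods exposing On and Send; the short sleep simply gives the asynchronous bus time to deliver before the program exits.

package main

import (
	"fmt"
	"time"

	"github.com/suborbital/grav/grav"
)

func main() {
	// no transport or discovery configured: messages stay in-process
	g := grav.New()

	receiver := g.Connect()
	receiver.On(func(msg grav.Message) error {
		fmt.Println("received:", msg.Type(), string(msg.Data()))
		return nil
	})

	sender := g.Connect()
	sender.Send(grav.NewMsg(grav.MsgTypeDefault, []byte("hello")))

	// give the bus goroutine a moment to traverse the connection ring
	time.Sleep(100 * time.Millisecond)
}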
290
vendor/github.com/suborbital/grav/grav/hub.go
generated
vendored
Normal file
@@ -0,0 +1,290 @@
package grav

import (
	"sync"

	"github.com/pkg/errors"
	"github.com/suborbital/vektor/vlog"
)

// hub is responsible for coordinating the transport and discovery plugins
type hub struct {
	nodeUUID    string
	transport   Transport
	discovery   Discovery
	log         *vlog.Logger
	pod         *Pod
	connectFunc func() *Pod

	connections      map[string]Connection
	topicConnections map[string]TopicConnection

	lock sync.RWMutex
}

func initHub(nodeUUID string, options *Options, tspt Transport, dscv Discovery, connectFunc func() *Pod) *hub {
	h := &hub{
		nodeUUID:         nodeUUID,
		transport:        tspt,
		discovery:        dscv,
		log:              options.Logger,
		pod:              connectFunc(),
		connectFunc:      connectFunc,
		connections:      map[string]Connection{},
		topicConnections: map[string]TopicConnection{},
		lock:             sync.RWMutex{},
	}

	// start transport, then discovery if each have been configured (can have transport but no discovery)
	if h.transport != nil {
		transportOpts := &TransportOpts{
			NodeUUID: nodeUUID,
			Port:     options.Port,
			URI:      options.URI,
			Logger:   options.Logger,
		}

		// setup messages to be sent to all active connections
		h.pod.On(h.outgoingMessageHandler())

		go func() {
			if err := h.transport.Setup(transportOpts, h.handleIncomingConnection, h.findConnection); err != nil {
				options.Logger.Error(errors.Wrap(err, "failed to Setup transport"))
			}
		}()

		if h.discovery != nil {
			discoveryOpts := &DiscoveryOpts{
				NodeUUID:      nodeUUID,
				TransportPort: transportOpts.Port,
				TransportURI:  transportOpts.URI,
				Logger:        options.Logger,
			}

			go func() {
				if err := h.discovery.Start(discoveryOpts, h.discoveryHandler()); err != nil {
					options.Logger.Error(errors.Wrap(err, "failed to Start discovery"))
				}
			}()
		}
	}

	return h
}

func (h *hub) discoveryHandler() func(endpoint string, uuid string) {
	return func(endpoint string, uuid string) {
		if uuid == h.nodeUUID {
			h.log.Debug("discovered self, discarding")
			return
		}

		// connectEndpoint does this check as well, but it's better to do it here as well
		// as it reduces the number of extraneous outgoing handshakes that get attempted.
		if existing, exists := h.findConnection(uuid); exists {
			if !existing.CanReplace() {
				h.log.Debug("encountered duplicate connection, discarding")
				return
			}
		}

		if err := h.connectEndpoint(endpoint, uuid); err != nil {
			h.log.Error(errors.Wrap(err, "failed to connectEndpoint for discovered peer"))
		}
	}
}

// connectEndpoint creates a new outgoing connection
func (h *hub) connectEndpoint(endpoint, uuid string) error {
	if h.transport == nil {
		return ErrTransportNotConfigured
	}

	if h.transport.Type() == TransportTypeBridge {
		return ErrBridgeOnlyTransport
	}

	h.log.Debug("connecting to endpoint", endpoint)

	conn, err := h.transport.CreateConnection(endpoint)
	if err != nil {
		return errors.Wrap(err, "failed to transport.CreateConnection")
	}

	h.setupOutgoingConnection(conn, uuid)

	return nil
}

// connectBridgeTopic creates a new outgoing connection
func (h *hub) connectBridgeTopic(topic string) error {
	if h.transport == nil {
		return ErrTransportNotConfigured
	}

	if h.transport.Type() != TransportTypeBridge {
		return ErrNotBridgeTransport
	}

	h.log.Debug("connecting to topic", topic)

	conn, err := h.transport.ConnectBridgeTopic(topic)
	if err != nil {
		return errors.Wrap(err, "failed to transport.CreateConnection")
	}

	h.addTopicConnection(conn, topic)

	return nil
}

func (h *hub) setupOutgoingConnection(connection Connection, uuid string) {
	handshake := &TransportHandshake{h.nodeUUID}

	ack, err := connection.DoOutgoingHandshake(handshake)
	if err != nil {
		h.log.Error(errors.Wrap(err, "failed to connection.DoOutgoingHandshake"))
		connection.Close()
		return
	}

	if uuid == "" {
		if ack.UUID == "" {
			h.log.ErrorString("connection handshake returned empty UUID, terminating connection")
			connection.Close()
			return
		}

		uuid = ack.UUID
	} else if ack.UUID != uuid {
		h.log.ErrorString("connection handshake Ack did not match Discovery Ack, terminating connection")
		connection.Close()
		return
	}

	h.setupNewConnection(connection, uuid)
}

func (h *hub) handleIncomingConnection(connection Connection) {
	ack := &TransportHandshakeAck{h.nodeUUID}

	handshake, err := connection.DoIncomingHandshake(ack)
	if err != nil {
		h.log.Error(errors.Wrap(err, "failed to connection.DoIncomingHandshake"))
		connection.Close()
		return
	}

	if handshake.UUID == "" {
		h.log.ErrorString("connection handshake returned empty UUID, terminating connection")
		connection.Close()
		return
	}

	h.setupNewConnection(connection, handshake.UUID)
}

func (h *hub) setupNewConnection(connection Connection, uuid string) {
	// if an existing connection is found, check if it can be replaced and do so if possible
	if existing, exists := h.findConnection(uuid); exists {
		if !existing.CanReplace() {
			connection.Close()
			h.log.Debug("encountered duplicate connection, discarding")
		} else {
			existing.Close()
			h.replaceConnection(connection, uuid)
		}
	} else {
		h.addConnection(connection, uuid)
	}
}

func (h *hub) outgoingMessageHandler() MsgFunc {
	return func(msg Message) error {
		// read-lock while dispatching all of the goroutines to prevent concurrent read/write
		h.lock.RLock()
		defer h.lock.RUnlock()

		for u := range h.connections {
			uuid := u
			conn := h.connections[uuid]

			go func() {
				h.log.Debug("sending message", msg.UUID(), "to", uuid)

				if err := conn.Send(msg); err != nil {
					if errors.Is(err, ErrConnectionClosed) {
						h.log.Debug("attempted to send on closed connection, will remove")
					} else {
						h.log.Warn("error sending to connection", uuid, ":", err.Error())
					}

					h.removeConnection(uuid)
				}
			}()
		}

		return nil
	}
}

func (h *hub) incomingMessageHandler(uuid string) ReceiveFunc {
	return func(msg Message) {
		h.log.Debug("received message ", msg.UUID(), "from node", uuid)

		h.pod.Send(msg)
	}
}

func (h *hub) addConnection(connection Connection, uuid string) {
	h.lock.Lock()
	defer h.lock.Unlock()

	h.log.Debug("adding connection for", uuid)

	connection.Start(h.incomingMessageHandler(uuid))

	h.connections[uuid] = connection
}

func (h *hub) addTopicConnection(connection TopicConnection, topic string) {
	h.lock.Lock()
	defer h.lock.Unlock()

	h.log.Debug("adding bridge connection for", topic)

	connection.Start(h.connectFunc())

	h.topicConnections[topic] = connection
}

func (h *hub) replaceConnection(newConnection Connection, uuid string) {
	h.lock.Lock()
	defer h.lock.Unlock()

	h.log.Debug("replacing connection for", uuid)

	delete(h.connections, uuid)

	newConnection.Start(h.incomingMessageHandler(uuid))

	h.connections[uuid] = newConnection
}

func (h *hub) removeConnection(uuid string) {
	h.lock.Lock()
	defer h.lock.Unlock()

	h.log.Debug("removing connection for", uuid)

	delete(h.connections, uuid)
}

func (h *hub) findConnection(uuid string) (Connection, bool) {
	h.lock.RLock()
	defer h.lock.RUnlock()

	conn, exists := h.connections[uuid]

	return conn, exists
}
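One behaviour of connectEndpoint above that callers can rely on: when no transport plugin is configured, outgoing connections fail fast with the ErrTransportNotConfigured sentinel from grav.go. A small sketch (the endpoint string is an arbitrary placeholder):

package main

import (
	"errors"
	"fmt"

	"github.com/suborbital/grav/grav"
)

func main() {
	g := grav.New() // no grav.UseTransport(...) supplied

	err := g.ConnectEndpoint("10.0.0.7:8080/meta/message")

	// hub.connectEndpoint returns the sentinel unwrapped, so errors.Is matches it
	fmt.Println(errors.Is(err, grav.ErrTransportNotConfigured)) // true
}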
170
vendor/github.com/suborbital/grav/grav/message.go
generated
vendored
Normal file
@@ -0,0 +1,170 @@
package grav

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/google/uuid"
)

// MsgTypeDefault and other represent message consts
const (
	MsgTypeDefault     string = "grav.default"
	msgTypePodFeedback string = "grav.feedback"
)

// MsgFunc is a callback function that accepts a message and returns an error
type MsgFunc func(Message) error

// MsgChan is a channel that accepts a message
type MsgChan chan Message

// Message represents a message
type Message interface {
	// Unique ID for this message
	UUID() string
	// ID of the parent event or request, such as HTTP request
	ParentID() string
	// The UUID of the message being replied to, if any
	ReplyTo() string
	// Allow setting a message UUID that this message is a response to
	SetReplyTo(string)
	// Type of message (application-specific)
	Type() string
	// Time the message was sent
	Timestamp() time.Time
	// Raw data of message
	Data() []byte
	// Unmarshal the message's data into a struct
	UnmarshalData(interface{}) error
	// Marshal the message itself to encoded bytes (JSON or otherwise)
	Marshal() ([]byte, error)
	// Unmarshal encoded Message into object
	Unmarshal([]byte) error
}

// NewMsg creates a new Message with the built-in `_message` type
func NewMsg(msgType string, data []byte) Message {
	return new(msgType, "", data)
}

// NewMsgWithParentID returns a new message with the provided parent ID
func NewMsgWithParentID(msgType, parentID string, data []byte) Message {
	return new(msgType, parentID, data)
}

// NewMsgReplyTo creates a new message in response to a previous message
func NewMsgReplyTo(ticket MsgReceipt, msgType string, data []byte) Message {
	m := new(msgType, "", data)
	m.SetReplyTo(ticket.UUID)

	return m
}

// MsgFromBytes returns a default _message that has been unmarshalled from bytes.
// Should only be used if the default _message type is being used.
func MsgFromBytes(bytes []byte) (Message, error) {
	m := &_message{}
	if err := m.Unmarshal(bytes); err != nil {
		return nil, err
	}

	return m, nil
}

// MsgFromRequest extracts an encoded Message from an HTTP request
func MsgFromRequest(r *http.Request) (Message, error) {
	defer r.Body.Close()
	bytes, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, err
	}

	return MsgFromBytes(bytes)
}

func new(msgType, parentID string, data []byte) Message {
	uuid := uuid.New()

	m := &_message{
		Meta: _meta{
			UUID:      uuid.String(),
			ParentID:  parentID,
			ReplyTo:   "",
			MsgType:   msgType,
			Timestamp: time.Now(),
		},
		Payload: _payload{
			Data: data,
		},
	}

	return m
}

// _message is a basic built-in implementation of Message
// most applications should define their own data structure
// that implements the interface
type _message struct {
	Meta    _meta    `json:"meta"`
	Payload _payload `json:"payload"`
}

type _meta struct {
	UUID      string    `json:"uuid"`
	ParentID  string    `json:"parent_id"`
	ReplyTo   string    `json:"response_to"`
	MsgType   string    `json:"msg_type"`
	Timestamp time.Time `json:"timestamp"`
}

type _payload struct {
	Data []byte `json:"data"`
}

func (m *_message) UUID() string {
	return m.Meta.UUID
}

func (m *_message) ParentID() string {
	return m.Meta.ParentID
}

func (m *_message) ReplyTo() string {
	return m.Meta.ReplyTo
}

func (m *_message) SetReplyTo(uuid string) {
	m.Meta.ReplyTo = uuid
}

func (m *_message) Type() string {
	return m.Meta.MsgType
}

func (m *_message) Timestamp() time.Time {
	return m.Meta.Timestamp
}

func (m *_message) Data() []byte {
	return m.Payload.Data
}

func (m *_message) UnmarshalData(target interface{}) error {
	return json.Unmarshal(m.Payload.Data, target)
}

func (m *_message) Marshal() ([]byte, error) {
	bytes, err := json.Marshal(m)
	if err != nil {
		return nil, err
	}

	return bytes, nil
}

func (m *_message) Unmarshal(bytes []byte) error {
	return json.Unmarshal(bytes, m)
}
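A short round-trip sketch with the built-in message type: marshal an application struct into NewMsg, then recover it with UnmarshalData. The greeting struct and the example.greeting type string are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/suborbital/grav/grav"
)

// greeting is a hypothetical application payload, not defined in this diff
type greeting struct {
	Name string `json:"name"`
}

func main() {
	data, _ := json.Marshal(greeting{Name: "world"})

	msg := grav.NewMsg("example.greeting", data)

	// the message would normally travel over a Pod or transport here

	received := greeting{}
	if err := msg.UnmarshalData(&received); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}

	fmt.Println("hello,", received.Name) // hello, world
}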
90
vendor/github.com/suborbital/grav/grav/msgbuffer.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
package grav

import (
	"sync"
)

const (
	defaultBufferSize = 128
)

// MsgBuffer is a buffer of messages with a particular size limit.
// Oldest messages are automatically evicted as new ones are added
// past said limit. Push() and Iter() are thread-safe.
type MsgBuffer struct {
	msgs       map[string]Message
	order      []string
	limit      int
	startIndex int
	lock       sync.RWMutex
}

func NewMsgBuffer(limit int) *MsgBuffer {
	m := &MsgBuffer{
		msgs:       map[string]Message{},
		order:      []string{},
		limit:      limit,
		startIndex: 0,
		lock:       sync.RWMutex{},
	}

	return m
}

// Push pushes a new message onto the end of the buffer and evicts the oldest, if needed (based on limit)
func (m *MsgBuffer) Push(msg Message) {
	m.lock.Lock()
	defer m.lock.Unlock()

	m.msgs[msg.UUID()] = msg

	lastIndex := len(m.order) - 1

	if len(m.order) == m.limit {
		delete(m.msgs, m.order[m.startIndex]) // delete the current "first"

		m.order[m.startIndex] = msg.UUID()

		if m.startIndex == lastIndex {
			m.startIndex = 0
		} else {
			m.startIndex++
		}
	} else {
		m.order = append(m.order, msg.UUID())
	}
}

// Iter calls msgFunc once per message in the buffer
func (m *MsgBuffer) Iter(msgFunc MsgFunc) {
	m.lock.RLock()
	defer m.lock.RUnlock()

	if len(m.order) == 0 {
		return
	}

	index := m.startIndex
	lastIndex := len(m.order) - 1

	more := true
	for more {
		uuid := m.order[index]
		msg := m.msgs[uuid]

		msgFunc(msg)

		newIndex := index
		if newIndex == lastIndex {
			newIndex = 0
		} else {
			newIndex++
		}

		if newIndex == m.startIndex {
			more = false
		}

		index = newIndex
	}
}
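MsgBuffer is exported, so it can be used on its own; a brief sketch of Push evicting the oldest entry once the limit is hit and Iter walking the survivors in order:

package main

import (
	"fmt"

	"github.com/suborbital/grav/grav"
)

func main() {
	buf := grav.NewMsgBuffer(2) // keep at most two messages

	buf.Push(grav.NewMsg(grav.MsgTypeDefault, []byte("one")))
	buf.Push(grav.NewMsg(grav.MsgTypeDefault, []byte("two")))
	buf.Push(grav.NewMsg(grav.MsgTypeDefault, []byte("three"))) // evicts "one"

	buf.Iter(func(msg grav.Message) error {
		fmt.Println(string(msg.Data())) // prints "two", then "three"
		return nil
	})
}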
72
vendor/github.com/suborbital/grav/grav/options.go
generated
vendored
Normal file
@@ -0,0 +1,72 @@
package grav

import "github.com/suborbital/vektor/vlog"

// Options represents Grav options
type Options struct {
	Logger    *vlog.Logger
	Transport Transport
	Discovery Discovery
	Port      string
	URI       string
}

// OptionsModifier is a function that modifies an option
type OptionsModifier func(*Options)

func newOptionsWithModifiers(mods ...OptionsModifier) *Options {
	opts := defaultOptions()

	for _, m := range mods {
		m(opts)
	}

	return opts
}

// UseLogger allows a custom logger to be used
func UseLogger(logger *vlog.Logger) OptionsModifier {
	return func(o *Options) {
		o.Logger = logger
	}
}

// UseTransport sets the transport plugin to be used.
func UseTransport(transport Transport) OptionsModifier {
	return func(o *Options) {
		o.Transport = transport
	}
}

// UseEndpoint sets the endpoint settings for the instance to broadcast for discovery
// Pass empty strings for either if you would like to keep the defaults (8080 and /meta/message)
func UseEndpoint(port, uri string) OptionsModifier {
	return func(o *Options) {
		if port != "" {
			o.Port = port
		}

		if uri != "" {
			o.URI = uri
		}
	}
}

// UseDiscovery sets the discovery plugin to be used
func UseDiscovery(discovery Discovery) OptionsModifier {
	return func(o *Options) {
		o.Discovery = discovery
	}
}

func defaultOptions() *Options {
	o := &Options{
		Logger:    vlog.Default(),
		Port:      "8080",
		URI:       "/meta/message",
		Transport: nil,
		Discovery: nil,
	}

	return o
}
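Finally, a sketch of how these option modifiers compose at construction time; the port and URI values are arbitrary, and transport/discovery plugins (none are included in this diff) would be supplied the same way.

package main

import (
	"github.com/suborbital/grav/grav"
	"github.com/suborbital/vektor/vlog"
)

func main() {
	g := grav.New(
		grav.UseLogger(vlog.Default()),
		grav.UseEndpoint("9090", "/grav/message"), // override the 8080 and /meta/message defaults
		// grav.UseTransport(...) and grav.UseDiscovery(...) slot in the same way
	)

	_ = g
}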
Some files were not shown because too many files have changed in this diff.