mirror of
https://github.com/1Password/onepassword-operator.git
synced 2025-10-24 00:10:46 +00:00
Webhook that injects secrets into pods
This commit is contained in:
23
Makefile
23
Makefile
@@ -11,7 +11,9 @@ versionFile = $(CURDIR)/.VERSION
|
|||||||
curVersion := $(shell cat $(versionFile) | sed 's/^v//')
|
curVersion := $(shell cat $(versionFile) | sed 's/^v//')
|
||||||
|
|
||||||
OPERATOR_NAME := onepassword-connect-operator
|
OPERATOR_NAME := onepassword-connect-operator
|
||||||
DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
|
OPERATOR_NAME := onepassword-secrets-injector
|
||||||
|
OPERATOR_DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
|
||||||
|
INJECTOR_DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
|
||||||
|
|
||||||
test: ## Run test suite
|
test: ## Run test suite
|
||||||
go test ./...
|
go test ./...
|
||||||
@@ -20,18 +22,31 @@ test/coverage: ## Run test suite with coverage report
|
|||||||
go test -v ./... -cover
|
go test -v ./... -cover
|
||||||
|
|
||||||
build/operator: ## Build operator Docker image
|
build/operator: ## Build operator Docker image
|
||||||
@docker build -f operator/Dockerfile --build-arg operator_version=$(curVersion) -t $(DOCKER_IMG_TAG) .
|
@docker build -f operator/Dockerfile --build-arg operator_version=$(curVersion) -t $(OPERATOR_DOCKER_IMG_TAG) .
|
||||||
@echo "Successfully built and tagged image."
|
@echo "Successfully built and tagged image."
|
||||||
@echo "Tag: $(DOCKER_IMG_TAG)"
|
@echo "Tag: $(OPERATOR_DOCKER_IMG_TAG)"
|
||||||
|
|
||||||
build/operator/local: ## Build local version of the operator Docker image
|
build/operator/local: ## Build local version of the operator Docker image
|
||||||
@docker build -f operator/Dockerfile -t local/$(DOCKER_IMG_TAG) .
|
@docker build -f operator/Dockerfile -t local/$(OPERATOR_DOCKER_IMG_TAG) .
|
||||||
|
|
||||||
build/operator/binary: clean ## Build operator binary
|
build/operator/binary: clean ## Build operator binary
|
||||||
@mkdir -p dist
|
@mkdir -p dist
|
||||||
@go build -mod vendor -a -o manager ./operator/cmd/manager/main.go
|
@go build -mod vendor -a -o manager ./operator/cmd/manager/main.go
|
||||||
@mv manager ./dist
|
@mv manager ./dist
|
||||||
|
|
||||||
|
build/secret-injector: ## Build secret-injector Docker image
|
||||||
|
@docker build -f secret-injector/Dockerfile --build-arg operator_version=$(curVersion) -t $(INJECTOR_DOCKER_IMG_TAG) .
|
||||||
|
@echo "Successfully built and tagged image."
|
||||||
|
@echo "Tag: $(INJECTOR_DOCKER_IMG_TAG)"
|
||||||
|
|
||||||
|
build/secret-injector/local: ## Build local version of the secret-injector Docker image
|
||||||
|
@docker build -f secret-injector/Dockerfile -t local/$(INJECTOR_DOCKER_IMG_TAG) .
|
||||||
|
|
||||||
|
build/secret-injector/binary: clean ## Build secret-injector binary
|
||||||
|
@mkdir -p dist
|
||||||
|
@go build -mod vendor -a -o manager ./secret-injector/cmd/manager/main.go
|
||||||
|
@mv manager ./dist
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf ./dist
|
rm -rf ./dist
|
||||||
|
|
||||||
|
|||||||
@@ -12,6 +12,10 @@ The 1Password Connect Kubernetes Operator will continually check for updates fro
|
|||||||
|
|
||||||
[Click here for more details on the 1Password Kubernetes Operator](operator/README.md)
|
[Click here for more details on the 1Password Kubernetes Operator](operator/README.md)
|
||||||
|
|
||||||
|
## 1Password Secret Injector
|
||||||
|
|
||||||
|
[Click here for more details on the 1Password Secret Injector](secret-injector/README.md)
|
||||||
|
|
||||||
|
|
||||||
# Security
|
# Security
|
||||||
|
|
||||||
|
|||||||
2
go.mod
2
go.mod
@@ -4,6 +4,7 @@ go 1.13
|
|||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/1Password/connect-sdk-go v1.0.1
|
github.com/1Password/connect-sdk-go v1.0.1
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
|
||||||
github.com/operator-framework/operator-sdk v0.19.0
|
github.com/operator-framework/operator-sdk v0.19.0
|
||||||
github.com/prometheus/common v0.14.0 // indirect
|
github.com/prometheus/common v0.14.0 // indirect
|
||||||
github.com/spf13/pflag v1.0.5
|
github.com/spf13/pflag v1.0.5
|
||||||
@@ -14,6 +15,7 @@ require (
|
|||||||
k8s.io/apimachinery v0.18.2
|
k8s.io/apimachinery v0.18.2
|
||||||
k8s.io/client-go v12.0.0+incompatible
|
k8s.io/client-go v12.0.0+incompatible
|
||||||
k8s.io/kubectl v0.18.2
|
k8s.io/kubectl v0.18.2
|
||||||
|
k8s.io/kubernetes v1.13.0
|
||||||
sigs.k8s.io/controller-runtime v0.6.0
|
sigs.k8s.io/controller-runtime v0.6.0
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
5
go.sum
5
go.sum
@@ -1381,6 +1381,7 @@ k8s.io/api v0.0.0-20191122220107-b5267f2975e0/go.mod h1:vYpRfxYkMrmPPSesoHEkGNHx
|
|||||||
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
|
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
|
||||||
k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
|
k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
|
||||||
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
|
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
|
||||||
|
k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw=
|
||||||
k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
|
k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
|
||||||
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
|
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
|
||||||
k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8=
|
k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8=
|
||||||
@@ -1396,7 +1397,9 @@ k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftc
|
|||||||
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
|
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
|
||||||
k8s.io/apiserver v0.0.0-20191122221311-9d521947b1e1/go.mod h1:RbsZY5zzBIWnz4KbctZsTVjwIuOpTp4Z8oCgFHN4kZQ=
|
k8s.io/apiserver v0.0.0-20191122221311-9d521947b1e1/go.mod h1:RbsZY5zzBIWnz4KbctZsTVjwIuOpTp4Z8oCgFHN4kZQ=
|
||||||
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
|
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
|
||||||
|
k8s.io/apiserver v0.18.2 h1:fwKxdTWwwYhxvtjo0UUfX+/fsitsNtfErPNegH2x9ic=
|
||||||
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
|
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
|
||||||
|
k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4=
|
||||||
k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA=
|
k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA=
|
||||||
k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
|
k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
|
||||||
k8s.io/cli-runtime v0.18.2/go.mod h1:yfFR2sQQzDsV0VEKGZtrJwEy4hLZ2oj4ZIfodgxAHWQ=
|
k8s.io/cli-runtime v0.18.2/go.mod h1:yfFR2sQQzDsV0VEKGZtrJwEy4hLZ2oj4ZIfodgxAHWQ=
|
||||||
@@ -1408,6 +1411,7 @@ k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRV
|
|||||||
k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA=
|
k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA=
|
||||||
k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk21m21tcNUihnmp4OI7lXU7/xA+rYXkc=
|
k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk21m21tcNUihnmp4OI7lXU7/xA+rYXkc=
|
||||||
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
|
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
|
||||||
|
k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y=
|
||||||
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
|
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
|
||||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||||
@@ -1432,6 +1436,7 @@ k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+
|
|||||||
k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU=
|
k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU=
|
||||||
k8s.io/kubectl v0.18.2 h1:9jnGSOC2DDVZmMUTMi0D1aed438mfQcgqa5TAzVjA1k=
|
k8s.io/kubectl v0.18.2 h1:9jnGSOC2DDVZmMUTMi0D1aed438mfQcgqa5TAzVjA1k=
|
||||||
k8s.io/kubectl v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4=
|
k8s.io/kubectl v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4=
|
||||||
|
k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=
|
||||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||||
k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4=
|
k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4=
|
||||||
k8s.io/metrics v0.18.2/go.mod h1:qga8E7QfYNR9Q89cSCAjinC9pTZ7yv1XSVGUB0vJypg=
|
k8s.io/metrics v0.18.2/go.mod h1:qga8E7QfYNR9Q89cSCAjinC9pTZ7yv1XSVGUB0vJypg=
|
||||||
|
|||||||
30
secret-injector/Dockerfile
Normal file
30
secret-injector/Dockerfile
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
# Build the manager binary
|
||||||
|
FROM golang:1.13 as builder
|
||||||
|
|
||||||
|
WORKDIR /workspace
|
||||||
|
# Copy the Go Modules manifests
|
||||||
|
COPY go.mod go.mod
|
||||||
|
COPY go.sum go.sum
|
||||||
|
|
||||||
|
# Copy the go source
|
||||||
|
COPY secret-injector/cmd/main.go secret-injector/main.go
|
||||||
|
COPY secret-injector/pkg/ secret-injector/pkg/
|
||||||
|
COPY vendor/ vendor/
|
||||||
|
# Build
|
||||||
|
ARG secret_injector_version=dev
|
||||||
|
RUN CGO_ENABLED=0 \
|
||||||
|
GO111MODULE=on \
|
||||||
|
go build \
|
||||||
|
-ldflags "-X \"github.com/1Password/onepassword-operator/operator/version.Version=$secret_injector_version\"" \
|
||||||
|
-mod vendor \
|
||||||
|
-a -o injector secret-injector/main.go
|
||||||
|
|
||||||
|
# Use distroless as minimal base image to package the secret-injector binary
|
||||||
|
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||||
|
FROM gcr.io/distroless/static:nonroot
|
||||||
|
WORKDIR /
|
||||||
|
COPY --from=builder /workspace/injector .
|
||||||
|
USER nonroot:nonroot
|
||||||
|
|
||||||
|
ENTRYPOINT ["/injector"]
|
||||||
|
|
||||||
98
secret-injector/Makefile
Normal file
98
secret-injector/Makefile
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
# Image URL to use all building/pushing image targets;
|
||||||
|
# Use your own docker registry and image name for dev/test by overridding the
|
||||||
|
# IMAGE_REPO, IMAGE_NAME and IMAGE_TAG environment variable.
|
||||||
|
IMAGE_REPO ?= docker.io/morvencao
|
||||||
|
IMAGE_NAME ?= op-secret-injector
|
||||||
|
|
||||||
|
# Github host to use for checking the source tree;
|
||||||
|
# Override this variable ue with your own value if you're working on forked repo.
|
||||||
|
GIT_HOST ?= github.com/morvencao
|
||||||
|
|
||||||
|
PWD := $(shell pwd)
|
||||||
|
BASE_DIR := $(shell basename $(PWD))
|
||||||
|
|
||||||
|
# Keep an existing GOPATH, make a private one if it is undefined
|
||||||
|
GOPATH_DEFAULT := $(PWD)/.go
|
||||||
|
export GOPATH ?= $(GOPATH_DEFAULT)
|
||||||
|
TESTARGS_DEFAULT := "-v"
|
||||||
|
export TESTARGS ?= $(TESTARGS_DEFAULT)
|
||||||
|
DEST := $(GOPATH)/src/$(GIT_HOST)/$(BASE_DIR)
|
||||||
|
IMAGE_TAG ?= $(shell date +v%Y%m%d)-$(shell git describe --match=$(git rev-parse --short=8 HEAD) --tags --always --dirty)
|
||||||
|
|
||||||
|
|
||||||
|
LOCAL_OS := $(shell uname)
|
||||||
|
ifeq ($(LOCAL_OS),Linux)
|
||||||
|
TARGET_OS ?= linux
|
||||||
|
XARGS_FLAGS="-r"
|
||||||
|
else ifeq ($(LOCAL_OS),Darwin)
|
||||||
|
TARGET_OS ?= darwin
|
||||||
|
XARGS_FLAGS=
|
||||||
|
else
|
||||||
|
$(error "This system's OS $(LOCAL_OS) isn't recognized/supported")
|
||||||
|
endif
|
||||||
|
|
||||||
|
all: fmt lint test build image
|
||||||
|
|
||||||
|
ifeq (,$(wildcard go.mod))
|
||||||
|
ifneq ("$(realpath $(DEST))", "$(realpath $(PWD))")
|
||||||
|
$(error Please run 'make' from $(DEST). Current directory is $(PWD))
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
|
||||||
|
############################################################
|
||||||
|
# format section
|
||||||
|
############################################################
|
||||||
|
|
||||||
|
fmt:
|
||||||
|
@echo "Run go fmt..."
|
||||||
|
|
||||||
|
############################################################
|
||||||
|
# lint section
|
||||||
|
############################################################
|
||||||
|
|
||||||
|
lint:
|
||||||
|
@echo "Runing the golangci-lint..."
|
||||||
|
|
||||||
|
############################################################
|
||||||
|
# test section
|
||||||
|
############################################################
|
||||||
|
|
||||||
|
test:
|
||||||
|
@echo "Running the tests for $(IMAGE_NAME)..."
|
||||||
|
@go test $(TESTARGS) ./...
|
||||||
|
|
||||||
|
############################################################
|
||||||
|
# build section
|
||||||
|
############################################################
|
||||||
|
|
||||||
|
build:
|
||||||
|
@echo "Building the $(IMAGE_NAME) binary..."
|
||||||
|
@CGO_ENABLED=0 go build -o build/_output/bin/$(IMAGE_NAME) ./cmd/
|
||||||
|
|
||||||
|
build-linux:
|
||||||
|
@echo "Building the $(IMAGE_NAME) binary for Docker (linux)..."
|
||||||
|
@GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o build/_output/linux/bin/$(IMAGE_NAME) ./cmd/
|
||||||
|
|
||||||
|
############################################################
|
||||||
|
# image section
|
||||||
|
############################################################
|
||||||
|
|
||||||
|
image: build-image push-image
|
||||||
|
|
||||||
|
build-image: build-linux
|
||||||
|
@echo "Building the docker image: $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG)..."
|
||||||
|
@docker build -t $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG) -f build/Dockerfile .
|
||||||
|
|
||||||
|
push-image: build-image
|
||||||
|
@echo "Pushing the docker image for $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG) and $(IMAGE_REPO)/$(IMAGE_NAME):latest..."
|
||||||
|
@docker tag $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG) $(IMAGE_REPO)/$(IMAGE_NAME):latest
|
||||||
|
@docker push $(IMAGE_REPO)/$(IMAGE_NAME):$(IMAGE_TAG)
|
||||||
|
@docker push $(IMAGE_REPO)/$(IMAGE_NAME):latest
|
||||||
|
|
||||||
|
############################################################
|
||||||
|
# clean section
|
||||||
|
############################################################
|
||||||
|
clean:
|
||||||
|
@rm -rf build/_output
|
||||||
|
|
||||||
|
.PHONY: all fmt lint check test build image clean
|
||||||
84
secret-injector/README.md
Normal file
84
secret-injector/README.md
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
## Deploy
|
||||||
|
|
||||||
|
1. Create namespace `op-secret-injector` in which the 1Password secret injector webhook is deployed:
|
||||||
|
|
||||||
|
```
|
||||||
|
# kubectl create ns op-secret-injector
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Create a signed cert/key pair and store it in a Kubernetes `secret` that will be consumed by 1Password secret injector deployment:
|
||||||
|
|
||||||
|
```
|
||||||
|
# ./deploy/webhook-create-signed-cert.sh \
|
||||||
|
--service op-secret-injector-webhook-svc \
|
||||||
|
--secret op-secret-injector-webhook-certs \
|
||||||
|
--namespace op-secret-injector
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Patch the `MutatingWebhookConfiguration` by set `caBundle` with correct value from Kubernetes cluster:
|
||||||
|
|
||||||
|
```
|
||||||
|
# cat deploy/mutatingwebhook.yaml | \
|
||||||
|
deploy/webhook-patch-ca-bundle.sh > \
|
||||||
|
deploy/mutatingwebhook-ca-bundle.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Deploy resources:
|
||||||
|
|
||||||
|
```
|
||||||
|
# kubectl create -f deploy/deployment.yaml
|
||||||
|
# kubectl create -f deploy/service.yaml
|
||||||
|
# kubectl create -f deploy/mutatingwebhook-ca-bundle.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
## Verify
|
||||||
|
|
||||||
|
1. The sidecar inject webhook should be in running state:
|
||||||
|
|
||||||
|
```
|
||||||
|
# kubectl -n sidecar-injector get pod
|
||||||
|
NAME READY STATUS RESTARTS AGE
|
||||||
|
sidecar-injector-webhook-deployment-7c8bc5f4c9-28c84 1/1 Running 0 30s
|
||||||
|
# kubectl -n sidecar-injector get deploy
|
||||||
|
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||||
|
sidecar-injector-webhook-deployment 1/1 1 1 67s
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Create new namespace `injection` and label it with `sidecar-injector=enabled`:
|
||||||
|
|
||||||
|
```
|
||||||
|
# kubectl create ns injection
|
||||||
|
# kubectl label namespace injection sidecar-injection=enabled
|
||||||
|
# kubectl get namespace -L sidecar-injection
|
||||||
|
NAME STATUS AGE SIDECAR-INJECTION
|
||||||
|
default Active 26m
|
||||||
|
injection Active 13s enabled
|
||||||
|
kube-public Active 26m
|
||||||
|
kube-system Active 26m
|
||||||
|
sidecar-injector Active 17m
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Deploy an app in Kubernetes cluster, take `alpine` app as an example
|
||||||
|
|
||||||
|
```
|
||||||
|
# kubectl run alpine --image=alpine --restart=Never -n injection --overrides='{"apiVersion":"v1","metadata":{"annotations":{"sidecar-injector-webhook.morven.me/inject":"yes"}}}' --command -- sleep infinity
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Verify sidecar container is injected:
|
||||||
|
|
||||||
|
```
|
||||||
|
# kubectl get pod
|
||||||
|
NAME READY STATUS RESTARTS AGE
|
||||||
|
alpine 2/2 Running 0 1m
|
||||||
|
# kubectl -n injection get pod alpine -o jsonpath="{.spec.containers[*].name}"
|
||||||
|
alpine sidecar-nginx
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
Sometimes you may find that pod is injected with sidecar container as expected, check the following items:
|
||||||
|
|
||||||
|
1. The sidecar-injector webhook is in running state and no error logs.
|
||||||
|
2. The namespace in which application pod is deployed has the correct labels as configured in `mutatingwebhookconfiguration`.
|
||||||
|
3. Check the `caBundle` is patched to `mutatingwebhookconfiguration` object by checking if `caBundle` fields is empty.
|
||||||
|
4. Check if the application pod has annotation `sidecar-injector-webhook.morven.me/inject":"yes"`.
|
||||||
52
secret-injector/app_example.yaml
Normal file
52
secret-injector/app_example.yaml
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: app-example
|
||||||
|
spec:
|
||||||
|
type: NodePort
|
||||||
|
selector:
|
||||||
|
app: app-example
|
||||||
|
ports:
|
||||||
|
- port: 5000
|
||||||
|
name: app-example
|
||||||
|
---
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: app-example
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app: app-example
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
annotations:
|
||||||
|
operator.1password.io/inject: "app-example"
|
||||||
|
labels:
|
||||||
|
app: app-example
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: app-example
|
||||||
|
command: ["./example"]
|
||||||
|
image: connect-app-example:latest
|
||||||
|
imagePullPolicy: Never
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
memory: "128Mi"
|
||||||
|
cpu: "0.2"
|
||||||
|
ports:
|
||||||
|
- containerPort: 5000
|
||||||
|
env:
|
||||||
|
- name: OP_VAULT
|
||||||
|
value: ApplicationConfiguration
|
||||||
|
- name: APP_TITLE
|
||||||
|
value: op://ApplicationConfiguration/Webapp/title
|
||||||
|
- name: BUTTON_TEXT
|
||||||
|
value: op://ApplicationConfiguration/Webapp/action
|
||||||
|
- name: OP_CONNECT_HOST
|
||||||
|
value: http://onepassword-connect:8080/
|
||||||
|
- name: OP_CONNECT_TOKEN
|
||||||
|
valueFrom:
|
||||||
|
secretKeyRef:
|
||||||
|
name: onepassword-token
|
||||||
|
key: token
|
||||||
85
secret-injector/cmd/main.go
Normal file
85
secret-injector/cmd/main.go
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/1Password/onepassword-operator/secret-injector/pkg/webhook"
|
||||||
|
"github.com/golang/glog"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
connectTokenSecretKeyEnv = "OP_CONNECT_TOKEN_KEY"
|
||||||
|
connectTokenSecretNameEnv = "OP_CONNECT_TOKEN_NAME"
|
||||||
|
connectHostEnv = "OP_CONNECT_HOST"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var parameters webhook.WebhookServerParameters
|
||||||
|
|
||||||
|
glog.Info("Starting webhook")
|
||||||
|
// get command line parameters
|
||||||
|
flag.IntVar(¶meters.Port, "port", 8443, "Webhook server port.")
|
||||||
|
flag.StringVar(¶meters.CertFile, "tlsCertFile", "/etc/webhook/certs/cert.pem", "File containing the x509 Certificate for HTTPS.")
|
||||||
|
flag.StringVar(¶meters.KeyFile, "tlsKeyFile", "/etc/webhook/certs/key.pem", "File containing the x509 private key to --tlsCertFile.")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
pair, err := tls.LoadX509KeyPair(parameters.CertFile, parameters.KeyFile)
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Failed to load key pair: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
connectHost, present := os.LookupEnv(connectHostEnv)
|
||||||
|
if !present {
|
||||||
|
glog.Error("")
|
||||||
|
}
|
||||||
|
|
||||||
|
connectTokenName, present := os.LookupEnv(connectTokenSecretNameEnv)
|
||||||
|
if !present {
|
||||||
|
glog.Error("")
|
||||||
|
}
|
||||||
|
|
||||||
|
connectTokenKey, present := os.LookupEnv(connectTokenSecretKeyEnv)
|
||||||
|
if !present {
|
||||||
|
glog.Error("")
|
||||||
|
}
|
||||||
|
|
||||||
|
webhookConfig := webhook.Config{
|
||||||
|
ConnectHost: connectHost,
|
||||||
|
ConnectTokenName: connectTokenName,
|
||||||
|
ConnectTokenKey: connectTokenKey,
|
||||||
|
}
|
||||||
|
webhookServer := &webhook.WebhookServer{
|
||||||
|
Config: webhookConfig,
|
||||||
|
Server: &http.Server{
|
||||||
|
Addr: fmt.Sprintf(":%v", parameters.Port),
|
||||||
|
TLSConfig: &tls.Config{Certificates: []tls.Certificate{pair}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// define http server and server handler
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
mux.HandleFunc("/inject", webhookServer.Serve)
|
||||||
|
webhookServer.Server.Handler = mux
|
||||||
|
|
||||||
|
// start webhook server in new rountine
|
||||||
|
go func() {
|
||||||
|
if err := webhookServer.Server.ListenAndServeTLS("", ""); err != nil {
|
||||||
|
glog.Errorf("Failed to listen and serve webhook server: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// listening OS shutdown singal
|
||||||
|
signalChan := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
<-signalChan
|
||||||
|
|
||||||
|
glog.Infof("Got OS shutdown signal, shutting down webhook server gracefully...")
|
||||||
|
webhookServer.Server.Shutdown(context.Background())
|
||||||
|
}
|
||||||
42
secret-injector/deploy/deployment.yaml
Normal file
42
secret-injector/deploy/deployment.yaml
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: op-secret-injector-webhook-deployment
|
||||||
|
namespace: op-secret-injector
|
||||||
|
labels:
|
||||||
|
app: op-secret-injector
|
||||||
|
spec:
|
||||||
|
replicas: 1
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app: op-secret-injector
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: op-secret-injector
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: op-secret-injector
|
||||||
|
image: local/onepassword-secrets-injector:v1.1.0
|
||||||
|
imagePullPolicy: Never
|
||||||
|
args:
|
||||||
|
- -tlsCertFile=/etc/webhook/certs/cert.pem
|
||||||
|
- -tlsKeyFile=/etc/webhook/certs/key.pem
|
||||||
|
- -alsologtostderr
|
||||||
|
- -v=4
|
||||||
|
- 2>&1
|
||||||
|
env:
|
||||||
|
- name: OP_CONNECT_HOST
|
||||||
|
value: http://onepassword-connect:8080/
|
||||||
|
- name: OP_CONNECT_TOKEN_NAME
|
||||||
|
value: onepassword-token
|
||||||
|
- name: OP_CONNECT_TOKEN_KEY
|
||||||
|
value: token
|
||||||
|
volumeMounts:
|
||||||
|
- name: webhook-certs
|
||||||
|
mountPath: /etc/webhook/certs
|
||||||
|
readOnly: true
|
||||||
|
volumes:
|
||||||
|
- name: webhook-certs
|
||||||
|
secret:
|
||||||
|
secretName: op-secret-injector-webhook-certs
|
||||||
22
secret-injector/deploy/mutatingwebhook-ca-bundle.yaml
Normal file
22
secret-injector/deploy/mutatingwebhook-ca-bundle.yaml
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
apiVersion: admissionregistration.k8s.io/v1beta1
|
||||||
|
kind: MutatingWebhookConfiguration
|
||||||
|
metadata:
|
||||||
|
name: op-secret-injector-webhook-cfg
|
||||||
|
labels:
|
||||||
|
app: op-secret-injector
|
||||||
|
webhooks:
|
||||||
|
- name: op-secret-injector.morven.me
|
||||||
|
clientConfig:
|
||||||
|
service:
|
||||||
|
name: op-secret-injector-webhook-svc
|
||||||
|
namespace: op-secret-injector
|
||||||
|
path: "/inject"
|
||||||
|
caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJd01Ea3lOekl3TkRjMU9Wb1hEVE13TURreU5qSXdORGMxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTmlICjZzZVJsZG9CWlRpRVJMeVhwbXFCU3ZOcmJyMWFMcVhpZVVWcXdCcytOUUora1hsazBIRWFldnJRU2QvNnVqY2UKSHpuNFR6Smh3Qk9pYU5BSDN6QUZkeXZxRGwwZVFzNm50R2pDbVFFK0xrUU5PQlVXYmk3WEc2am1tdDA5aFFUVwpTOXg2UDdpai9lUUtLRUJFQTFlRWYvTFZibDZPMVBqa0lXV2E0SjFRMEZoQUtnSjdxUmVJaEg1dkRoVHF3TXVzClZLTEF2bU9xRk03aDNmZ1UzWVltZldpMUFoVnF0VklMYmhkOS8xbzFYM2ZESitFK0dESGMyb0NKK1QvQkxJTmsKOWhTWEhWOTdONFhib1BUWktzZXJFa3JQTnlFYkY4alpvWndBc3FuRVhBNW5sem5vTlJnTFNqSEE0NFZXOGZyawo1RWtJdFNPdkFMMHM1K0FDMnFzQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTSG5DcFRMSDRQeDRkai9oWTVNWEx6TndNeWhqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFvamR6ZzlySgpZTjlJSTJjaUNRS0djZEZGbWtHcGxiandRczBVMVdKY0plZWs3WDh3WWdPMnI3UFhLRklDTEM3aGM5bkUxYnluCkxha2YwMzhUNzRBQzlQOHZXUUJSb2lFMlBKV1BGMjhGTFJWeWgwTWdYQ3dZU20zeitIRDR5TjViWFpNSmJ4WlYKamRza0IzeVpHSW9jZ2RBSk1rU0ptQTN6RkowaHpsY09EZTNOTVA0Ujl4Z0VWczU4bHV5bjl6bm5sL2FDODlHdwpuVnVPRkk0S0dwOFF5NXFjQUxKZndiRGNrNzBjbnRQUEhBN2trT1JtUG41Z2hNSFJPZGxsamxmdXYxVE5RcVo2CjhjUENRRW1zc1ZyajFrVEh6Y3FOUXpqOWVMK2VPMGtyRWw2dzZMcm5YY0dleUxIZVc1cHF6YUY2bWZrTitEZEQKSENjV0U2V1pvTUp2UFE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||||
|
rules:
|
||||||
|
- operations: ["CREATE", "UPDATE"]
|
||||||
|
apiGroups: [""]
|
||||||
|
apiVersions: ["v1"]
|
||||||
|
resources: ["pods"]
|
||||||
|
namespaceSelector:
|
||||||
|
matchLabels:
|
||||||
|
op-secret-injection: enabled
|
||||||
22
secret-injector/deploy/mutatingwebhook.yaml
Normal file
22
secret-injector/deploy/mutatingwebhook.yaml
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
apiVersion: admissionregistration.k8s.io/v1beta1
|
||||||
|
kind: MutatingWebhookConfiguration
|
||||||
|
metadata:
|
||||||
|
name: op-secret-injector-webhook-cfg
|
||||||
|
labels:
|
||||||
|
app: op-secret-injector
|
||||||
|
webhooks:
|
||||||
|
- name: op-secret-injector.morven.me
|
||||||
|
clientConfig:
|
||||||
|
service:
|
||||||
|
name: op-secret-injector-webhook-svc
|
||||||
|
namespace: op-secret-injector
|
||||||
|
path: "/inject"
|
||||||
|
caBundle: ${CA_BUNDLE}
|
||||||
|
rules:
|
||||||
|
- operations: ["CREATE", "UPDATE"]
|
||||||
|
apiGroups: [""]
|
||||||
|
apiVersions: ["v1"]
|
||||||
|
resources: ["pods"]
|
||||||
|
namespaceSelector:
|
||||||
|
matchLabels:
|
||||||
|
op-secret-injection: enabled
|
||||||
13
secret-injector/deploy/service.yaml
Normal file
13
secret-injector/deploy/service.yaml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: op-secret-injector-webhook-svc
|
||||||
|
namespace: op-secret-injector
|
||||||
|
labels:
|
||||||
|
app: op-secret-injector
|
||||||
|
spec:
|
||||||
|
ports:
|
||||||
|
- port: 443
|
||||||
|
targetPort: 8443
|
||||||
|
selector:
|
||||||
|
app: op-secret-injector
|
||||||
131
secret-injector/deploy/webhook-create-signed-cert.sh
Executable file
131
secret-injector/deploy/webhook-create-signed-cert.sh
Executable file
@@ -0,0 +1,131 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
cat <<EOF
|
||||||
|
Generate certificate suitable for use with an op-secret-injector webhook service.
|
||||||
|
|
||||||
|
This script uses k8s' CertificateSigningRequest API to a generate a
|
||||||
|
certificate signed by k8s CA suitable for use with op-secret-injector webhook
|
||||||
|
services. This requires permissions to create and approve CSR. See
|
||||||
|
https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster for
|
||||||
|
detailed explanation and additional instructions.
|
||||||
|
|
||||||
|
The server key/cert k8s CA cert are stored in a k8s secret.
|
||||||
|
|
||||||
|
usage: ${0} [OPTIONS]
|
||||||
|
|
||||||
|
The following flags are required.
|
||||||
|
|
||||||
|
--service Service name of webhook.
|
||||||
|
--namespace Namespace where webhook service and secret reside.
|
||||||
|
--secret Secret name for CA certificate and server certificate/key pair.
|
||||||
|
EOF
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case ${1} in
|
||||||
|
--service)
|
||||||
|
service="$2"
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--secret)
|
||||||
|
secret="$2"
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--namespace)
|
||||||
|
namespace="$2"
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
usage
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
shift
|
||||||
|
done
|
||||||
|
|
||||||
|
[ -z "${service}" ] && service=op-secret-injector-webhook-svc
|
||||||
|
[ -z "${secret}" ] && secret=op-secret-injector-webhook-certs
|
||||||
|
[ -z "${namespace}" ] && namespace=default
|
||||||
|
|
||||||
|
if [ ! -x "$(command -v openssl)" ]; then
|
||||||
|
echo "openssl not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
csrName=${service}.${namespace}
|
||||||
|
tmpdir=$(mktemp -d)
|
||||||
|
echo "creating certs in tmpdir ${tmpdir} "
|
||||||
|
|
||||||
|
cat <<EOF >> "${tmpdir}"/csr.conf
|
||||||
|
[req]
|
||||||
|
req_extensions = v3_req
|
||||||
|
distinguished_name = req_distinguished_name
|
||||||
|
[req_distinguished_name]
|
||||||
|
[ v3_req ]
|
||||||
|
basicConstraints = CA:FALSE
|
||||||
|
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
||||||
|
extendedKeyUsage = serverAuth
|
||||||
|
subjectAltName = @alt_names
|
||||||
|
[alt_names]
|
||||||
|
DNS.1 = ${service}
|
||||||
|
DNS.2 = ${service}.${namespace}
|
||||||
|
DNS.3 = ${service}.${namespace}.svc
|
||||||
|
EOF
|
||||||
|
|
||||||
|
openssl genrsa -out "${tmpdir}"/server-key.pem 2048
|
||||||
|
openssl req -new -key "${tmpdir}"/server-key.pem -subj "/CN=${service}.${namespace}.svc" -out "${tmpdir}"/server.csr -config "${tmpdir}"/csr.conf
|
||||||
|
|
||||||
|
# clean-up any previously created CSR for our service. Ignore errors if not present.
|
||||||
|
kubectl delete csr ${csrName} 2>/dev/null || true
|
||||||
|
|
||||||
|
# create server cert/key CSR and send to k8s API
|
||||||
|
cat <<EOF | kubectl create -f -
|
||||||
|
apiVersion: certificates.k8s.io/v1beta1
|
||||||
|
kind: CertificateSigningRequest
|
||||||
|
metadata:
|
||||||
|
name: ${csrName}
|
||||||
|
spec:
|
||||||
|
groups:
|
||||||
|
- system:authenticated
|
||||||
|
request: $(< "${tmpdir}"/server.csr base64 | tr -d '\n')
|
||||||
|
usages:
|
||||||
|
- digital signature
|
||||||
|
- key encipherment
|
||||||
|
- server auth
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# verify CSR has been created
|
||||||
|
while true; do
|
||||||
|
if kubectl get csr ${csrName}; then
|
||||||
|
break
|
||||||
|
else
|
||||||
|
sleep 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# approve and fetch the signed certificate
|
||||||
|
kubectl certificate approve ${csrName}
|
||||||
|
# verify certificate has been signed
|
||||||
|
for _ in $(seq 10); do
|
||||||
|
serverCert=$(kubectl get csr ${csrName} -o jsonpath='{.status.certificate}')
|
||||||
|
if [[ ${serverCert} != '' ]]; then
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
if [[ ${serverCert} == '' ]]; then
|
||||||
|
echo "ERROR: After approving csr ${csrName}, the signed certificate did not appear on the resource. Giving up after 10 attempts." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "${serverCert}" | openssl base64 -d -A -out "${tmpdir}"/server-cert.pem
|
||||||
|
|
||||||
|
|
||||||
|
# create the secret with CA cert and server cert/key
|
||||||
|
kubectl create secret generic ${secret} \
|
||||||
|
--from-file=key.pem="${tmpdir}"/server-key.pem \
|
||||||
|
--from-file=cert.pem="${tmpdir}"/server-cert.pem \
|
||||||
|
--dry-run -o yaml |
|
||||||
|
kubectl -n ${namespace} apply -f -
|
||||||
19
secret-injector/deploy/webhook-patch-ca-bundle.sh
Executable file
19
secret-injector/deploy/webhook-patch-ca-bundle.sh
Executable file
@@ -0,0 +1,19 @@
|
|||||||
|
#!/bin/bash
# webhook-patch-ca-bundle.sh
#
# Reads the cluster CA bundle from the current kubeconfig context and
# substitutes it for ${CA_BUNDLE} in the manifest piped in on stdin,
# writing the patched manifest to stdout.

set -o errexit
set -o nounset
set -o pipefail

# Preferred source: the certificate-authority-data of the current cluster.
CA_BUNDLE=$(kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}')

# Fallback: the ca.crt stored in the default service account's token secret.
if [ -z "${CA_BUNDLE}" ]; then
    CA_BUNDLE=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.ca\.crt}")
fi

export CA_BUNDLE

# Use envsubst when available; otherwise emulate the substitution with sed.
# The bundle is base64, so it cannot contain the '|' delimiter used below.
if command -v envsubst >/dev/null 2>&1; then
    envsubst
else
    sed -e "s|\${CA_BUNDLE}|${CA_BUNDLE}|g"
fi
|
||||||
214
secret-injector/golangci.yml
Normal file
214
secret-injector/golangci.yml
Normal file
@@ -0,0 +1,214 @@
|
|||||||
|
service:
|
||||||
|
# When updating this, also update the version stored in docker/build-tools/Dockerfile in the multicloudlab/tools repo.
|
||||||
|
golangci-lint-version: 1.18.x # use the fixed version to not introduce new linters unexpectedly
|
||||||
|
run:
|
||||||
|
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||||
|
deadline: 20m
|
||||||
|
|
||||||
|
# which dirs to skip: they won't be analyzed;
|
||||||
|
# can use regexp here: generated.*, regexp is applied on full path;
|
||||||
|
# default value is empty list, but next dirs are always skipped independently
|
||||||
|
# from this option's value:
|
||||||
|
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||||
|
skip-dirs:
|
||||||
|
- genfiles$
|
||||||
|
- vendor$
|
||||||
|
|
||||||
|
# which files to skip: they will be analyzed, but issues from them
|
||||||
|
# won't be reported. Default value is empty list, but there is
|
||||||
|
# no need to include all autogenerated files, we confidently recognize
|
||||||
|
# autogenerated files. If it's not please let us know.
|
||||||
|
skip-files:
|
||||||
|
- ".*\\.pb\\.go"
|
||||||
|
- ".*\\.gen\\.go"
|
||||||
|
|
||||||
|
linters:
|
||||||
|
# please, do not use `enable-all`: it's deprecated and will be removed soon.
|
||||||
|
# inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
|
||||||
|
disable-all: true
|
||||||
|
enable:
|
||||||
|
- deadcode
|
||||||
|
- errcheck
|
||||||
|
- gocyclo
|
||||||
|
- gofmt
|
||||||
|
- goimports
|
||||||
|
- golint
|
||||||
|
- gosec
|
||||||
|
- gosimple
|
||||||
|
- govet
|
||||||
|
- ineffassign
|
||||||
|
- interfacer
|
||||||
|
- lll
|
||||||
|
- misspell
|
||||||
|
- staticcheck
|
||||||
|
- structcheck
|
||||||
|
- typecheck
|
||||||
|
- unconvert
|
||||||
|
- unparam
|
||||||
|
- unused
|
||||||
|
- varcheck
|
||||||
|
# don't enable:
|
||||||
|
# - gocritic
|
||||||
|
# - bodyclose
|
||||||
|
# - depguard
|
||||||
|
# - dogsled
|
||||||
|
# - dupl
|
||||||
|
# - funlen
|
||||||
|
# - gochecknoglobals
|
||||||
|
# - gochecknoinits
|
||||||
|
# - gocognit
|
||||||
|
# - godox
|
||||||
|
# - maligned
|
||||||
|
# - nakedret
|
||||||
|
# - prealloc
|
||||||
|
# - scopelint
|
||||||
|
# - whitespace
|
||||||
|
# - stylecheck
|
||||||
|
|
||||||
|
linters-settings:
|
||||||
|
errcheck:
|
||||||
|
# report about not checking of errors in type assertions: `a := b.(MyStruct)`;
|
||||||
|
# default is false: such cases aren't reported by default.
|
||||||
|
check-type-assertions: false
|
||||||
|
|
||||||
|
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||||
|
# default is false: such cases aren't reported by default.
|
||||||
|
check-blank: false
|
||||||
|
govet:
|
||||||
|
# report about shadowed variables
|
||||||
|
check-shadowing: false
|
||||||
|
golint:
|
||||||
|
# minimal confidence for issues, default is 0.8
|
||||||
|
min-confidence: 0.0
|
||||||
|
gofmt:
|
||||||
|
# simplify code: gofmt with `-s` option, true by default
|
||||||
|
simplify: true
|
||||||
|
goimports:
|
||||||
|
# put imports beginning with prefix after 3rd-party packages;
|
||||||
|
# it's a comma-separated list of prefixes
|
||||||
|
local-prefixes: github.com/IBM/
|
||||||
|
maligned:
|
||||||
|
# print struct with more effective memory layout or not, false by default
|
||||||
|
suggest-new: true
|
||||||
|
misspell:
|
||||||
|
# Correct spellings using locale preferences for US or UK.
|
||||||
|
# Default is to use a neutral variety of English.
|
||||||
|
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||||
|
locale: US
|
||||||
|
ignore-words:
|
||||||
|
- cancelled
|
||||||
|
lll:
|
||||||
|
# max line length, lines longer will be reported. Default is 120.
|
||||||
|
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||||
|
line-length: 300
|
||||||
|
# tab width in spaces. Default to 1.
|
||||||
|
tab-width: 1
|
||||||
|
unused:
|
||||||
|
# treat code as a program (not a library) and report unused exported identifiers; default is false.
|
||||||
|
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
|
||||||
|
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
|
||||||
|
# with golangci-lint call it on a directory with the changed file.
|
||||||
|
check-exported: false
|
||||||
|
unparam:
|
||||||
|
# call graph construction algorithm (cha, rta). In general, use cha for libraries,
|
||||||
|
# and rta for programs with main packages. Default is cha.
|
||||||
|
algo: cha
|
||||||
|
|
||||||
|
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
|
||||||
|
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
|
||||||
|
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
|
||||||
|
# with golangci-lint call it on a directory with the changed file.
|
||||||
|
check-exported: false
|
||||||
|
gocritic:
|
||||||
|
enabled-checks:
|
||||||
|
- appendCombine
|
||||||
|
- argOrder
|
||||||
|
- assignOp
|
||||||
|
- badCond
|
||||||
|
- boolExprSimplify
|
||||||
|
- builtinShadow
|
||||||
|
- captLocal
|
||||||
|
- caseOrder
|
||||||
|
- codegenComment
|
||||||
|
- commentedOutCode
|
||||||
|
- commentedOutImport
|
||||||
|
- defaultCaseOrder
|
||||||
|
- deprecatedComment
|
||||||
|
- docStub
|
||||||
|
- dupArg
|
||||||
|
- dupBranchBody
|
||||||
|
- dupCase
|
||||||
|
- dupSubExpr
|
||||||
|
- elseif
|
||||||
|
- emptyFallthrough
|
||||||
|
- equalFold
|
||||||
|
- flagDeref
|
||||||
|
- flagName
|
||||||
|
- hexLiteral
|
||||||
|
- indexAlloc
|
||||||
|
- initClause
|
||||||
|
- methodExprCall
|
||||||
|
- nilValReturn
|
||||||
|
- octalLiteral
|
||||||
|
- offBy1
|
||||||
|
- rangeExprCopy
|
||||||
|
- regexpMust
|
||||||
|
- sloppyLen
|
||||||
|
- stringXbytes
|
||||||
|
- switchTrue
|
||||||
|
- typeAssertChain
|
||||||
|
- typeSwitchVar
|
||||||
|
- typeUnparen
|
||||||
|
- underef
|
||||||
|
- unlambda
|
||||||
|
- unnecessaryBlock
|
||||||
|
- unslice
|
||||||
|
- valSwap
|
||||||
|
- weakCond
|
||||||
|
|
||||||
|
# Unused
|
||||||
|
# - yodaStyleExpr
|
||||||
|
# - appendAssign
|
||||||
|
# - commentFormatting
|
||||||
|
# - emptyStringTest
|
||||||
|
# - exitAfterDefer
|
||||||
|
# - ifElseChain
|
||||||
|
# - hugeParam
|
||||||
|
# - importShadow
|
||||||
|
# - nestingReduce
|
||||||
|
# - paramTypeCombine
|
||||||
|
# - ptrToRefParam
|
||||||
|
# - rangeValCopy
|
||||||
|
# - singleCaseSwitch
|
||||||
|
# - sloppyReassign
|
||||||
|
# - unlabelStmt
|
||||||
|
# - unnamedResult
|
||||||
|
# - wrapperFunc
|
||||||
|
|
||||||
|
issues:
|
||||||
|
# List of regexps of issue texts to exclude, empty list by default.
|
||||||
|
# But independently from this option we use default exclude patterns,
|
||||||
|
# it can be disabled by `exclude-use-default: false`. To list all
|
||||||
|
# excluded by default patterns execute `golangci-lint run --help`
|
||||||
|
exclude:
|
||||||
|
- composite literal uses unkeyed fields
|
||||||
|
|
||||||
|
exclude-rules:
|
||||||
|
# Exclude some linters from running on test files.
|
||||||
|
- path: _test\.go$|^tests/|^samples/
|
||||||
|
linters:
|
||||||
|
- errcheck
|
||||||
|
- maligned
|
||||||
|
|
||||||
|
# Independently from option `exclude` we use default exclude patterns,
|
||||||
|
# it can be disabled by this option. To list all
|
||||||
|
# excluded by default patterns execute `golangci-lint run --help`.
|
||||||
|
# Default value for this option is true.
|
||||||
|
exclude-use-default: true
|
||||||
|
|
||||||
|
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||||
|
max-per-linter: 0
|
||||||
|
|
||||||
|
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||||
|
max-same-issues: 0
|
||||||
|
|
||||||
469
secret-injector/pkg/webhook/webhook.go
Normal file
469
secret-injector/pkg/webhook/webhook.go
Normal file
@@ -0,0 +1,469 @@
|
|||||||
|
package webhook
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
|
"k8s.io/api/admission/v1beta1"
|
||||||
|
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
|
||||||
|
v1 "k8s.io/api/apps/v1"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// binVolumeName is the name of the volume where the OP CLI binary is stored.
	binVolumeName = "op-bin"

	// binVolumeMountPath is the mount path where the OP CLI binary can be found.
	binVolumeMountPath = "/op/bin/"
)

// binVolume is the shared, in-memory volume where the OP CLI binary lives.
var binVolume = corev1.Volume{
	Name: binVolumeName,
	VolumeSource: corev1.VolumeSource{
		EmptyDir: &corev1.EmptyDirVolumeSource{
			Medium: corev1.StorageMediumMemory,
		},
	},
}

// binVolumeMount is the shared volume mount where the OP CLI binary lives.
// Mounted read-only into mutated containers; the init container writes to it
// via its own (writable) mount.
var binVolumeMount = corev1.VolumeMount{
	Name:      binVolumeName,
	MountPath: binVolumeMountPath,
	ReadOnly:  true,
}

var (
	// runtimeScheme/codecs/deserializer decode incoming AdmissionReview bodies.
	runtimeScheme = runtime.NewScheme()
	codecs        = serializer.NewCodecFactory(runtimeScheme)
	deserializer  = codecs.UniversalDeserializer()

	// defaulter applies registered scheme defaults; kept as a workaround for
	// (https://github.com/kubernetes/kubernetes/issues/57982)
	defaulter = runtime.ObjectDefaulter(runtimeScheme)
)

// ignoredNamespaces lists system namespaces whose pods are never mutated.
var ignoredNamespaces = []string{
	metav1.NamespaceSystem,
	metav1.NamespacePublic,
}

const (
	// injectionStatus is set to "injected" on pods that were already mutated.
	injectionStatus = "operator.1password.io/status"
	// injectAnnotation holds the comma-separated names of containers to inject.
	injectAnnotation = "operator.1password.io/inject"
	// versionAnnotation selects the op CLI image tag; defaults to "latest".
	versionAnnotation = "operator.1password.io/version"
)
|
||||||
|
|
||||||
|
// WebhookServer serves the mutating admission webhook over HTTPS.
type WebhookServer struct {
	Config Config       // 1Password Connect settings exposed to mutated containers
	Server *http.Server // underlying HTTP server
}

// Webhook Server parameters
type WebhookServerParameters struct {
	Port     int    // webhook server port
	CertFile string // path to the x509 certificate for https
	KeyFile  string // path to the x509 private key matching `CertFile`
}

// Config carries the 1Password Connect connection details that are injected
// into mutated containers as environment variables.
type Config struct {
	ConnectHost      string // value for OP_CONNECT_HOST
	ConnectTokenName string // name of the secret holding the Connect token
	ConnectTokenKey  string // key within that secret used for OP_CONNECT_TOKEN
}

// patchOperation is a single RFC 6902 JSON Patch operation; a slice of these
// is marshaled and returned as the admission response patch.
type patchOperation struct {
	Op    string      `json:"op"`
	Path  string      `json:"path"`
	Value interface{} `json:"value,omitempty"`
}
|
||||||
|
|
||||||
|
// init registers the API groups the deserializer must understand into the
// package-level runtime scheme. The AddToScheme errors are deliberately
// ignored for these built-in types.
func init() {
	_ = corev1.AddToScheme(runtimeScheme)
	_ = admissionregistrationv1beta1.AddToScheme(runtimeScheme)
	_ = v1.AddToScheme(runtimeScheme)
}
|
||||||
|
|
||||||
|
// applyDefaultsWorkaround runs the scheme defaulters over a throwaway Pod
// wrapping the given containers and volumes, mutating them in place with
// their defaulted fields. Workaround for
// https://github.com/kubernetes/kubernetes/issues/57982.
func applyDefaultsWorkaround(containers []corev1.Container, volumes []corev1.Volume) {
	defaulter.Default(&corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: containers,
			Volumes:    volumes,
		},
	})
}
|
||||||
|
|
||||||
|
// Check whether the target resoured need to be mutated
|
||||||
|
func mutationRequired(ignoredList []string, metadata *metav1.ObjectMeta) bool {
|
||||||
|
// skip special kubernete system namespaces
|
||||||
|
for _, namespace := range ignoredList {
|
||||||
|
if metadata.Namespace == namespace {
|
||||||
|
glog.Infof("Skip mutation for %v for it's in special namespace:%v", metadata.Name, metadata.Namespace)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
annotations := metadata.GetAnnotations()
|
||||||
|
if annotations == nil {
|
||||||
|
annotations = map[string]string{}
|
||||||
|
}
|
||||||
|
|
||||||
|
status := annotations[injectionStatus]
|
||||||
|
_, enabled := annotations[injectAnnotation]
|
||||||
|
|
||||||
|
// determine whether to perform mutation based on annotation for the target resource
|
||||||
|
required := false
|
||||||
|
if strings.ToLower(status) != "injected" && enabled {
|
||||||
|
required = true
|
||||||
|
}
|
||||||
|
|
||||||
|
glog.Infof("Mutation policy for %v/%v: status: %q required:%v", metadata.Namespace, metadata.Name, status, required)
|
||||||
|
return required
|
||||||
|
}
|
||||||
|
|
||||||
|
func addContainers(target, added []corev1.Container, basePath string) (patch []patchOperation) {
|
||||||
|
first := len(target) == 0
|
||||||
|
var value interface{}
|
||||||
|
for _, add := range added {
|
||||||
|
value = add
|
||||||
|
path := basePath
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
value = []corev1.Container{add}
|
||||||
|
} else {
|
||||||
|
path = path + "/-"
|
||||||
|
}
|
||||||
|
patch = append(patch, patchOperation{
|
||||||
|
Op: "add",
|
||||||
|
Path: path,
|
||||||
|
Value: value,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return patch
|
||||||
|
}
|
||||||
|
|
||||||
|
func addVolume(target, added []corev1.Volume, basePath string) (patch []patchOperation) {
|
||||||
|
first := len(target) == 0
|
||||||
|
var value interface{}
|
||||||
|
for _, add := range added {
|
||||||
|
value = add
|
||||||
|
path := basePath
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
value = []corev1.Volume{add}
|
||||||
|
} else {
|
||||||
|
path = path + "/-"
|
||||||
|
}
|
||||||
|
patch = append(patch, patchOperation{
|
||||||
|
Op: "add",
|
||||||
|
Path: path,
|
||||||
|
Value: value,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return patch
|
||||||
|
}
|
||||||
|
|
||||||
|
func updateAnnotation(target map[string]string, added map[string]string) (patch []patchOperation) {
|
||||||
|
for key, value := range added {
|
||||||
|
if target == nil || target[key] == "" {
|
||||||
|
target = map[string]string{}
|
||||||
|
patch = append(patch, patchOperation{
|
||||||
|
Op: "add",
|
||||||
|
Path: "/metadata/annotations",
|
||||||
|
Value: map[string]string{
|
||||||
|
key: value,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
patch = append(patch, patchOperation{
|
||||||
|
Op: "replace",
|
||||||
|
Path: "/metadata/annotations/" + key,
|
||||||
|
Value: value,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return patch
|
||||||
|
}
|
||||||
|
|
||||||
|
// main mutation process
|
||||||
|
func (whsvr *WebhookServer) mutate(ar *v1beta1.AdmissionReview) *v1beta1.AdmissionResponse {
|
||||||
|
ctx := context.Background()
|
||||||
|
req := ar.Request
|
||||||
|
var pod corev1.Pod
|
||||||
|
if err := json.Unmarshal(req.Object.Raw, &pod); err != nil {
|
||||||
|
glog.Errorf("Could not unmarshal raw object: %v", err)
|
||||||
|
return &v1beta1.AdmissionResponse{
|
||||||
|
Result: &metav1.Status{
|
||||||
|
Message: err.Error(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
glog.Infof("AdmissionReview for Kind=%v, Namespace=%v Name=%v (%v) UID=%v patchOperation=%v UserInfo=%v",
|
||||||
|
req.Kind, req.Namespace, req.Name, pod.Name, req.UID, req.Operation, req.UserInfo)
|
||||||
|
|
||||||
|
// determine whether to perform mutation
|
||||||
|
if !mutationRequired(ignoredNamespaces, &pod.ObjectMeta) {
|
||||||
|
glog.Infof("Skipping mutation for %s/%s due to policy check", pod.Namespace, pod.Name)
|
||||||
|
return &v1beta1.AdmissionResponse{
|
||||||
|
Allowed: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
containersStr := pod.Annotations[injectAnnotation]
|
||||||
|
|
||||||
|
containers := map[string]struct{}{}
|
||||||
|
|
||||||
|
for _, container := range strings.Split(containersStr, ",") {
|
||||||
|
containers[container] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
version, ok := pod.Annotations[versionAnnotation]
|
||||||
|
if !ok {
|
||||||
|
version = "latest"
|
||||||
|
}
|
||||||
|
|
||||||
|
mutated := false
|
||||||
|
|
||||||
|
var patch []patchOperation
|
||||||
|
for i, c := range pod.Spec.InitContainers {
|
||||||
|
_, mutate := containers[c.Name]
|
||||||
|
if !mutate {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c, didMutate, initContainerPatch, err := whsvr.mutateContainer(ctx, &c, i)
|
||||||
|
if err != nil {
|
||||||
|
return &v1beta1.AdmissionResponse{
|
||||||
|
Result: &metav1.Status{
|
||||||
|
Message: err.Error(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if didMutate {
|
||||||
|
mutated = true
|
||||||
|
pod.Spec.InitContainers[i] = *c
|
||||||
|
}
|
||||||
|
patch = append(patch, initContainerPatch...)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, c := range pod.Spec.Containers {
|
||||||
|
_, mutate := containers[c.Name]
|
||||||
|
if !mutate {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
c, didMutate, containerPatch, err := whsvr.mutateContainer(ctx, &c, i)
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Error occured mutating container: ", err)
|
||||||
|
return &v1beta1.AdmissionResponse{
|
||||||
|
Result: &metav1.Status{
|
||||||
|
Message: err.Error(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
patch = append(patch, containerPatch...)
|
||||||
|
if didMutate {
|
||||||
|
mutated = true
|
||||||
|
pod.Spec.Containers[i] = *c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// binInitContainer is the container that pulls the OP CLI
|
||||||
|
// into a shared volume mount.
|
||||||
|
var binInitContainer = corev1.Container{
|
||||||
|
Name: "copy-op-bin",
|
||||||
|
Image: "op-example" + ":" + version,
|
||||||
|
ImagePullPolicy: corev1.PullIfNotPresent,
|
||||||
|
Command: []string{"sh", "-c",
|
||||||
|
fmt.Sprintf("cp /usr/local/bin/op %s", binVolumeMountPath)},
|
||||||
|
VolumeMounts: []corev1.VolumeMount{
|
||||||
|
{
|
||||||
|
Name: binVolumeName,
|
||||||
|
MountPath: binVolumeMountPath,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if !mutated {
|
||||||
|
glog.Infof("No mutations made for %s/%s", pod.Namespace, pod.Name)
|
||||||
|
return &v1beta1.AdmissionResponse{
|
||||||
|
Allowed: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
annotations := map[string]string{injectionStatus: "injected"}
|
||||||
|
patchBytes, err := createOPCLIPatch(&pod, annotations, []corev1.Container{binInitContainer}, patch)
|
||||||
|
if err != nil {
|
||||||
|
return &v1beta1.AdmissionResponse{
|
||||||
|
Result: &metav1.Status{
|
||||||
|
Message: err.Error(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
glog.Infof("AdmissionResponse: patch=%v\n", string(patchBytes))
|
||||||
|
return &v1beta1.AdmissionResponse{
|
||||||
|
Allowed: true,
|
||||||
|
Patch: patchBytes,
|
||||||
|
PatchType: func() *v1beta1.PatchType {
|
||||||
|
pt := v1beta1.PatchTypeJSONPatch
|
||||||
|
return &pt
|
||||||
|
}(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// create mutation patch for resoures
|
||||||
|
func createOPCLIPatch(pod *corev1.Pod, annotations map[string]string, containers []corev1.Container, patch []patchOperation) ([]byte, error) {
|
||||||
|
|
||||||
|
patch = append(patch, addVolume(pod.Spec.Volumes, []corev1.Volume{binVolume}, "/spec/volumes")...)
|
||||||
|
patch = append(patch, addContainers(pod.Spec.InitContainers, containers, "/spec/initContainers")...)
|
||||||
|
patch = append(patch, updateAnnotation(pod.Annotations, annotations)...)
|
||||||
|
|
||||||
|
return json.Marshal(patch)
|
||||||
|
}
|
||||||
|
|
||||||
|
func createOPConnectPatch(container *corev1.Container, containerIndex int, host, tokenSecretName, tokenSecretKey string) []patchOperation {
|
||||||
|
var patch []patchOperation
|
||||||
|
connectHostEnvVar := corev1.EnvVar{
|
||||||
|
Name: "OP_CONNECT_HOST",
|
||||||
|
Value: host,
|
||||||
|
}
|
||||||
|
|
||||||
|
connectTokenEnvVar := corev1.EnvVar{
|
||||||
|
Name: "OP_CONNECT_TOKEN",
|
||||||
|
ValueFrom: &corev1.EnvVarSource{
|
||||||
|
SecretKeyRef: &corev1.SecretKeySelector{
|
||||||
|
Key: tokenSecretKey,
|
||||||
|
LocalObjectReference: corev1.LocalObjectReference{
|
||||||
|
Name: tokenSecretName,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
envs := []corev1.EnvVar{
|
||||||
|
connectHostEnvVar,
|
||||||
|
connectTokenEnvVar,
|
||||||
|
}
|
||||||
|
|
||||||
|
patch = append(patch, setEnvironment(*container, containerIndex, envs, "/spec/containers")...)
|
||||||
|
|
||||||
|
return patch
|
||||||
|
}
|
||||||
|
|
||||||
|
// mutateContainer rewrites a single container so its entrypoint is executed
// through the op CLI: the command is prefixed with "<mount>/op run --", the
// shared binary volume is mounted, and the Connect environment variables are
// added. It returns the container, whether it was mutated, the patch
// operations to apply, and an error when the container defines no command.
func (whsvr *WebhookServer) mutateContainer(_ context.Context, container *corev1.Container, containerIndex int) (*corev1.Container, bool, []patchOperation, error) {
	// Because we are running a command in the pod before starting the container app,
	// we need to prepend the pod command with the op run command. Containers
	// relying solely on the image entrypoint (no explicit command) are rejected.
	if len(container.Command) == 0 {
		return container, false, nil, fmt.Errorf("not attaching OP to the container %s: the podspec does not define a command", container.Name)
	}

	// Prepend the command with op run --
	container.Command = append([]string{binVolumeMountPath + "op", "run", "--"}, container.Command...)

	var patch []patchOperation

	// Adding the cli to the container using a volume mount.
	// NOTE(review): "add" on an existing .../volumeMounts member replaces the
	// whole list (RFC 6902), dropping any mounts the container already had —
	// confirm whether that is intended.
	// NOTE(review): the path hardcodes "/spec/containers", yet this method is
	// also invoked for pod.Spec.InitContainers entries in mutate(); those
	// presumably need "/spec/initContainers" — verify against the callers.
	path := fmt.Sprintf("%s/%d/volumeMounts", "/spec/containers", containerIndex)
	patch = append(patch, patchOperation{
		Op:    "add",
		Path:  path,
		Value: []corev1.VolumeMount{binVolumeMount},
	})

	// Replacing the container command with a command prepended with op run.
	path = fmt.Sprintf("%s/%d/command", "/spec/containers", containerIndex)
	patch = append(patch, patchOperation{
		Op:    "replace",
		Path:  path,
		Value: container.Command,
	})

	// Creating patch for adding Connect environment variables to the container.
	patch = append(patch, createOPConnectPatch(container, containerIndex, whsvr.Config.ConnectHost, whsvr.Config.ConnectTokenName, whsvr.Config.ConnectTokenKey)...)
	return container, true, patch, nil
}
|
||||||
|
|
||||||
|
func setEnvironment(container corev1.Container, containerIndex int, addedEnv []corev1.EnvVar, basePath string) (patch []patchOperation) {
|
||||||
|
first := len(container.Env) == 0
|
||||||
|
var value interface{}
|
||||||
|
for _, add := range addedEnv {
|
||||||
|
path := fmt.Sprintf("%s/%d/env", basePath, containerIndex)
|
||||||
|
value = add
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
value = []corev1.EnvVar{add}
|
||||||
|
} else {
|
||||||
|
path = path + "/-"
|
||||||
|
}
|
||||||
|
patch = append(patch, patchOperation{
|
||||||
|
Op: "add",
|
||||||
|
Path: path,
|
||||||
|
Value: value,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return patch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serve method for webhook server
|
||||||
|
func (whsvr *WebhookServer) Serve(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var body []byte
|
||||||
|
if r.Body != nil {
|
||||||
|
if data, err := ioutil.ReadAll(r.Body); err == nil {
|
||||||
|
body = data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(body) == 0 {
|
||||||
|
glog.Error("empty body")
|
||||||
|
http.Error(w, "empty body", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// verify the content type is accurate
|
||||||
|
contentType := r.Header.Get("Content-Type")
|
||||||
|
if contentType != "application/json" {
|
||||||
|
glog.Errorf("Content-Type=%s, expect application/json", contentType)
|
||||||
|
http.Error(w, "invalid Content-Type, expect `application/json`", http.StatusUnsupportedMediaType)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var admissionResponse *v1beta1.AdmissionResponse
|
||||||
|
ar := v1beta1.AdmissionReview{}
|
||||||
|
if _, _, err := deserializer.Decode(body, nil, &ar); err != nil {
|
||||||
|
glog.Errorf("Can't decode body: %v", err)
|
||||||
|
admissionResponse = &v1beta1.AdmissionResponse{
|
||||||
|
Result: &metav1.Status{
|
||||||
|
Message: err.Error(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
admissionResponse = whsvr.mutate(&ar)
|
||||||
|
}
|
||||||
|
|
||||||
|
admissionReview := v1beta1.AdmissionReview{}
|
||||||
|
if admissionResponse != nil {
|
||||||
|
admissionReview.Response = admissionResponse
|
||||||
|
if ar.Request != nil {
|
||||||
|
admissionReview.Response.UID = ar.Request.UID
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := json.Marshal(admissionReview)
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Can't encode response: %v", err)
|
||||||
|
http.Error(w, fmt.Sprintf("could not encode response: %v", err), http.StatusInternalServerError)
|
||||||
|
}
|
||||||
|
glog.Infof("Ready to write reponse ...")
|
||||||
|
if _, err := w.Write(resp); err != nil {
|
||||||
|
glog.Errorf("Can't write response: %v", err)
|
||||||
|
http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
|
||||||
|
}
|
||||||
|
}
|
||||||
9
secret-injector/test/Dockerfile
Normal file
9
secret-injector/test/Dockerfile
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# Test image bundling the 1Password CLI (op) v2 alpha binary, used to
# exercise the secrets-injector webhook.
FROM ubuntu:latest
# NOTE(review): VERSION is declared but not referenced below — the download
# URL pins v2-alpha2 regardless; confirm whether the ARG should drive the URL.
ARG VERSION

# Fetch the op CLI zip and install the binary into /usr/local/bin.
RUN apt-get update && apt-get install -y curl unzip jq && \
    curl -o 1password.zip https://bucket.agilebits.com/cli-private-beta/v2/op_linux_amd64_v2-alpha2.zip && \
    unzip 1password.zip -d /usr/local/bin && \
    rm 1password.zip

CMD ["op"]
|
||||||
202
vendor/github.com/docker/distribution/LICENSE
generated
vendored
Normal file
202
vendor/github.com/docker/distribution/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright {yyyy} {name of copyright owner}
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
|
||||||
247
vendor/github.com/docker/distribution/digestset/set.go
generated
vendored
Normal file
247
vendor/github.com/docker/distribution/digestset/set.go
generated
vendored
Normal file
@@ -0,0 +1,247 @@
|
|||||||
|
package digestset
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrDigestNotFound is used when a matching digest
|
||||||
|
// could not be found in a set.
|
||||||
|
ErrDigestNotFound = errors.New("digest not found")
|
||||||
|
|
||||||
|
// ErrDigestAmbiguous is used when multiple digests
|
||||||
|
// are found in a set. None of the matching digests
|
||||||
|
// should be considered valid matches.
|
||||||
|
ErrDigestAmbiguous = errors.New("ambiguous digest string")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Set is used to hold a unique set of digests which
|
||||||
|
// may be easily referenced by easily referenced by a string
|
||||||
|
// representation of the digest as well as short representation.
|
||||||
|
// The uniqueness of the short representation is based on other
|
||||||
|
// digests in the set. If digests are omitted from this set,
|
||||||
|
// collisions in a larger set may not be detected, therefore it
|
||||||
|
// is important to always do short representation lookups on
|
||||||
|
// the complete set of digests. To mitigate collisions, an
|
||||||
|
// appropriately long short code should be used.
|
||||||
|
type Set struct {
|
||||||
|
mutex sync.RWMutex
|
||||||
|
entries digestEntries
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSet creates an empty set of digests
|
||||||
|
// which may have digests added.
|
||||||
|
func NewSet() *Set {
|
||||||
|
return &Set{
|
||||||
|
entries: digestEntries{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkShortMatch checks whether two digests match as either whole
|
||||||
|
// values or short values. This function does not test equality,
|
||||||
|
// rather whether the second value could match against the first
|
||||||
|
// value.
|
||||||
|
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
|
||||||
|
if len(hex) == len(shortHex) {
|
||||||
|
if hex != shortHex {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
} else if !strings.HasPrefix(hex, shortHex) {
|
||||||
|
return false
|
||||||
|
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup looks for a digest matching the given string representation.
|
||||||
|
// If no digests could be found ErrDigestNotFound will be returned
|
||||||
|
// with an empty digest value. If multiple matches are found
|
||||||
|
// ErrDigestAmbiguous will be returned with an empty digest value.
|
||||||
|
func (dst *Set) Lookup(d string) (digest.Digest, error) {
|
||||||
|
dst.mutex.RLock()
|
||||||
|
defer dst.mutex.RUnlock()
|
||||||
|
if len(dst.entries) == 0 {
|
||||||
|
return "", ErrDigestNotFound
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
searchFunc func(int) bool
|
||||||
|
alg digest.Algorithm
|
||||||
|
hex string
|
||||||
|
)
|
||||||
|
dgst, err := digest.Parse(d)
|
||||||
|
if err == digest.ErrDigestInvalidFormat {
|
||||||
|
hex = d
|
||||||
|
searchFunc = func(i int) bool {
|
||||||
|
return dst.entries[i].val >= d
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
hex = dgst.Hex()
|
||||||
|
alg = dgst.Algorithm()
|
||||||
|
searchFunc = func(i int) bool {
|
||||||
|
if dst.entries[i].val == hex {
|
||||||
|
return dst.entries[i].alg >= alg
|
||||||
|
}
|
||||||
|
return dst.entries[i].val >= hex
|
||||||
|
}
|
||||||
|
}
|
||||||
|
idx := sort.Search(len(dst.entries), searchFunc)
|
||||||
|
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
|
||||||
|
return "", ErrDigestNotFound
|
||||||
|
}
|
||||||
|
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
|
||||||
|
return dst.entries[idx].digest, nil
|
||||||
|
}
|
||||||
|
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
|
||||||
|
return "", ErrDigestAmbiguous
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst.entries[idx].digest, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds the given digest to the set. An error will be returned
|
||||||
|
// if the given digest is invalid. If the digest already exists in the
|
||||||
|
// set, this operation will be a no-op.
|
||||||
|
func (dst *Set) Add(d digest.Digest) error {
|
||||||
|
if err := d.Validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dst.mutex.Lock()
|
||||||
|
defer dst.mutex.Unlock()
|
||||||
|
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||||
|
searchFunc := func(i int) bool {
|
||||||
|
if dst.entries[i].val == entry.val {
|
||||||
|
return dst.entries[i].alg >= entry.alg
|
||||||
|
}
|
||||||
|
return dst.entries[i].val >= entry.val
|
||||||
|
}
|
||||||
|
idx := sort.Search(len(dst.entries), searchFunc)
|
||||||
|
if idx == len(dst.entries) {
|
||||||
|
dst.entries = append(dst.entries, entry)
|
||||||
|
return nil
|
||||||
|
} else if dst.entries[idx].digest == d {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
entries := append(dst.entries, nil)
|
||||||
|
copy(entries[idx+1:], entries[idx:len(entries)-1])
|
||||||
|
entries[idx] = entry
|
||||||
|
dst.entries = entries
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes the given digest from the set. An err will be
|
||||||
|
// returned if the given digest is invalid. If the digest does
|
||||||
|
// not exist in the set, this operation will be a no-op.
|
||||||
|
func (dst *Set) Remove(d digest.Digest) error {
|
||||||
|
if err := d.Validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dst.mutex.Lock()
|
||||||
|
defer dst.mutex.Unlock()
|
||||||
|
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||||
|
searchFunc := func(i int) bool {
|
||||||
|
if dst.entries[i].val == entry.val {
|
||||||
|
return dst.entries[i].alg >= entry.alg
|
||||||
|
}
|
||||||
|
return dst.entries[i].val >= entry.val
|
||||||
|
}
|
||||||
|
idx := sort.Search(len(dst.entries), searchFunc)
|
||||||
|
// Not found if idx is after or value at idx is not digest
|
||||||
|
if idx == len(dst.entries) || dst.entries[idx].digest != d {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
entries := dst.entries
|
||||||
|
copy(entries[idx:], entries[idx+1:])
|
||||||
|
entries = entries[:len(entries)-1]
|
||||||
|
dst.entries = entries
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// All returns all the digests in the set
|
||||||
|
func (dst *Set) All() []digest.Digest {
|
||||||
|
dst.mutex.RLock()
|
||||||
|
defer dst.mutex.RUnlock()
|
||||||
|
retValues := make([]digest.Digest, len(dst.entries))
|
||||||
|
for i := range dst.entries {
|
||||||
|
retValues[i] = dst.entries[i].digest
|
||||||
|
}
|
||||||
|
|
||||||
|
return retValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// ShortCodeTable returns a map of Digest to unique short codes. The
|
||||||
|
// length represents the minimum value, the maximum length may be the
|
||||||
|
// entire value of digest if uniqueness cannot be achieved without the
|
||||||
|
// full value. This function will attempt to make short codes as short
|
||||||
|
// as possible to be unique.
|
||||||
|
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
|
||||||
|
dst.mutex.RLock()
|
||||||
|
defer dst.mutex.RUnlock()
|
||||||
|
m := make(map[digest.Digest]string, len(dst.entries))
|
||||||
|
l := length
|
||||||
|
resetIdx := 0
|
||||||
|
for i := 0; i < len(dst.entries); i++ {
|
||||||
|
var short string
|
||||||
|
extended := true
|
||||||
|
for extended {
|
||||||
|
extended = false
|
||||||
|
if len(dst.entries[i].val) <= l {
|
||||||
|
short = dst.entries[i].digest.String()
|
||||||
|
} else {
|
||||||
|
short = dst.entries[i].val[:l]
|
||||||
|
for j := i + 1; j < len(dst.entries); j++ {
|
||||||
|
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
|
||||||
|
if j > resetIdx {
|
||||||
|
resetIdx = j
|
||||||
|
}
|
||||||
|
extended = true
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if extended {
|
||||||
|
l++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m[dst.entries[i].digest] = short
|
||||||
|
if i >= resetIdx {
|
||||||
|
l = length
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
type digestEntry struct {
|
||||||
|
alg digest.Algorithm
|
||||||
|
val string
|
||||||
|
digest digest.Digest
|
||||||
|
}
|
||||||
|
|
||||||
|
type digestEntries []*digestEntry
|
||||||
|
|
||||||
|
func (d digestEntries) Len() int {
|
||||||
|
return len(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d digestEntries) Less(i, j int) bool {
|
||||||
|
if d[i].val != d[j].val {
|
||||||
|
return d[i].val < d[j].val
|
||||||
|
}
|
||||||
|
return d[i].alg < d[j].alg
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d digestEntries) Swap(i, j int) {
|
||||||
|
d[i], d[j] = d[j], d[i]
|
||||||
|
}
|
||||||
42
vendor/github.com/docker/distribution/reference/helpers.go
generated
vendored
Normal file
42
vendor/github.com/docker/distribution/reference/helpers.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package reference
|
||||||
|
|
||||||
|
import "path"
|
||||||
|
|
||||||
|
// IsNameOnly returns true if reference only contains a repo name.
|
||||||
|
func IsNameOnly(ref Named) bool {
|
||||||
|
if _, ok := ref.(NamedTagged); ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if _, ok := ref.(Canonical); ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// FamiliarName returns the familiar name string
|
||||||
|
// for the given named, familiarizing if needed.
|
||||||
|
func FamiliarName(ref Named) string {
|
||||||
|
if nn, ok := ref.(normalizedNamed); ok {
|
||||||
|
return nn.Familiar().Name()
|
||||||
|
}
|
||||||
|
return ref.Name()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FamiliarString returns the familiar string representation
|
||||||
|
// for the given reference, familiarizing if needed.
|
||||||
|
func FamiliarString(ref Reference) string {
|
||||||
|
if nn, ok := ref.(normalizedNamed); ok {
|
||||||
|
return nn.Familiar().String()
|
||||||
|
}
|
||||||
|
return ref.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FamiliarMatch reports whether ref matches the specified pattern.
|
||||||
|
// See https://godoc.org/path#Match for supported patterns.
|
||||||
|
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
|
||||||
|
matched, err := path.Match(pattern, FamiliarString(ref))
|
||||||
|
if namedRef, isNamed := ref.(Named); isNamed && !matched {
|
||||||
|
matched, _ = path.Match(pattern, FamiliarName(namedRef))
|
||||||
|
}
|
||||||
|
return matched, err
|
||||||
|
}
|
||||||
170
vendor/github.com/docker/distribution/reference/normalize.go
generated
vendored
Normal file
170
vendor/github.com/docker/distribution/reference/normalize.go
generated
vendored
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
package reference
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/distribution/digestset"
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
legacyDefaultDomain = "index.docker.io"
|
||||||
|
defaultDomain = "docker.io"
|
||||||
|
officialRepoName = "library"
|
||||||
|
defaultTag = "latest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// normalizedNamed represents a name which has been
|
||||||
|
// normalized and has a familiar form. A familiar name
|
||||||
|
// is what is used in Docker UI. An example normalized
|
||||||
|
// name is "docker.io/library/ubuntu" and corresponding
|
||||||
|
// familiar name of "ubuntu".
|
||||||
|
type normalizedNamed interface {
|
||||||
|
Named
|
||||||
|
Familiar() Named
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseNormalizedNamed parses a string into a named reference
|
||||||
|
// transforming a familiar name from Docker UI to a fully
|
||||||
|
// qualified reference. If the value may be an identifier
|
||||||
|
// use ParseAnyReference.
|
||||||
|
func ParseNormalizedNamed(s string) (Named, error) {
|
||||||
|
if ok := anchoredIdentifierRegexp.MatchString(s); ok {
|
||||||
|
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
|
||||||
|
}
|
||||||
|
domain, remainder := splitDockerDomain(s)
|
||||||
|
var remoteName string
|
||||||
|
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
|
||||||
|
remoteName = remainder[:tagSep]
|
||||||
|
} else {
|
||||||
|
remoteName = remainder
|
||||||
|
}
|
||||||
|
if strings.ToLower(remoteName) != remoteName {
|
||||||
|
return nil, errors.New("invalid reference format: repository name must be lowercase")
|
||||||
|
}
|
||||||
|
|
||||||
|
ref, err := Parse(domain + "/" + remainder)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
named, isNamed := ref.(Named)
|
||||||
|
if !isNamed {
|
||||||
|
return nil, fmt.Errorf("reference %s has no name", ref.String())
|
||||||
|
}
|
||||||
|
return named, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// splitDockerDomain splits a repository name to domain and remotename string.
|
||||||
|
// If no valid domain is found, the default domain is used. Repository name
|
||||||
|
// needs to be already validated before.
|
||||||
|
func splitDockerDomain(name string) (domain, remainder string) {
|
||||||
|
i := strings.IndexRune(name, '/')
|
||||||
|
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
|
||||||
|
domain, remainder = defaultDomain, name
|
||||||
|
} else {
|
||||||
|
domain, remainder = name[:i], name[i+1:]
|
||||||
|
}
|
||||||
|
if domain == legacyDefaultDomain {
|
||||||
|
domain = defaultDomain
|
||||||
|
}
|
||||||
|
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
|
||||||
|
remainder = officialRepoName + "/" + remainder
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// familiarizeName returns a shortened version of the name familiar
|
||||||
|
// to to the Docker UI. Familiar names have the default domain
|
||||||
|
// "docker.io" and "library/" repository prefix removed.
|
||||||
|
// For example, "docker.io/library/redis" will have the familiar
|
||||||
|
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
|
||||||
|
// Returns a familiarized named only reference.
|
||||||
|
func familiarizeName(named namedRepository) repository {
|
||||||
|
repo := repository{
|
||||||
|
domain: named.Domain(),
|
||||||
|
path: named.Path(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if repo.domain == defaultDomain {
|
||||||
|
repo.domain = ""
|
||||||
|
// Handle official repositories which have the pattern "library/<official repo name>"
|
||||||
|
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
|
||||||
|
repo.path = split[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return repo
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r reference) Familiar() Named {
|
||||||
|
return reference{
|
||||||
|
namedRepository: familiarizeName(r.namedRepository),
|
||||||
|
tag: r.tag,
|
||||||
|
digest: r.digest,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r repository) Familiar() Named {
|
||||||
|
return familiarizeName(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t taggedReference) Familiar() Named {
|
||||||
|
return taggedReference{
|
||||||
|
namedRepository: familiarizeName(t.namedRepository),
|
||||||
|
tag: t.tag,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c canonicalReference) Familiar() Named {
|
||||||
|
return canonicalReference{
|
||||||
|
namedRepository: familiarizeName(c.namedRepository),
|
||||||
|
digest: c.digest,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TagNameOnly adds the default tag "latest" to a reference if it only has
|
||||||
|
// a repo name.
|
||||||
|
func TagNameOnly(ref Named) Named {
|
||||||
|
if IsNameOnly(ref) {
|
||||||
|
namedTagged, err := WithTag(ref, defaultTag)
|
||||||
|
if err != nil {
|
||||||
|
// Default tag must be valid, to create a NamedTagged
|
||||||
|
// type with non-validated input the WithTag function
|
||||||
|
// should be used instead
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return namedTagged
|
||||||
|
}
|
||||||
|
return ref
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseAnyReference parses a reference string as a possible identifier,
|
||||||
|
// full digest, or familiar name.
|
||||||
|
func ParseAnyReference(ref string) (Reference, error) {
|
||||||
|
if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
|
||||||
|
return digestReference("sha256:" + ref), nil
|
||||||
|
}
|
||||||
|
if dgst, err := digest.Parse(ref); err == nil {
|
||||||
|
return digestReference(dgst), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return ParseNormalizedNamed(ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseAnyReferenceWithSet parses a reference string as a possible short
|
||||||
|
// identifier to be matched in a digest set, a full digest, or familiar name.
|
||||||
|
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
|
||||||
|
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
|
||||||
|
dgst, err := ds.Lookup(ref)
|
||||||
|
if err == nil {
|
||||||
|
return digestReference(dgst), nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if dgst, err := digest.Parse(ref); err == nil {
|
||||||
|
return digestReference(dgst), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ParseNormalizedNamed(ref)
|
||||||
|
}
|
||||||
433
vendor/github.com/docker/distribution/reference/reference.go
generated
vendored
Normal file
433
vendor/github.com/docker/distribution/reference/reference.go
generated
vendored
Normal file
@@ -0,0 +1,433 @@
|
|||||||
|
// Package reference provides a general type to represent any way of referencing images within the registry.
|
||||||
|
// Its main purpose is to abstract tags and digests (content-addressable hash).
|
||||||
|
//
|
||||||
|
// Grammar
|
||||||
|
//
|
||||||
|
// reference := name [ ":" tag ] [ "@" digest ]
|
||||||
|
// name := [domain '/'] path-component ['/' path-component]*
|
||||||
|
// domain := domain-component ['.' domain-component]* [':' port-number]
|
||||||
|
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||||
|
// port-number := /[0-9]+/
|
||||||
|
// path-component := alpha-numeric [separator alpha-numeric]*
|
||||||
|
// alpha-numeric := /[a-z0-9]+/
|
||||||
|
// separator := /[_.]|__|[-]*/
|
||||||
|
//
|
||||||
|
// tag := /[\w][\w.-]{0,127}/
|
||||||
|
//
|
||||||
|
// digest := digest-algorithm ":" digest-hex
|
||||||
|
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
|
||||||
|
// digest-algorithm-separator := /[+.-_]/
|
||||||
|
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
|
||||||
|
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
|
||||||
|
//
|
||||||
|
// identifier := /[a-f0-9]{64}/
|
||||||
|
// short-identifier := /[a-f0-9]{6,64}/
|
||||||
|
package reference
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// NameTotalLengthMax is the maximum total number of characters in a
	// repository name (domain plus path, including separators).
	NameTotalLengthMax = 255
)
|
||||||
|
|
||||||
|
var (
	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
	ErrReferenceInvalidFormat = errors.New("invalid reference format")

	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
	ErrTagInvalidFormat = errors.New("invalid tag format")

	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
	ErrDigestInvalidFormat = errors.New("invalid digest format")

	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
	ErrNameContainsUppercase = errors.New("repository name must be lowercase")

	// ErrNameEmpty is returned for empty, invalid repository names.
	ErrNameEmpty = errors.New("repository name must have at least one component")

	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)

	// ErrNameNotCanonical is returned when a name is not canonical.
	ErrNameNotCanonical = errors.New("repository name must be canonical")
)
|
||||||
|
|
||||||
|
// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
	// String returns the full reference.
	String() string
}
|
||||||
|
|
||||||
|
// Field provides a wrapper type for resolving correct reference types when
|
||||||
|
// working with encoding.
|
||||||
|
type Field struct {
|
||||||
|
reference Reference
|
||||||
|
}
|
||||||
|
|
||||||
|
// AsField wraps a reference in a Field for encoding.
|
||||||
|
func AsField(reference Reference) Field {
|
||||||
|
return Field{reference}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reference unwraps the reference type from the field to
|
||||||
|
// return the Reference object. This object should be
|
||||||
|
// of the appropriate type to further check for different
|
||||||
|
// reference types.
|
||||||
|
func (f Field) Reference() Reference {
|
||||||
|
return f.reference
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalText serializes the field to byte text which
|
||||||
|
// is the string of the reference.
|
||||||
|
func (f Field) MarshalText() (p []byte, err error) {
|
||||||
|
return []byte(f.reference.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText parses text bytes by invoking the
|
||||||
|
// reference parser to ensure the appropriately
|
||||||
|
// typed reference object is wrapped by field.
|
||||||
|
func (f *Field) UnmarshalText(p []byte) error {
|
||||||
|
r, err := Parse(string(p))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
f.reference = r
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Named is an object with a full name.
type Named interface {
	Reference
	// Name returns the full repository name, e.g. "docker.io/library/ubuntu".
	Name() string
}

// Tagged is an object which has a tag.
type Tagged interface {
	Reference
	// Tag returns the tag component.
	Tag() string
}

// NamedTagged is an object including a name and tag.
type NamedTagged interface {
	Named
	// Tag returns the tag component.
	Tag() string
}

// Digested is an object which has a digest
// in which it can be referenced by.
type Digested interface {
	Reference
	// Digest returns the digest component.
	Digest() digest.Digest
}

// Canonical reference is an object with a fully unique
// name including a name with domain and digest.
type Canonical interface {
	Named
	// Digest returns the digest component.
	Digest() digest.Digest
}

// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
	Named
	// Domain returns the registry domain component of the name.
	Domain() string
	// Path returns the repository path component of the name.
	Path() string
}
|
||||||
|
|
||||||
|
// Domain returns the domain part of the Named reference
|
||||||
|
func Domain(named Named) string {
|
||||||
|
if r, ok := named.(namedRepository); ok {
|
||||||
|
return r.Domain()
|
||||||
|
}
|
||||||
|
domain, _ := splitDomain(named.Name())
|
||||||
|
return domain
|
||||||
|
}
|
||||||
|
|
||||||
|
// Path returns the name without the domain part of the Named reference
|
||||||
|
func Path(named Named) (name string) {
|
||||||
|
if r, ok := named.(namedRepository); ok {
|
||||||
|
return r.Path()
|
||||||
|
}
|
||||||
|
_, path := splitDomain(named.Name())
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
|
||||||
|
func splitDomain(name string) (string, string) {
|
||||||
|
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||||
|
if len(match) != 3 {
|
||||||
|
return "", name
|
||||||
|
}
|
||||||
|
return match[1], match[2]
|
||||||
|
}
|
||||||
|
|
||||||
|
// SplitHostname splits a named reference into a
|
||||||
|
// hostname and name string. If no valid hostname is
|
||||||
|
// found, the hostname is empty and the full value
|
||||||
|
// is returned as name
|
||||||
|
// DEPRECATED: Use Domain or Path
|
||||||
|
func SplitHostname(named Named) (string, string) {
|
||||||
|
if r, ok := named.(namedRepository); ok {
|
||||||
|
return r.Domain(), r.Path()
|
||||||
|
}
|
||||||
|
return splitDomain(named.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
	matches := ReferenceRegexp.FindStringSubmatch(s)
	if matches == nil {
		if s == "" {
			return nil, ErrNameEmpty
		}
		// If lowercasing the input makes it valid, the only problem was
		// uppercase characters in the name — report that specifically.
		if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
			return nil, ErrNameContainsUppercase
		}
		return nil, ErrReferenceInvalidFormat
	}

	// matches[1] is the name, matches[2] the tag, matches[3] the digest.
	if len(matches[1]) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}

	var repo repository

	// Split the name into domain and path components where possible.
	nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
	if nameMatch != nil && len(nameMatch) == 3 {
		repo.domain = nameMatch[1]
		repo.path = nameMatch[2]
	} else {
		repo.domain = ""
		repo.path = matches[1]
	}

	ref := reference{
		namedRepository: repo,
		tag:             matches[2],
	}
	if matches[3] != "" {
		var err error
		ref.digest, err = digest.Parse(matches[3])
		if err != nil {
			return nil, err
		}
	}

	// Narrow the fully-populated reference down to the most specific
	// concrete type (digest-only, tagged, canonical, ...).
	r := getBestReferenceType(ref)
	if r == nil {
		return nil, ErrNameEmpty
	}

	return r, nil
}
|
||||||
|
|
||||||
|
// ParseNamed parses s and returns a syntactically valid reference implementing
|
||||||
|
// the Named interface. The reference must have a name and be in the canonical
|
||||||
|
// form, otherwise an error is returned.
|
||||||
|
// If an error was encountered it is returned, along with a nil Reference.
|
||||||
|
// NOTE: ParseNamed will not handle short digests.
|
||||||
|
func ParseNamed(s string) (Named, error) {
|
||||||
|
named, err := ParseNormalizedNamed(s)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if named.String() != s {
|
||||||
|
return nil, ErrNameNotCanonical
|
||||||
|
}
|
||||||
|
return named, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithName returns a named object representing the given string. If the input
|
||||||
|
// is invalid ErrReferenceInvalidFormat will be returned.
|
||||||
|
func WithName(name string) (Named, error) {
|
||||||
|
if len(name) > NameTotalLengthMax {
|
||||||
|
return nil, ErrNameTooLong
|
||||||
|
}
|
||||||
|
|
||||||
|
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||||
|
if match == nil || len(match) != 3 {
|
||||||
|
return nil, ErrReferenceInvalidFormat
|
||||||
|
}
|
||||||
|
return repository{
|
||||||
|
domain: match[1],
|
||||||
|
path: match[2],
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTag combines the name from "name" and the tag from "tag" to form a
|
||||||
|
// reference incorporating both the name and the tag.
|
||||||
|
func WithTag(name Named, tag string) (NamedTagged, error) {
|
||||||
|
if !anchoredTagRegexp.MatchString(tag) {
|
||||||
|
return nil, ErrTagInvalidFormat
|
||||||
|
}
|
||||||
|
var repo repository
|
||||||
|
if r, ok := name.(namedRepository); ok {
|
||||||
|
repo.domain = r.Domain()
|
||||||
|
repo.path = r.Path()
|
||||||
|
} else {
|
||||||
|
repo.path = name.Name()
|
||||||
|
}
|
||||||
|
if canonical, ok := name.(Canonical); ok {
|
||||||
|
return reference{
|
||||||
|
namedRepository: repo,
|
||||||
|
tag: tag,
|
||||||
|
digest: canonical.Digest(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
return taggedReference{
|
||||||
|
namedRepository: repo,
|
||||||
|
tag: tag,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDigest combines the name from "name" and the digest from "digest" to form
|
||||||
|
// a reference incorporating both the name and the digest.
|
||||||
|
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
|
||||||
|
if !anchoredDigestRegexp.MatchString(digest.String()) {
|
||||||
|
return nil, ErrDigestInvalidFormat
|
||||||
|
}
|
||||||
|
var repo repository
|
||||||
|
if r, ok := name.(namedRepository); ok {
|
||||||
|
repo.domain = r.Domain()
|
||||||
|
repo.path = r.Path()
|
||||||
|
} else {
|
||||||
|
repo.path = name.Name()
|
||||||
|
}
|
||||||
|
if tagged, ok := name.(Tagged); ok {
|
||||||
|
return reference{
|
||||||
|
namedRepository: repo,
|
||||||
|
tag: tagged.Tag(),
|
||||||
|
digest: digest,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
return canonicalReference{
|
||||||
|
namedRepository: repo,
|
||||||
|
digest: digest,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimNamed removes any tag or digest from the named reference.
|
||||||
|
func TrimNamed(ref Named) Named {
|
||||||
|
domain, path := SplitHostname(ref)
|
||||||
|
return repository{
|
||||||
|
domain: domain,
|
||||||
|
path: path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getBestReferenceType(ref reference) Reference {
|
||||||
|
if ref.Name() == "" {
|
||||||
|
// Allow digest only references
|
||||||
|
if ref.digest != "" {
|
||||||
|
return digestReference(ref.digest)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if ref.tag == "" {
|
||||||
|
if ref.digest != "" {
|
||||||
|
return canonicalReference{
|
||||||
|
namedRepository: ref.namedRepository,
|
||||||
|
digest: ref.digest,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ref.namedRepository
|
||||||
|
}
|
||||||
|
if ref.digest == "" {
|
||||||
|
return taggedReference{
|
||||||
|
namedRepository: ref.namedRepository,
|
||||||
|
tag: ref.tag,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ref
|
||||||
|
}
|
||||||
|
|
||||||
|
// reference is the most general reference type: a repository name together
// with both a tag and a digest.
type reference struct {
	namedRepository
	tag    string
	digest digest.Digest
}

// String returns the full "name:tag@digest" form of the reference.
func (r reference) String() string {
	return r.Name() + ":" + r.tag + "@" + r.digest.String()
}

// Tag returns the tag component.
func (r reference) Tag() string {
	return r.tag
}

// Digest returns the digest component.
func (r reference) Digest() digest.Digest {
	return r.digest
}
|
||||||
|
|
||||||
|
// repository holds the domain and path components of a repository name.
type repository struct {
	domain string
	path   string
}

// String returns the full repository name.
func (r repository) String() string {
	return r.Name()
}

// Name joins domain and path with "/", omitting the separator when the
// domain is empty.
func (r repository) Name() string {
	if r.domain != "" {
		return r.domain + "/" + r.path
	}
	return r.path
}

// Domain returns the domain component.
func (r repository) Domain() string {
	return r.domain
}

// Path returns the path component.
func (r repository) Path() string {
	return r.path
}
|
||||||
|
|
||||||
|
// digestReference is a digest-only reference with no name component.
type digestReference digest.Digest

// String returns the digest string.
func (d digestReference) String() string {
	return digest.Digest(d).String()
}

// Digest returns the underlying digest.
func (d digestReference) Digest() digest.Digest {
	return digest.Digest(d)
}
|
||||||
|
|
||||||
|
// taggedReference is a reference with a name and a tag but no digest.
type taggedReference struct {
	namedRepository
	tag string
}

// String returns the "name:tag" form of the reference.
func (t taggedReference) String() string {
	return t.Name() + ":" + t.tag
}

// Tag returns the tag component.
func (t taggedReference) Tag() string {
	return t.tag
}
|
||||||
|
|
||||||
|
// canonicalReference is a reference with a name and a digest but no tag.
type canonicalReference struct {
	namedRepository
	digest digest.Digest
}

// String returns the "name@digest" form of the reference.
func (c canonicalReference) String() string {
	return c.Name() + "@" + c.digest.String()
}

// Digest returns the digest component.
func (c canonicalReference) Digest() digest.Digest {
	return c.digest
}
|
||||||
143
vendor/github.com/docker/distribution/reference/regexp.go
generated
vendored
Normal file
143
vendor/github.com/docker/distribution/reference/regexp.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
package reference
|
||||||
|
|
||||||
|
import "regexp"
|
||||||
|
|
||||||
|
var (
	// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allow one period, one or two underscore and multiple
	// dashes.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscore and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// domainComponentRegexp restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp
	// and followed by an optional port.
	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// DomainRegexp defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	DomainRegexp = expression(
		domainComponentRegexp,
		optional(repeated(literal(`.`), domainComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = anchored(TagRegexp)

	// DigestRegexp matches valid digests.
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = anchored(DigestRegexp)

	// NameRegexp is the format for the name component of references. The
	// regexp has capturing groups for the domain and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(DomainRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(DomainRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))

	// IdentifierRegexp is the format for string identifier used as a
	// content addressable identifier using sha256. These identifiers
	// are like digests without the algorithm, since sha256 is used.
	IdentifierRegexp = match(`([a-f0-9]{64})`)

	// ShortIdentifierRegexp is the format used to represent a prefix
	// of an identifier. A prefix may be used to match a sha256 identifier
	// within a list of trusted identifiers.
	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

	// anchoredIdentifierRegexp is used to check or match an
	// identifier value, anchored at start and end of string.
	anchoredIdentifierRegexp = anchored(IdentifierRegexp)

	// anchoredShortIdentifierRegexp is used to check if a value
	// is a possible identifier prefix, anchored at start and end
	// of string.
	anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)
|
||||||
|
|
||||||
|
// match compiles the string to a regular expression.
// It panics on an invalid pattern, which is acceptable here because every
// call site uses a compile-time constant pattern.
var match = regexp.MustCompile
|
||||||
|
|
||||||
|
// literal compiles s into a literal regular expression, escaping any regexp
|
||||||
|
// reserved characters.
|
||||||
|
func literal(s string) *regexp.Regexp {
|
||||||
|
re := match(regexp.QuoteMeta(s))
|
||||||
|
|
||||||
|
if _, complete := re.LiteralPrefix(); !complete {
|
||||||
|
panic("must be a literal")
|
||||||
|
}
|
||||||
|
|
||||||
|
return re
|
||||||
|
}
|
||||||
|
|
||||||
|
// expression defines a full expression, where each regular expression must
|
||||||
|
// follow the previous.
|
||||||
|
func expression(res ...*regexp.Regexp) *regexp.Regexp {
|
||||||
|
var s string
|
||||||
|
for _, re := range res {
|
||||||
|
s += re.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
return match(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// optional wraps the expression in a non-capturing group and makes the
|
||||||
|
// production optional.
|
||||||
|
func optional(res ...*regexp.Regexp) *regexp.Regexp {
|
||||||
|
return match(group(expression(res...)).String() + `?`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// repeated wraps the regexp in a non-capturing group to get one or more
|
||||||
|
// matches.
|
||||||
|
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
|
||||||
|
return match(group(expression(res...)).String() + `+`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// group wraps the regexp in a non-capturing group.
|
||||||
|
func group(res ...*regexp.Regexp) *regexp.Regexp {
|
||||||
|
return match(`(?:` + expression(res...).String() + `)`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// capture wraps the expression in a capturing group.
|
||||||
|
func capture(res ...*regexp.Regexp) *regexp.Regexp {
|
||||||
|
return match(`(` + expression(res...).String() + `)`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// anchored anchors the regular expression by adding start and end delimiters.
|
||||||
|
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
|
||||||
|
return match(`^` + expression(res...).String() + `$`)
|
||||||
|
}
|
||||||
191
vendor/github.com/golang/glog/LICENSE
generated
vendored
Normal file
191
vendor/github.com/golang/glog/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, and
|
||||||
|
distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||||
|
owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||||
|
that control, are controlled by, or are under common control with that entity.
|
||||||
|
For the purposes of this definition, "control" means (i) the power, direct or
|
||||||
|
indirect, to cause the direction or management of such entity, whether by
|
||||||
|
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||||
|
permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, including
|
||||||
|
but not limited to software source code, documentation source, and configuration
|
||||||
|
files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical transformation or
|
||||||
|
translation of a Source form, including but not limited to compiled object code,
|
||||||
|
generated documentation, and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||||
|
available under the License, as indicated by a copyright notice that is included
|
||||||
|
in or attached to the work (an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||||
|
is based on (or derived from) the Work and for which the editorial revisions,
|
||||||
|
annotations, elaborations, or other modifications represent, as a whole, an
|
||||||
|
original work of authorship. For the purposes of this License, Derivative Works
|
||||||
|
shall not include works that remain separable from, or merely link (or bind by
|
||||||
|
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including the original version
|
||||||
|
of the Work and any modifications or additions to that Work or Derivative Works
|
||||||
|
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||||
|
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||||
|
on behalf of the copyright owner. For the purposes of this definition,
|
||||||
|
"submitted" means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems, and
|
||||||
|
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||||
|
the purpose of discussing and improving the Work, but excluding communication
|
||||||
|
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||||
|
owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||||
|
of whom a Contribution has been received by Licensor and subsequently
|
||||||
|
incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||||
|
Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable (except as stated in this section) patent license to make, have
|
||||||
|
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||||
|
such license applies only to those patent claims licensable by such Contributor
|
||||||
|
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||||
|
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||||
|
submitted. If You institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||||
|
Contribution incorporated within the Work constitutes direct or contributory
|
||||||
|
patent infringement, then any patent licenses granted to You under this License
|
||||||
|
for that Work shall terminate as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution.
|
||||||
|
|
||||||
|
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||||
|
in any medium, with or without modifications, and in Source or Object form,
|
||||||
|
provided that You meet the following conditions:
|
||||||
|
|
||||||
|
You must give any other recipients of the Work or Derivative Works a copy of
|
||||||
|
this License; and
|
||||||
|
You must cause any modified files to carry prominent notices stating that You
|
||||||
|
changed the files; and
|
||||||
|
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||||
|
all copyright, patent, trademark, and attribution notices from the Source form
|
||||||
|
of the Work, excluding those notices that do not pertain to any part of the
|
||||||
|
Derivative Works; and
|
||||||
|
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||||
|
Derivative Works that You distribute must include a readable copy of the
|
||||||
|
attribution notices contained within such NOTICE file, excluding those notices
|
||||||
|
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||||
|
following places: within a NOTICE text file distributed as part of the
|
||||||
|
Derivative Works; within the Source form or documentation, if provided along
|
||||||
|
with the Derivative Works; or, within a display generated by the Derivative
|
||||||
|
Works, if and wherever such third-party notices normally appear. The contents of
|
||||||
|
the NOTICE file are for informational purposes only and do not modify the
|
||||||
|
License. You may add Your own attribution notices within Derivative Works that
|
||||||
|
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||||
|
provided that such additional attribution notices cannot be construed as
|
||||||
|
modifying the License.
|
||||||
|
You may add Your own copyright statement to Your modifications and may provide
|
||||||
|
additional or different license terms and conditions for use, reproduction, or
|
||||||
|
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||||
|
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||||
|
with the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions.
|
||||||
|
|
||||||
|
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||||
|
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||||
|
conditions of this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||||
|
any separate license agreement you may have executed with Licensor regarding
|
||||||
|
such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks.
|
||||||
|
|
||||||
|
This License does not grant permission to use the trade names, trademarks,
|
||||||
|
service marks, or product names of the Licensor, except as required for
|
||||||
|
reasonable and customary use in describing the origin of the Work and
|
||||||
|
reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||||
|
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||||
|
including, without limitation, any warranties or conditions of TITLE,
|
||||||
|
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||||
|
solely responsible for determining the appropriateness of using or
|
||||||
|
redistributing the Work and assume any risks associated with Your exercise of
|
||||||
|
permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability.
|
||||||
|
|
||||||
|
In no event and under no legal theory, whether in tort (including negligence),
|
||||||
|
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||||
|
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special, incidental,
|
||||||
|
or consequential damages of any character arising as a result of this License or
|
||||||
|
out of the use or inability to use the Work (including but not limited to
|
||||||
|
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||||
|
any and all other commercial damages or losses), even if such Contributor has
|
||||||
|
been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability.
|
||||||
|
|
||||||
|
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||||
|
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||||
|
other liability obligations and/or rights consistent with this License. However,
|
||||||
|
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||||
|
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||||
|
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason of your
|
||||||
|
accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following boilerplate
|
||||||
|
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||||
|
identifying information. (Don't include the brackets!) The text should be
|
||||||
|
enclosed in the appropriate comment syntax for the file format. We also
|
||||||
|
recommend that a file or class name and description of purpose be included on
|
||||||
|
the same "printed page" as the copyright notice for easier identification within
|
||||||
|
third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
44
vendor/github.com/golang/glog/README
generated
vendored
Normal file
44
vendor/github.com/golang/glog/README
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
glog
|
||||||
|
====
|
||||||
|
|
||||||
|
Leveled execution logs for Go.
|
||||||
|
|
||||||
|
This is an efficient pure Go implementation of leveled logs in the
|
||||||
|
manner of the open source C++ package
|
||||||
|
https://github.com/google/glog
|
||||||
|
|
||||||
|
By binding methods to booleans it is possible to use the log package
|
||||||
|
without paying the expense of evaluating the arguments to the log.
|
||||||
|
Through the -vmodule flag, the package also provides fine-grained
|
||||||
|
control over logging at the file level.
|
||||||
|
|
||||||
|
The comment from glog.go introduces the ideas:
|
||||||
|
|
||||||
|
Package glog implements logging analogous to the Google-internal
|
||||||
|
C++ INFO/ERROR/V setup. It provides functions Info, Warning,
|
||||||
|
Error, Fatal, plus formatting variants such as Infof. It
|
||||||
|
also provides V-style logging controlled by the -v and
|
||||||
|
-vmodule=file=2 flags.
|
||||||
|
|
||||||
|
Basic examples:
|
||||||
|
|
||||||
|
glog.Info("Prepare to repel boarders")
|
||||||
|
|
||||||
|
glog.Fatalf("Initialization failed: %s", err)
|
||||||
|
|
||||||
|
See the documentation for the V function for an explanation
|
||||||
|
of these examples:
|
||||||
|
|
||||||
|
if glog.V(2) {
|
||||||
|
glog.Info("Starting transaction...")
|
||||||
|
}
|
||||||
|
|
||||||
|
glog.V(2).Infoln("Processed", nItems, "elements")
|
||||||
|
|
||||||
|
|
||||||
|
The repository contains an open source version of the log package
|
||||||
|
used inside Google. The master copy of the source lives inside
|
||||||
|
Google, not here. The code in this repo is for export only and is not itself
|
||||||
|
under development. Feature requests will be ignored.
|
||||||
|
|
||||||
|
Send bug reports to golang-nuts@googlegroups.com.
|
||||||
1180
vendor/github.com/golang/glog/glog.go
generated
vendored
Normal file
1180
vendor/github.com/golang/glog/glog.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
124
vendor/github.com/golang/glog/glog_file.go
generated
vendored
Normal file
124
vendor/github.com/golang/glog/glog_file.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
|
||||||
|
//
|
||||||
|
// Copyright 2013 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// File I/O for logs.
|
||||||
|
|
||||||
|
package glog
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/user"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MaxSize is the maximum size of a log file in bytes.
|
||||||
|
var MaxSize uint64 = 1024 * 1024 * 1800
|
||||||
|
|
||||||
|
// logDirs lists the candidate directories for new log files.
|
||||||
|
var logDirs []string
|
||||||
|
|
||||||
|
// If non-empty, overrides the choice of directory in which to write logs.
|
||||||
|
// See createLogDirs for the full list of possible destinations.
|
||||||
|
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
|
||||||
|
|
||||||
|
func createLogDirs() {
|
||||||
|
if *logDir != "" {
|
||||||
|
logDirs = append(logDirs, *logDir)
|
||||||
|
}
|
||||||
|
logDirs = append(logDirs, os.TempDir())
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
pid = os.Getpid()
|
||||||
|
program = filepath.Base(os.Args[0])
|
||||||
|
host = "unknownhost"
|
||||||
|
userName = "unknownuser"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
h, err := os.Hostname()
|
||||||
|
if err == nil {
|
||||||
|
host = shortHostname(h)
|
||||||
|
}
|
||||||
|
|
||||||
|
current, err := user.Current()
|
||||||
|
if err == nil {
|
||||||
|
userName = current.Username
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sanitize userName since it may contain filepath separators on Windows.
|
||||||
|
userName = strings.Replace(userName, `\`, "_", -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// shortHostname returns its argument, truncating at the first period.
|
||||||
|
// For instance, given "www.google.com" it returns "www".
|
||||||
|
func shortHostname(hostname string) string {
|
||||||
|
if i := strings.Index(hostname, "."); i >= 0 {
|
||||||
|
return hostname[:i]
|
||||||
|
}
|
||||||
|
return hostname
|
||||||
|
}
|
||||||
|
|
||||||
|
// logName returns a new log file name containing tag, with start time t, and
|
||||||
|
// the name for the symlink for tag.
|
||||||
|
func logName(tag string, t time.Time) (name, link string) {
|
||||||
|
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
|
||||||
|
program,
|
||||||
|
host,
|
||||||
|
userName,
|
||||||
|
tag,
|
||||||
|
t.Year(),
|
||||||
|
t.Month(),
|
||||||
|
t.Day(),
|
||||||
|
t.Hour(),
|
||||||
|
t.Minute(),
|
||||||
|
t.Second(),
|
||||||
|
pid)
|
||||||
|
return name, program + "." + tag
|
||||||
|
}
|
||||||
|
|
||||||
|
var onceLogDirs sync.Once
|
||||||
|
|
||||||
|
// create creates a new log file and returns the file and its filename, which
|
||||||
|
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
|
||||||
|
// successfully, create also attempts to update the symlink for that tag, ignoring
|
||||||
|
// errors.
|
||||||
|
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
|
||||||
|
onceLogDirs.Do(createLogDirs)
|
||||||
|
if len(logDirs) == 0 {
|
||||||
|
return nil, "", errors.New("log: no log dirs")
|
||||||
|
}
|
||||||
|
name, link := logName(tag, t)
|
||||||
|
var lastErr error
|
||||||
|
for _, dir := range logDirs {
|
||||||
|
fname := filepath.Join(dir, name)
|
||||||
|
f, err := os.Create(fname)
|
||||||
|
if err == nil {
|
||||||
|
symlink := filepath.Join(dir, link)
|
||||||
|
os.Remove(symlink) // ignore err
|
||||||
|
os.Symlink(name, symlink) // ignore err
|
||||||
|
return f, fname, nil
|
||||||
|
}
|
||||||
|
lastErr = err
|
||||||
|
}
|
||||||
|
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
|
||||||
|
}
|
||||||
18
vendor/github.com/julienschmidt/httprouter/.travis.yml
generated
vendored
18
vendor/github.com/julienschmidt/httprouter/.travis.yml
generated
vendored
@@ -1,18 +0,0 @@
|
|||||||
sudo: false
|
|
||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.7.x
|
|
||||||
- 1.8.x
|
|
||||||
- 1.9.x
|
|
||||||
- 1.10.x
|
|
||||||
- 1.11.x
|
|
||||||
- 1.12.x
|
|
||||||
- 1.13.x
|
|
||||||
- master
|
|
||||||
before_install:
|
|
||||||
- go get github.com/mattn/goveralls
|
|
||||||
script:
|
|
||||||
- go test -v -covermode=count -coverprofile=coverage.out
|
|
||||||
- go vet ./...
|
|
||||||
- test -z "$(gofmt -d -s . | tee /dev/stderr)"
|
|
||||||
- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
|
|
||||||
29
vendor/github.com/julienschmidt/httprouter/LICENSE
generated
vendored
29
vendor/github.com/julienschmidt/httprouter/LICENSE
generated
vendored
@@ -1,29 +0,0 @@
|
|||||||
BSD 3-Clause License
|
|
||||||
|
|
||||||
Copyright (c) 2013, Julien Schmidt
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are met:
|
|
||||||
|
|
||||||
1. Redistributions of source code must retain the above copyright notice, this
|
|
||||||
list of conditions and the following disclaimer.
|
|
||||||
|
|
||||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
this list of conditions and the following disclaimer in the documentation
|
|
||||||
and/or other materials provided with the distribution.
|
|
||||||
|
|
||||||
3. Neither the name of the copyright holder nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
||||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
||||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
||||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
|
||||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
||||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
||||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
||||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
||||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
300
vendor/github.com/julienschmidt/httprouter/README.md
generated
vendored
300
vendor/github.com/julienschmidt/httprouter/README.md
generated
vendored
@@ -1,300 +0,0 @@
|
|||||||
# HttpRouter [](https://travis-ci.org/julienschmidt/httprouter) [](https://coveralls.io/github/julienschmidt/httprouter?branch=master) [](http://godoc.org/github.com/julienschmidt/httprouter)
|
|
||||||
|
|
||||||
HttpRouter is a lightweight high performance HTTP request router (also called *multiplexer* or just *mux* for short) for [Go](https://golang.org/).
|
|
||||||
|
|
||||||
In contrast to the [default mux](https://golang.org/pkg/net/http/#ServeMux) of Go's `net/http` package, this router supports variables in the routing pattern and matches against the request method. It also scales better.
|
|
||||||
|
|
||||||
The router is optimized for high performance and a small memory footprint. It scales well even with very long paths and a large number of routes. A compressing dynamic trie (radix tree) structure is used for efficient matching.
|
|
||||||
|
|
||||||
## Features
|
|
||||||
|
|
||||||
**Only explicit matches:** With other routers, like [`http.ServeMux`](https://golang.org/pkg/net/http/#ServeMux), a requested URL path could match multiple patterns. Therefore they have some awkward pattern priority rules, like *longest match* or *first registered, first matched*. By design of this router, a request can only match exactly one or no route. As a result, there are also no unintended matches, which makes it great for SEO and improves the user experience.
|
|
||||||
|
|
||||||
**Stop caring about trailing slashes:** Choose the URL style you like, the router automatically redirects the client if a trailing slash is missing or if there is one extra. Of course it only does so, if the new path has a handler. If you don't like it, you can [turn off this behavior](https://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash).
|
|
||||||
|
|
||||||
**Path auto-correction:** Besides detecting the missing or additional trailing slash at no extra cost, the router can also fix wrong cases and remove superfluous path elements (like `../` or `//`). Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users? HttpRouter can help him by making a case-insensitive look-up and redirecting him to the correct URL.
|
|
||||||
|
|
||||||
**Parameters in your routing pattern:** Stop parsing the requested URL path, just give the path segment a name and the router delivers the dynamic value to you. Because of the design of the router, path parameters are very cheap.
|
|
||||||
|
|
||||||
**Zero Garbage:** The matching and dispatching process generates zero bytes of garbage. The only heap allocations that are made are building the slice of the key-value pairs for path parameters, and building new context and request objects (the latter only in the standard `Handler`/`HandlerFunc` API). In the 3-argument API, if the request path contains no parameters not a single heap allocation is necessary.
|
|
||||||
|
|
||||||
**Best Performance:** [Benchmarks speak for themselves](https://github.com/julienschmidt/go-http-routing-benchmark). See below for technical details of the implementation.
|
|
||||||
|
|
||||||
**No more server crashes:** You can set a [Panic handler](https://godoc.org/github.com/julienschmidt/httprouter#Router.PanicHandler) to deal with panics occurring during handling a HTTP request. The router then recovers and lets the `PanicHandler` log what happened and deliver a nice error page.
|
|
||||||
|
|
||||||
**Perfect for APIs:** The router design encourages to build sensible, hierarchical RESTful APIs. Moreover it has built-in native support for [OPTIONS requests](http://zacstewart.com/2012/04/14/http-options-method.html) and `405 Method Not Allowed` replies.
|
|
||||||
|
|
||||||
Of course you can also set **custom [`NotFound`](https://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) and [`MethodNotAllowed`](https://godoc.org/github.com/julienschmidt/httprouter#Router.MethodNotAllowed) handlers** and [**serve static files**](https://godoc.org/github.com/julienschmidt/httprouter#Router.ServeFiles).
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
This is just a quick introduction, view the [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter) for details.
|
|
||||||
|
|
||||||
Let's start with a trivial example:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"log"
|
|
||||||
|
|
||||||
"github.com/julienschmidt/httprouter"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
|
||||||
fmt.Fprint(w, "Welcome!\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
|
|
||||||
fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
router := httprouter.New()
|
|
||||||
router.GET("/", Index)
|
|
||||||
router.GET("/hello/:name", Hello)
|
|
||||||
|
|
||||||
log.Fatal(http.ListenAndServe(":8080", router))
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Named parameters
|
|
||||||
|
|
||||||
As you can see, `:name` is a *named parameter*. The values are accessible via `httprouter.Params`, which is just a slice of `httprouter.Param`s. You can get the value of a parameter either by its index in the slice, or by using the `ByName(name)` method: `:name` can be retrieved by `ByName("name")`.
|
|
||||||
|
|
||||||
When using a `http.Handler` (using `router.Handler` or `http.HandlerFunc`) instead of HttpRouter's handle API using a 3rd function parameter, the named parameters are stored in the `request.Context`. See more below under [Why doesn't this work with http.Handler?](#why-doesnt-this-work-with-httphandler).
|
|
||||||
|
|
||||||
Named parameters only match a single path segment:
|
|
||||||
|
|
||||||
```
|
|
||||||
Pattern: /user/:user
|
|
||||||
|
|
||||||
/user/gordon match
|
|
||||||
/user/you match
|
|
||||||
/user/gordon/profile no match
|
|
||||||
/user/ no match
|
|
||||||
```
|
|
||||||
|
|
||||||
**Note:** Since this router has only explicit matches, you can not register static routes and parameters for the same path segment. For example you can not register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent from each other.
|
|
||||||
|
|
||||||
### Catch-All parameters
|
|
||||||
|
|
||||||
The second type are *catch-all* parameters and have the form `*name`. Like the name suggests, they match everything. Therefore they must always be at the **end** of the pattern:
|
|
||||||
|
|
||||||
```
|
|
||||||
Pattern: /src/*filepath
|
|
||||||
|
|
||||||
/src/ match
|
|
||||||
/src/somefile.go match
|
|
||||||
/src/subdir/somefile.go match
|
|
||||||
```
|
|
||||||
|
|
||||||
## How does it work?
|
|
||||||
|
|
||||||
The router relies on a tree structure which makes heavy use of *common prefixes*, it is basically a *compact* [*prefix tree*](https://en.wikipedia.org/wiki/Trie) (or just [*Radix tree*](https://en.wikipedia.org/wiki/Radix_tree)). Nodes with a common prefix also share a common parent. Here is a short example what the routing tree for the `GET` request method could look like:
|
|
||||||
|
|
||||||
```
|
|
||||||
Priority Path Handle
|
|
||||||
9 \ *<1>
|
|
||||||
3 ├s nil
|
|
||||||
2 |├earch\ *<2>
|
|
||||||
1 |└upport\ *<3>
|
|
||||||
2 ├blog\ *<4>
|
|
||||||
1 | └:post nil
|
|
||||||
1 | └\ *<5>
|
|
||||||
2 ├about-us\ *<6>
|
|
||||||
1 | └team\ *<7>
|
|
||||||
1 └contact\ *<8>
|
|
||||||
```
|
|
||||||
|
|
||||||
Every `*<num>` represents the memory address of a handler function (a pointer). If you follow a path trough the tree from the root to the leaf, you get the complete route path, e.g `\blog\:post\`, where `:post` is just a placeholder ([*parameter*](#named-parameters)) for an actual post name. Unlike hash-maps, a tree structure also allows us to use dynamic parts like the `:post` parameter, since we actually match against the routing patterns instead of just comparing hashes. [As benchmarks show](https://github.com/julienschmidt/go-http-routing-benchmark), this works very well and efficient.
|
|
||||||
|
|
||||||
Since URL paths have a hierarchical structure and make use only of a limited set of characters (byte values), it is very likely that there are a lot of common prefixes. This allows us to easily reduce the routing into ever smaller problems. Moreover the router manages a separate tree for every request method. For one thing it is more space efficient than holding a method->handle map in every single node, it also allows us to greatly reduce the routing problem before even starting the look-up in the prefix-tree.
|
|
||||||
|
|
||||||
For even better scalability, the child nodes on each tree level are ordered by priority, where the priority is just the number of handles registered in sub nodes (children, grandchildren, and so on..). This helps in two ways:
|
|
||||||
|
|
||||||
1. Nodes which are part of the most routing paths are evaluated first. This helps to make as much routes as possible to be reachable as fast as possible.
|
|
||||||
2. It is some sort of cost compensation. The longest reachable path (highest cost) can always be evaluated first. The following scheme visualizes the tree structure. Nodes are evaluated from top to bottom and from left to right.
|
|
||||||
|
|
||||||
```
|
|
||||||
├------------
|
|
||||||
├---------
|
|
||||||
├-----
|
|
||||||
├----
|
|
||||||
├--
|
|
||||||
├--
|
|
||||||
└-
|
|
||||||
```
|
|
||||||
|
|
||||||
## Why doesn't this work with `http.Handler`?
|
|
||||||
|
|
||||||
**It does!** The router itself implements the `http.Handler` interface. Moreover the router provides convenient [adapters for `http.Handler`](https://godoc.org/github.com/julienschmidt/httprouter#Router.Handler)s and [`http.HandlerFunc`](https://godoc.org/github.com/julienschmidt/httprouter#Router.HandlerFunc)s which allows them to be used as a [`httprouter.Handle`](https://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) when registering a route.
|
|
||||||
|
|
||||||
Named parameters can be accessed `request.Context`:
|
|
||||||
|
|
||||||
```go
|
|
||||||
func Hello(w http.ResponseWriter, r *http.Request) {
|
|
||||||
params := httprouter.ParamsFromContext(r.Context())
|
|
||||||
|
|
||||||
fmt.Fprintf(w, "hello, %s!\n", params.ByName("name"))
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, one can also use `params := r.Context().Value(httprouter.ParamsKey)` instead of the helper function.
|
|
||||||
|
|
||||||
Just try it out for yourself, the usage of HttpRouter is very straightforward. The package is compact and minimalistic, but also probably one of the easiest routers to set up.
|
|
||||||
|
|
||||||
## Automatic OPTIONS responses and CORS
|
|
||||||
|
|
||||||
One might wish to modify automatic responses to OPTIONS requests, e.g. to support [CORS preflight requests](https://developer.mozilla.org/en-US/docs/Glossary/preflight_request) or to set other headers.
|
|
||||||
This can be achieved using the [`Router.GlobalOPTIONS`](https://godoc.org/github.com/julienschmidt/httprouter#Router.GlobalOPTIONS) handler:
|
|
||||||
|
|
||||||
```go
|
|
||||||
router.GlobalOPTIONS = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if r.Header.Get("Access-Control-Request-Method") != "" {
|
|
||||||
// Set CORS headers
|
|
||||||
header := w.Header()
|
|
||||||
header.Set("Access-Control-Allow-Methods", r.Header.Get("Allow"))
|
|
||||||
header.Set("Access-Control-Allow-Origin", "*")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Adjust status code to 204
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
## Where can I find Middleware *X*?
|
|
||||||
|
|
||||||
This package just provides a very efficient request router with a few extra features. The router is just a [`http.Handler`](https://golang.org/pkg/net/http/#Handler), you can chain any http.Handler compatible middleware before the router, for example the [Gorilla handlers](http://www.gorillatoolkit.org/pkg/handlers). Or you could [just write your own](https://justinas.org/writing-http-middleware-in-go/), it's very easy!
|
|
||||||
|
|
||||||
Alternatively, you could try [a web framework based on HttpRouter](#web-frameworks-based-on-httprouter).
|
|
||||||
|
|
||||||
### Multi-domain / Sub-domains
|
|
||||||
|
|
||||||
Here is a quick example: Does your server serve multiple domains / hosts?
|
|
||||||
You want to use sub-domains?
|
|
||||||
Define a router per host!
|
|
||||||
|
|
||||||
```go
|
|
||||||
// We need an object that implements the http.Handler interface.
|
|
||||||
// Therefore we need a type for which we implement the ServeHTTP method.
|
|
||||||
// We just use a map here, in which we map host names (with port) to http.Handlers
|
|
||||||
type HostSwitch map[string]http.Handler
|
|
||||||
|
|
||||||
// Implement the ServeHTTP method on our new type
|
|
||||||
func (hs HostSwitch) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// Check if a http.Handler is registered for the given host.
|
|
||||||
// If yes, use it to handle the request.
|
|
||||||
if handler := hs[r.Host]; handler != nil {
|
|
||||||
handler.ServeHTTP(w, r)
|
|
||||||
} else {
|
|
||||||
// Handle host names for which no handler is registered
|
|
||||||
http.Error(w, "Forbidden", 403) // Or Redirect?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// Initialize a router as usual
|
|
||||||
router := httprouter.New()
|
|
||||||
router.GET("/", Index)
|
|
||||||
router.GET("/hello/:name", Hello)
|
|
||||||
|
|
||||||
// Make a new HostSwitch and insert the router (our http handler)
|
|
||||||
// for example.com and port 12345
|
|
||||||
hs := make(HostSwitch)
|
|
||||||
hs["example.com:12345"] = router
|
|
||||||
|
|
||||||
// Use the HostSwitch to listen and serve on port 12345
|
|
||||||
log.Fatal(http.ListenAndServe(":12345", hs))
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Basic Authentication
|
|
||||||
|
|
||||||
Another quick example: Basic Authentication (RFC 2617) for handles:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/julienschmidt/httprouter"
|
|
||||||
)
|
|
||||||
|
|
||||||
func BasicAuth(h httprouter.Handle, requiredUser, requiredPassword string) httprouter.Handle {
|
|
||||||
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
|
|
||||||
// Get the Basic Authentication credentials
|
|
||||||
user, password, hasAuth := r.BasicAuth()
|
|
||||||
|
|
||||||
if hasAuth && user == requiredUser && password == requiredPassword {
|
|
||||||
// Delegate request to the given handle
|
|
||||||
h(w, r, ps)
|
|
||||||
} else {
|
|
||||||
// Request Basic Authentication otherwise
|
|
||||||
w.Header().Set("WWW-Authenticate", "Basic realm=Restricted")
|
|
||||||
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
|
||||||
fmt.Fprint(w, "Not protected!\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func Protected(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
|
||||||
fmt.Fprint(w, "Protected!\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
user := "gordon"
|
|
||||||
pass := "secret!"
|
|
||||||
|
|
||||||
router := httprouter.New()
|
|
||||||
router.GET("/", Index)
|
|
||||||
router.GET("/protected/", BasicAuth(Protected, user, pass))
|
|
||||||
|
|
||||||
log.Fatal(http.ListenAndServe(":8080", router))
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Chaining with the NotFound handler
|
|
||||||
|
|
||||||
**NOTE: It might be required to set [`Router.HandleMethodNotAllowed`](https://godoc.org/github.com/julienschmidt/httprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.**
|
|
||||||
|
|
||||||
You can use another [`http.Handler`](https://golang.org/pkg/net/http/#Handler), for example another router, to handle requests which could not be matched by this router by using the [`Router.NotFound`](https://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) handler. This allows chaining.
|
|
||||||
|
|
||||||
### Static files
|
|
||||||
|
|
||||||
The `NotFound` handler can for example be used to serve static files from the root path `/` (like an `index.html` file along with other assets):
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Serve static files from the ./public directory
|
|
||||||
router.NotFound = http.FileServer(http.Dir("public"))
|
|
||||||
```
|
|
||||||
|
|
||||||
But this approach sidesteps the strict core rules of this router to avoid routing problems. A cleaner approach is to use a distinct sub-path for serving files, like `/static/*filepath` or `/files/*filepath`.
|
|
||||||
|
|
||||||
## Web Frameworks based on HttpRouter
|
|
||||||
|
|
||||||
If the HttpRouter is a bit too minimalistic for you, you might try one of the following more high-level 3rd-party web frameworks building upon the HttpRouter package:
|
|
||||||
|
|
||||||
* [Ace](https://github.com/plimble/ace): Blazing fast Go Web Framework
|
|
||||||
* [api2go](https://github.com/manyminds/api2go): A JSON API Implementation for Go
|
|
||||||
* [Gin](https://github.com/gin-gonic/gin): Features a martini-like API with much better performance
|
|
||||||
* [Goat](https://github.com/bahlo/goat): A minimalistic REST API server in Go
|
|
||||||
* [goMiddlewareChain](https://github.com/TobiEiss/goMiddlewareChain): An express.js-like-middleware-chain
|
|
||||||
* [Hikaru](https://github.com/najeira/hikaru): Supports standalone and Google AppEngine
|
|
||||||
* [Hitch](https://github.com/nbio/hitch): Hitch ties httprouter, [httpcontext](https://github.com/nbio/httpcontext), and middleware up in a bow
|
|
||||||
* [httpway](https://github.com/corneldamian/httpway): Simple middleware extension with context for httprouter and a server with gracefully shutdown support
|
|
||||||
* [kami](https://github.com/guregu/kami): A tiny web framework using x/net/context
|
|
||||||
* [Medeina](https://github.com/imdario/medeina): Inspired by Ruby's Roda and Cuba
|
|
||||||
* [Neko](https://github.com/rocwong/neko): A lightweight web application framework for Golang
|
|
||||||
* [pbgo](https://github.com/chai2010/pbgo): pbgo is a mini RPC/REST framework based on Protobuf
|
|
||||||
* [River](https://github.com/abiosoft/river): River is a simple and lightweight REST server
|
|
||||||
* [siesta](https://github.com/VividCortex/siesta): Composable HTTP handlers with contexts
|
|
||||||
* [xmux](https://github.com/rs/xmux): xmux is a httprouter fork on top of xhandler (net/context aware)
|
|
||||||
3
vendor/github.com/julienschmidt/httprouter/go.mod
generated
vendored
3
vendor/github.com/julienschmidt/httprouter/go.mod
generated
vendored
@@ -1,3 +0,0 @@
|
|||||||
module github.com/julienschmidt/httprouter
|
|
||||||
|
|
||||||
go 1.7
|
|
||||||
123
vendor/github.com/julienschmidt/httprouter/path.go
generated
vendored
123
vendor/github.com/julienschmidt/httprouter/path.go
generated
vendored
@@ -1,123 +0,0 @@
|
|||||||
// Copyright 2013 Julien Schmidt. All rights reserved.
|
|
||||||
// Based on the path package, Copyright 2009 The Go Authors.
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be found
|
|
||||||
// in the LICENSE file.
|
|
||||||
|
|
||||||
package httprouter
|
|
||||||
|
|
||||||
// CleanPath is the URL version of path.Clean, it returns a canonical URL path
|
|
||||||
// for p, eliminating . and .. elements.
|
|
||||||
//
|
|
||||||
// The following rules are applied iteratively until no further processing can
|
|
||||||
// be done:
|
|
||||||
// 1. Replace multiple slashes with a single slash.
|
|
||||||
// 2. Eliminate each . path name element (the current directory).
|
|
||||||
// 3. Eliminate each inner .. path name element (the parent directory)
|
|
||||||
// along with the non-.. element that precedes it.
|
|
||||||
// 4. Eliminate .. elements that begin a rooted path:
|
|
||||||
// that is, replace "/.." by "/" at the beginning of a path.
|
|
||||||
//
|
|
||||||
// If the result of this process is an empty string, "/" is returned
|
|
||||||
func CleanPath(p string) string {
|
|
||||||
// Turn empty string into "/"
|
|
||||||
if p == "" {
|
|
||||||
return "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
n := len(p)
|
|
||||||
var buf []byte
|
|
||||||
|
|
||||||
// Invariants:
|
|
||||||
// reading from path; r is index of next byte to process.
|
|
||||||
// writing to buf; w is index of next byte to write.
|
|
||||||
|
|
||||||
// path must start with '/'
|
|
||||||
r := 1
|
|
||||||
w := 1
|
|
||||||
|
|
||||||
if p[0] != '/' {
|
|
||||||
r = 0
|
|
||||||
buf = make([]byte, n+1)
|
|
||||||
buf[0] = '/'
|
|
||||||
}
|
|
||||||
|
|
||||||
trailing := n > 1 && p[n-1] == '/'
|
|
||||||
|
|
||||||
// A bit more clunky without a 'lazybuf' like the path package, but the loop
|
|
||||||
// gets completely inlined (bufApp). So in contrast to the path package this
|
|
||||||
// loop has no expensive function calls (except 1x make)
|
|
||||||
|
|
||||||
for r < n {
|
|
||||||
switch {
|
|
||||||
case p[r] == '/':
|
|
||||||
// empty path element, trailing slash is added after the end
|
|
||||||
r++
|
|
||||||
|
|
||||||
case p[r] == '.' && r+1 == n:
|
|
||||||
trailing = true
|
|
||||||
r++
|
|
||||||
|
|
||||||
case p[r] == '.' && p[r+1] == '/':
|
|
||||||
// . element
|
|
||||||
r += 2
|
|
||||||
|
|
||||||
case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'):
|
|
||||||
// .. element: remove to last /
|
|
||||||
r += 3
|
|
||||||
|
|
||||||
if w > 1 {
|
|
||||||
// can backtrack
|
|
||||||
w--
|
|
||||||
|
|
||||||
if buf == nil {
|
|
||||||
for w > 1 && p[w] != '/' {
|
|
||||||
w--
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for w > 1 && buf[w] != '/' {
|
|
||||||
w--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
// real path element.
|
|
||||||
// add slash if needed
|
|
||||||
if w > 1 {
|
|
||||||
bufApp(&buf, p, w, '/')
|
|
||||||
w++
|
|
||||||
}
|
|
||||||
|
|
||||||
// copy element
|
|
||||||
for r < n && p[r] != '/' {
|
|
||||||
bufApp(&buf, p, w, p[r])
|
|
||||||
w++
|
|
||||||
r++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// re-append trailing slash
|
|
||||||
if trailing && w > 1 {
|
|
||||||
bufApp(&buf, p, w, '/')
|
|
||||||
w++
|
|
||||||
}
|
|
||||||
|
|
||||||
if buf == nil {
|
|
||||||
return p[:w]
|
|
||||||
}
|
|
||||||
return string(buf[:w])
|
|
||||||
}
|
|
||||||
|
|
||||||
// internal helper to lazily create a buffer if necessary
|
|
||||||
func bufApp(buf *[]byte, s string, w int, c byte) {
|
|
||||||
if *buf == nil {
|
|
||||||
if s[w] == c {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
*buf = make([]byte, len(s))
|
|
||||||
copy(*buf, s[:w])
|
|
||||||
}
|
|
||||||
(*buf)[w] = c
|
|
||||||
}
|
|
||||||
452
vendor/github.com/julienschmidt/httprouter/router.go
generated
vendored
452
vendor/github.com/julienschmidt/httprouter/router.go
generated
vendored
@@ -1,452 +0,0 @@
|
|||||||
// Copyright 2013 Julien Schmidt. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be found
|
|
||||||
// in the LICENSE file.
|
|
||||||
|
|
||||||
// Package httprouter is a trie based high performance HTTP request router.
|
|
||||||
//
|
|
||||||
// A trivial example is:
|
|
||||||
//
|
|
||||||
// package main
|
|
||||||
//
|
|
||||||
// import (
|
|
||||||
// "fmt"
|
|
||||||
// "github.com/julienschmidt/httprouter"
|
|
||||||
// "net/http"
|
|
||||||
// "log"
|
|
||||||
// )
|
|
||||||
//
|
|
||||||
// func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
|
||||||
// fmt.Fprint(w, "Welcome!\n")
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
|
|
||||||
// fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// func main() {
|
|
||||||
// router := httprouter.New()
|
|
||||||
// router.GET("/", Index)
|
|
||||||
// router.GET("/hello/:name", Hello)
|
|
||||||
//
|
|
||||||
// log.Fatal(http.ListenAndServe(":8080", router))
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// The router matches incoming requests by the request method and the path.
|
|
||||||
// If a handle is registered for this path and method, the router delegates the
|
|
||||||
// request to that function.
|
|
||||||
// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to
|
|
||||||
// register handles, for all other methods router.Handle can be used.
|
|
||||||
//
|
|
||||||
// The registered path, against which the router matches incoming requests, can
|
|
||||||
// contain two types of parameters:
|
|
||||||
// Syntax Type
|
|
||||||
// :name named parameter
|
|
||||||
// *name catch-all parameter
|
|
||||||
//
|
|
||||||
// Named parameters are dynamic path segments. They match anything until the
|
|
||||||
// next '/' or the path end:
|
|
||||||
// Path: /blog/:category/:post
|
|
||||||
//
|
|
||||||
// Requests:
|
|
||||||
// /blog/go/request-routers match: category="go", post="request-routers"
|
|
||||||
// /blog/go/request-routers/ no match, but the router would redirect
|
|
||||||
// /blog/go/ no match
|
|
||||||
// /blog/go/request-routers/comments no match
|
|
||||||
//
|
|
||||||
// Catch-all parameters match anything until the path end, including the
|
|
||||||
// directory index (the '/' before the catch-all). Since they match anything
|
|
||||||
// until the end, catch-all parameters must always be the final path element.
|
|
||||||
// Path: /files/*filepath
|
|
||||||
//
|
|
||||||
// Requests:
|
|
||||||
// /files/ match: filepath="/"
|
|
||||||
// /files/LICENSE match: filepath="/LICENSE"
|
|
||||||
// /files/templates/article.html match: filepath="/templates/article.html"
|
|
||||||
// /files no match, but the router would redirect
|
|
||||||
//
|
|
||||||
// The value of parameters is saved as a slice of the Param struct, consisting
|
|
||||||
// each of a key and a value. The slice is passed to the Handle func as a third
|
|
||||||
// parameter.
|
|
||||||
// There are two ways to retrieve the value of a parameter:
|
|
||||||
// // by the name of the parameter
|
|
||||||
// user := ps.ByName("user") // defined by :user or *user
|
|
||||||
//
|
|
||||||
// // by the index of the parameter. This way you can also get the name (key)
|
|
||||||
// thirdKey := ps[2].Key // the name of the 3rd parameter
|
|
||||||
// thirdValue := ps[2].Value // the value of the 3rd parameter
|
|
||||||
package httprouter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Handle is a function that can be registered to a route to handle HTTP
|
|
||||||
// requests. Like http.HandlerFunc, but has a third parameter for the values of
|
|
||||||
// wildcards (variables).
|
|
||||||
type Handle func(http.ResponseWriter, *http.Request, Params)
|
|
||||||
|
|
||||||
// Param is a single URL parameter, consisting of a key and a value.
|
|
||||||
type Param struct {
|
|
||||||
Key string
|
|
||||||
Value string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Params is a Param-slice, as returned by the router.
|
|
||||||
// The slice is ordered, the first URL parameter is also the first slice value.
|
|
||||||
// It is therefore safe to read values by the index.
|
|
||||||
type Params []Param
|
|
||||||
|
|
||||||
// ByName returns the value of the first Param which key matches the given name.
|
|
||||||
// If no matching Param is found, an empty string is returned.
|
|
||||||
func (ps Params) ByName(name string) string {
|
|
||||||
for i := range ps {
|
|
||||||
if ps[i].Key == name {
|
|
||||||
return ps[i].Value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
type paramsKey struct{}
|
|
||||||
|
|
||||||
// ParamsKey is the request context key under which URL params are stored.
|
|
||||||
var ParamsKey = paramsKey{}
|
|
||||||
|
|
||||||
// ParamsFromContext pulls the URL parameters from a request context,
|
|
||||||
// or returns nil if none are present.
|
|
||||||
func ParamsFromContext(ctx context.Context) Params {
|
|
||||||
p, _ := ctx.Value(ParamsKey).(Params)
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Router is a http.Handler which can be used to dispatch requests to different
|
|
||||||
// handler functions via configurable routes
|
|
||||||
type Router struct {
|
|
||||||
trees map[string]*node
|
|
||||||
|
|
||||||
// Enables automatic redirection if the current route can't be matched but a
|
|
||||||
// handler for the path with (without) the trailing slash exists.
|
|
||||||
// For example if /foo/ is requested but a route only exists for /foo, the
|
|
||||||
// client is redirected to /foo with http status code 301 for GET requests
|
|
||||||
// and 307 for all other request methods.
|
|
||||||
RedirectTrailingSlash bool
|
|
||||||
|
|
||||||
// If enabled, the router tries to fix the current request path, if no
|
|
||||||
// handle is registered for it.
|
|
||||||
// First superfluous path elements like ../ or // are removed.
|
|
||||||
// Afterwards the router does a case-insensitive lookup of the cleaned path.
|
|
||||||
// If a handle can be found for this route, the router makes a redirection
|
|
||||||
// to the corrected path with status code 301 for GET requests and 307 for
|
|
||||||
// all other request methods.
|
|
||||||
// For example /FOO and /..//Foo could be redirected to /foo.
|
|
||||||
// RedirectTrailingSlash is independent of this option.
|
|
||||||
RedirectFixedPath bool
|
|
||||||
|
|
||||||
// If enabled, the router checks if another method is allowed for the
|
|
||||||
// current route, if the current request can not be routed.
|
|
||||||
// If this is the case, the request is answered with 'Method Not Allowed'
|
|
||||||
// and HTTP status code 405.
|
|
||||||
// If no other Method is allowed, the request is delegated to the NotFound
|
|
||||||
// handler.
|
|
||||||
HandleMethodNotAllowed bool
|
|
||||||
|
|
||||||
// If enabled, the router automatically replies to OPTIONS requests.
|
|
||||||
// Custom OPTIONS handlers take priority over automatic replies.
|
|
||||||
HandleOPTIONS bool
|
|
||||||
|
|
||||||
// An optional http.Handler that is called on automatic OPTIONS requests.
|
|
||||||
// The handler is only called if HandleOPTIONS is true and no OPTIONS
|
|
||||||
// handler for the specific path was set.
|
|
||||||
// The "Allowed" header is set before calling the handler.
|
|
||||||
GlobalOPTIONS http.Handler
|
|
||||||
|
|
||||||
// Cached value of global (*) allowed methods
|
|
||||||
globalAllowed string
|
|
||||||
|
|
||||||
// Configurable http.Handler which is called when no matching route is
|
|
||||||
// found. If it is not set, http.NotFound is used.
|
|
||||||
NotFound http.Handler
|
|
||||||
|
|
||||||
// Configurable http.Handler which is called when a request
|
|
||||||
// cannot be routed and HandleMethodNotAllowed is true.
|
|
||||||
// If it is not set, http.Error with http.StatusMethodNotAllowed is used.
|
|
||||||
// The "Allow" header with allowed request methods is set before the handler
|
|
||||||
// is called.
|
|
||||||
MethodNotAllowed http.Handler
|
|
||||||
|
|
||||||
// Function to handle panics recovered from http handlers.
|
|
||||||
// It should be used to generate a error page and return the http error code
|
|
||||||
// 500 (Internal Server Error).
|
|
||||||
// The handler can be used to keep your server from crashing because of
|
|
||||||
// unrecovered panics.
|
|
||||||
PanicHandler func(http.ResponseWriter, *http.Request, interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure the Router conforms with the http.Handler interface
|
|
||||||
var _ http.Handler = New()
|
|
||||||
|
|
||||||
// New returns a new initialized Router.
|
|
||||||
// Path auto-correction, including trailing slashes, is enabled by default.
|
|
||||||
func New() *Router {
|
|
||||||
return &Router{
|
|
||||||
RedirectTrailingSlash: true,
|
|
||||||
RedirectFixedPath: true,
|
|
||||||
HandleMethodNotAllowed: true,
|
|
||||||
HandleOPTIONS: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GET is a shortcut for router.Handle(http.MethodGet, path, handle)
|
|
||||||
func (r *Router) GET(path string, handle Handle) {
|
|
||||||
r.Handle(http.MethodGet, path, handle)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HEAD is a shortcut for router.Handle(http.MethodHead, path, handle)
|
|
||||||
func (r *Router) HEAD(path string, handle Handle) {
|
|
||||||
r.Handle(http.MethodHead, path, handle)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OPTIONS is a shortcut for router.Handle(http.MethodOptions, path, handle)
|
|
||||||
func (r *Router) OPTIONS(path string, handle Handle) {
|
|
||||||
r.Handle(http.MethodOptions, path, handle)
|
|
||||||
}
|
|
||||||
|
|
||||||
// POST is a shortcut for router.Handle(http.MethodPost, path, handle)
|
|
||||||
func (r *Router) POST(path string, handle Handle) {
|
|
||||||
r.Handle(http.MethodPost, path, handle)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PUT is a shortcut for router.Handle(http.MethodPut, path, handle)
|
|
||||||
func (r *Router) PUT(path string, handle Handle) {
|
|
||||||
r.Handle(http.MethodPut, path, handle)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PATCH is a shortcut for router.Handle(http.MethodPatch, path, handle)
|
|
||||||
func (r *Router) PATCH(path string, handle Handle) {
|
|
||||||
r.Handle(http.MethodPatch, path, handle)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DELETE is a shortcut for router.Handle(http.MethodDelete, path, handle)
|
|
||||||
func (r *Router) DELETE(path string, handle Handle) {
|
|
||||||
r.Handle(http.MethodDelete, path, handle)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle registers a new request handle with the given path and method.
|
|
||||||
//
|
|
||||||
// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut
|
|
||||||
// functions can be used.
|
|
||||||
//
|
|
||||||
// This function is intended for bulk loading and to allow the usage of less
|
|
||||||
// frequently used, non-standardized or custom methods (e.g. for internal
|
|
||||||
// communication with a proxy).
|
|
||||||
func (r *Router) Handle(method, path string, handle Handle) {
|
|
||||||
if len(path) < 1 || path[0] != '/' {
|
|
||||||
panic("path must begin with '/' in path '" + path + "'")
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.trees == nil {
|
|
||||||
r.trees = make(map[string]*node)
|
|
||||||
}
|
|
||||||
|
|
||||||
root := r.trees[method]
|
|
||||||
if root == nil {
|
|
||||||
root = new(node)
|
|
||||||
r.trees[method] = root
|
|
||||||
|
|
||||||
r.globalAllowed = r.allowed("*", "")
|
|
||||||
}
|
|
||||||
|
|
||||||
root.addRoute(path, handle)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handler is an adapter which allows the usage of an http.Handler as a
|
|
||||||
// request handle.
|
|
||||||
// The Params are available in the request context under ParamsKey.
|
|
||||||
func (r *Router) Handler(method, path string, handler http.Handler) {
|
|
||||||
r.Handle(method, path,
|
|
||||||
func(w http.ResponseWriter, req *http.Request, p Params) {
|
|
||||||
if len(p) > 0 {
|
|
||||||
ctx := req.Context()
|
|
||||||
ctx = context.WithValue(ctx, ParamsKey, p)
|
|
||||||
req = req.WithContext(ctx)
|
|
||||||
}
|
|
||||||
handler.ServeHTTP(w, req)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a
|
|
||||||
// request handle.
|
|
||||||
func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) {
|
|
||||||
r.Handler(method, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeFiles serves files from the given file system root.
|
|
||||||
// The path must end with "/*filepath", files are then served from the local
|
|
||||||
// path /defined/root/dir/*filepath.
|
|
||||||
// For example if root is "/etc" and *filepath is "passwd", the local file
|
|
||||||
// "/etc/passwd" would be served.
|
|
||||||
// Internally a http.FileServer is used, therefore http.NotFound is used instead
|
|
||||||
// of the Router's NotFound handler.
|
|
||||||
// To use the operating system's file system implementation,
|
|
||||||
// use http.Dir:
|
|
||||||
// router.ServeFiles("/src/*filepath", http.Dir("/var/www"))
|
|
||||||
func (r *Router) ServeFiles(path string, root http.FileSystem) {
|
|
||||||
if len(path) < 10 || path[len(path)-10:] != "/*filepath" {
|
|
||||||
panic("path must end with /*filepath in path '" + path + "'")
|
|
||||||
}
|
|
||||||
|
|
||||||
fileServer := http.FileServer(root)
|
|
||||||
|
|
||||||
r.GET(path, func(w http.ResponseWriter, req *http.Request, ps Params) {
|
|
||||||
req.URL.Path = ps.ByName("filepath")
|
|
||||||
fileServer.ServeHTTP(w, req)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Router) recv(w http.ResponseWriter, req *http.Request) {
|
|
||||||
if rcv := recover(); rcv != nil {
|
|
||||||
r.PanicHandler(w, req, rcv)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lookup allows the manual lookup of a method + path combo.
|
|
||||||
// This is e.g. useful to build a framework around this router.
|
|
||||||
// If the path was found, it returns the handle function and the path parameter
|
|
||||||
// values. Otherwise the third return value indicates whether a redirection to
|
|
||||||
// the same path with an extra / without the trailing slash should be performed.
|
|
||||||
func (r *Router) Lookup(method, path string) (Handle, Params, bool) {
|
|
||||||
if root := r.trees[method]; root != nil {
|
|
||||||
return root.getValue(path)
|
|
||||||
}
|
|
||||||
return nil, nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Router) allowed(path, reqMethod string) (allow string) {
|
|
||||||
allowed := make([]string, 0, 9)
|
|
||||||
|
|
||||||
if path == "*" { // server-wide
|
|
||||||
// empty method is used for internal calls to refresh the cache
|
|
||||||
if reqMethod == "" {
|
|
||||||
for method := range r.trees {
|
|
||||||
if method == http.MethodOptions {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Add request method to list of allowed methods
|
|
||||||
allowed = append(allowed, method)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return r.globalAllowed
|
|
||||||
}
|
|
||||||
} else { // specific path
|
|
||||||
for method := range r.trees {
|
|
||||||
// Skip the requested method - we already tried this one
|
|
||||||
if method == reqMethod || method == http.MethodOptions {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
handle, _, _ := r.trees[method].getValue(path)
|
|
||||||
if handle != nil {
|
|
||||||
// Add request method to list of allowed methods
|
|
||||||
allowed = append(allowed, method)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(allowed) > 0 {
|
|
||||||
// Add request method to list of allowed methods
|
|
||||||
allowed = append(allowed, http.MethodOptions)
|
|
||||||
|
|
||||||
// Sort allowed methods.
|
|
||||||
// sort.Strings(allowed) unfortunately causes unnecessary allocations
|
|
||||||
// due to allowed being moved to the heap and interface conversion
|
|
||||||
for i, l := 1, len(allowed); i < l; i++ {
|
|
||||||
for j := i; j > 0 && allowed[j] < allowed[j-1]; j-- {
|
|
||||||
allowed[j], allowed[j-1] = allowed[j-1], allowed[j]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// return as comma separated list
|
|
||||||
return strings.Join(allowed, ", ")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeHTTP makes the router implement the http.Handler interface.
|
|
||||||
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
|
||||||
if r.PanicHandler != nil {
|
|
||||||
defer r.recv(w, req)
|
|
||||||
}
|
|
||||||
|
|
||||||
path := req.URL.Path
|
|
||||||
|
|
||||||
if root := r.trees[req.Method]; root != nil {
|
|
||||||
if handle, ps, tsr := root.getValue(path); handle != nil {
|
|
||||||
handle(w, req, ps)
|
|
||||||
return
|
|
||||||
} else if req.Method != http.MethodConnect && path != "/" {
|
|
||||||
code := 301 // Permanent redirect, request with GET method
|
|
||||||
if req.Method != http.MethodGet {
|
|
||||||
// Temporary redirect, request with same method
|
|
||||||
// As of Go 1.3, Go does not support status code 308.
|
|
||||||
code = 307
|
|
||||||
}
|
|
||||||
|
|
||||||
if tsr && r.RedirectTrailingSlash {
|
|
||||||
if len(path) > 1 && path[len(path)-1] == '/' {
|
|
||||||
req.URL.Path = path[:len(path)-1]
|
|
||||||
} else {
|
|
||||||
req.URL.Path = path + "/"
|
|
||||||
}
|
|
||||||
http.Redirect(w, req, req.URL.String(), code)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to fix the request path
|
|
||||||
if r.RedirectFixedPath {
|
|
||||||
fixedPath, found := root.findCaseInsensitivePath(
|
|
||||||
CleanPath(path),
|
|
||||||
r.RedirectTrailingSlash,
|
|
||||||
)
|
|
||||||
if found {
|
|
||||||
req.URL.Path = string(fixedPath)
|
|
||||||
http.Redirect(w, req, req.URL.String(), code)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Method == http.MethodOptions && r.HandleOPTIONS {
|
|
||||||
// Handle OPTIONS requests
|
|
||||||
if allow := r.allowed(path, http.MethodOptions); allow != "" {
|
|
||||||
w.Header().Set("Allow", allow)
|
|
||||||
if r.GlobalOPTIONS != nil {
|
|
||||||
r.GlobalOPTIONS.ServeHTTP(w, req)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else if r.HandleMethodNotAllowed { // Handle 405
|
|
||||||
if allow := r.allowed(path, req.Method); allow != "" {
|
|
||||||
w.Header().Set("Allow", allow)
|
|
||||||
if r.MethodNotAllowed != nil {
|
|
||||||
r.MethodNotAllowed.ServeHTTP(w, req)
|
|
||||||
} else {
|
|
||||||
http.Error(w,
|
|
||||||
http.StatusText(http.StatusMethodNotAllowed),
|
|
||||||
http.StatusMethodNotAllowed,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle 404
|
|
||||||
if r.NotFound != nil {
|
|
||||||
r.NotFound.ServeHTTP(w, req)
|
|
||||||
} else {
|
|
||||||
http.NotFound(w, req)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
666
vendor/github.com/julienschmidt/httprouter/tree.go
generated
vendored
666
vendor/github.com/julienschmidt/httprouter/tree.go
generated
vendored
@@ -1,666 +0,0 @@
|
|||||||
// Copyright 2013 Julien Schmidt. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be found
|
|
||||||
// in the LICENSE file.
|
|
||||||
|
|
||||||
package httprouter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
func min(a, b int) int {
|
|
||||||
if a <= b {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
const maxParamCount uint8 = ^uint8(0)
|
|
||||||
|
|
||||||
func countParams(path string) uint8 {
|
|
||||||
var n uint
|
|
||||||
for i := 0; i < len(path); i++ {
|
|
||||||
if path[i] != ':' && path[i] != '*' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
if n >= uint(maxParamCount) {
|
|
||||||
return maxParamCount
|
|
||||||
}
|
|
||||||
|
|
||||||
return uint8(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
type nodeType uint8
|
|
||||||
|
|
||||||
const (
|
|
||||||
static nodeType = iota // default
|
|
||||||
root
|
|
||||||
param
|
|
||||||
catchAll
|
|
||||||
)
|
|
||||||
|
|
||||||
type node struct {
|
|
||||||
path string
|
|
||||||
wildChild bool
|
|
||||||
nType nodeType
|
|
||||||
maxParams uint8
|
|
||||||
priority uint32
|
|
||||||
indices string
|
|
||||||
children []*node
|
|
||||||
handle Handle
|
|
||||||
}
|
|
||||||
|
|
||||||
// increments priority of the given child and reorders if necessary
|
|
||||||
func (n *node) incrementChildPrio(pos int) int {
|
|
||||||
n.children[pos].priority++
|
|
||||||
prio := n.children[pos].priority
|
|
||||||
|
|
||||||
// adjust position (move to front)
|
|
||||||
newPos := pos
|
|
||||||
for newPos > 0 && n.children[newPos-1].priority < prio {
|
|
||||||
// swap node positions
|
|
||||||
n.children[newPos-1], n.children[newPos] = n.children[newPos], n.children[newPos-1]
|
|
||||||
|
|
||||||
newPos--
|
|
||||||
}
|
|
||||||
|
|
||||||
// build new index char string
|
|
||||||
if newPos != pos {
|
|
||||||
n.indices = n.indices[:newPos] + // unchanged prefix, might be empty
|
|
||||||
n.indices[pos:pos+1] + // the index char we move
|
|
||||||
n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos'
|
|
||||||
}
|
|
||||||
|
|
||||||
return newPos
|
|
||||||
}
|
|
||||||
|
|
||||||
// addRoute adds a node with the given handle to the path.
|
|
||||||
// Not concurrency-safe!
|
|
||||||
func (n *node) addRoute(path string, handle Handle) {
|
|
||||||
fullPath := path
|
|
||||||
n.priority++
|
|
||||||
numParams := countParams(path)
|
|
||||||
|
|
||||||
// non-empty tree
|
|
||||||
if len(n.path) > 0 || len(n.children) > 0 {
|
|
||||||
walk:
|
|
||||||
for {
|
|
||||||
// Update maxParams of the current node
|
|
||||||
if numParams > n.maxParams {
|
|
||||||
n.maxParams = numParams
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find the longest common prefix.
|
|
||||||
// This also implies that the common prefix contains no ':' or '*'
|
|
||||||
// since the existing key can't contain those chars.
|
|
||||||
i := 0
|
|
||||||
max := min(len(path), len(n.path))
|
|
||||||
for i < max && path[i] == n.path[i] {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
// Split edge
|
|
||||||
if i < len(n.path) {
|
|
||||||
child := node{
|
|
||||||
path: n.path[i:],
|
|
||||||
wildChild: n.wildChild,
|
|
||||||
nType: static,
|
|
||||||
indices: n.indices,
|
|
||||||
children: n.children,
|
|
||||||
handle: n.handle,
|
|
||||||
priority: n.priority - 1,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update maxParams (max of all children)
|
|
||||||
for i := range child.children {
|
|
||||||
if child.children[i].maxParams > child.maxParams {
|
|
||||||
child.maxParams = child.children[i].maxParams
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
n.children = []*node{&child}
|
|
||||||
// []byte for proper unicode char conversion, see #65
|
|
||||||
n.indices = string([]byte{n.path[i]})
|
|
||||||
n.path = path[:i]
|
|
||||||
n.handle = nil
|
|
||||||
n.wildChild = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make new node a child of this node
|
|
||||||
if i < len(path) {
|
|
||||||
path = path[i:]
|
|
||||||
|
|
||||||
if n.wildChild {
|
|
||||||
n = n.children[0]
|
|
||||||
n.priority++
|
|
||||||
|
|
||||||
// Update maxParams of the child node
|
|
||||||
if numParams > n.maxParams {
|
|
||||||
n.maxParams = numParams
|
|
||||||
}
|
|
||||||
numParams--
|
|
||||||
|
|
||||||
// Check if the wildcard matches
|
|
||||||
if len(path) >= len(n.path) && n.path == path[:len(n.path)] &&
|
|
||||||
// Adding a child to a catchAll is not possible
|
|
||||||
n.nType != catchAll &&
|
|
||||||
// Check for longer wildcard, e.g. :name and :names
|
|
||||||
(len(n.path) >= len(path) || path[len(n.path)] == '/') {
|
|
||||||
continue walk
|
|
||||||
} else {
|
|
||||||
// Wildcard conflict
|
|
||||||
var pathSeg string
|
|
||||||
if n.nType == catchAll {
|
|
||||||
pathSeg = path
|
|
||||||
} else {
|
|
||||||
pathSeg = strings.SplitN(path, "/", 2)[0]
|
|
||||||
}
|
|
||||||
prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path
|
|
||||||
panic("'" + pathSeg +
|
|
||||||
"' in new path '" + fullPath +
|
|
||||||
"' conflicts with existing wildcard '" + n.path +
|
|
||||||
"' in existing prefix '" + prefix +
|
|
||||||
"'")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
c := path[0]
|
|
||||||
|
|
||||||
// slash after param
|
|
||||||
if n.nType == param && c == '/' && len(n.children) == 1 {
|
|
||||||
n = n.children[0]
|
|
||||||
n.priority++
|
|
||||||
continue walk
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if a child with the next path byte exists
|
|
||||||
for i := 0; i < len(n.indices); i++ {
|
|
||||||
if c == n.indices[i] {
|
|
||||||
i = n.incrementChildPrio(i)
|
|
||||||
n = n.children[i]
|
|
||||||
continue walk
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise insert it
|
|
||||||
if c != ':' && c != '*' {
|
|
||||||
// []byte for proper unicode char conversion, see #65
|
|
||||||
n.indices += string([]byte{c})
|
|
||||||
child := &node{
|
|
||||||
maxParams: numParams,
|
|
||||||
}
|
|
||||||
n.children = append(n.children, child)
|
|
||||||
n.incrementChildPrio(len(n.indices) - 1)
|
|
||||||
n = child
|
|
||||||
}
|
|
||||||
n.insertChild(numParams, path, fullPath, handle)
|
|
||||||
return
|
|
||||||
|
|
||||||
} else if i == len(path) { // Make node a (in-path) leaf
|
|
||||||
if n.handle != nil {
|
|
||||||
panic("a handle is already registered for path '" + fullPath + "'")
|
|
||||||
}
|
|
||||||
n.handle = handle
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else { // Empty tree
|
|
||||||
n.insertChild(numParams, path, fullPath, handle)
|
|
||||||
n.nType = root
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *node) insertChild(numParams uint8, path, fullPath string, handle Handle) {
|
|
||||||
var offset int // already handled bytes of the path
|
|
||||||
|
|
||||||
// find prefix until first wildcard (beginning with ':'' or '*'')
|
|
||||||
for i, max := 0, len(path); numParams > 0; i++ {
|
|
||||||
c := path[i]
|
|
||||||
if c != ':' && c != '*' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// find wildcard end (either '/' or path end)
|
|
||||||
end := i + 1
|
|
||||||
for end < max && path[end] != '/' {
|
|
||||||
switch path[end] {
|
|
||||||
// the wildcard name must not contain ':' and '*'
|
|
||||||
case ':', '*':
|
|
||||||
panic("only one wildcard per path segment is allowed, has: '" +
|
|
||||||
path[i:] + "' in path '" + fullPath + "'")
|
|
||||||
default:
|
|
||||||
end++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if this Node existing children which would be
|
|
||||||
// unreachable if we insert the wildcard here
|
|
||||||
if len(n.children) > 0 {
|
|
||||||
panic("wildcard route '" + path[i:end] +
|
|
||||||
"' conflicts with existing children in path '" + fullPath + "'")
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if the wildcard has a name
|
|
||||||
if end-i < 2 {
|
|
||||||
panic("wildcards must be named with a non-empty name in path '" + fullPath + "'")
|
|
||||||
}
|
|
||||||
|
|
||||||
if c == ':' { // param
|
|
||||||
// split path at the beginning of the wildcard
|
|
||||||
if i > 0 {
|
|
||||||
n.path = path[offset:i]
|
|
||||||
offset = i
|
|
||||||
}
|
|
||||||
|
|
||||||
child := &node{
|
|
||||||
nType: param,
|
|
||||||
maxParams: numParams,
|
|
||||||
}
|
|
||||||
n.children = []*node{child}
|
|
||||||
n.wildChild = true
|
|
||||||
n = child
|
|
||||||
n.priority++
|
|
||||||
numParams--
|
|
||||||
|
|
||||||
// if the path doesn't end with the wildcard, then there
|
|
||||||
// will be another non-wildcard subpath starting with '/'
|
|
||||||
if end < max {
|
|
||||||
n.path = path[offset:end]
|
|
||||||
offset = end
|
|
||||||
|
|
||||||
child := &node{
|
|
||||||
maxParams: numParams,
|
|
||||||
priority: 1,
|
|
||||||
}
|
|
||||||
n.children = []*node{child}
|
|
||||||
n = child
|
|
||||||
}
|
|
||||||
|
|
||||||
} else { // catchAll
|
|
||||||
if end != max || numParams > 1 {
|
|
||||||
panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(n.path) > 0 && n.path[len(n.path)-1] == '/' {
|
|
||||||
panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'")
|
|
||||||
}
|
|
||||||
|
|
||||||
// currently fixed width 1 for '/'
|
|
||||||
i--
|
|
||||||
if path[i] != '/' {
|
|
||||||
panic("no / before catch-all in path '" + fullPath + "'")
|
|
||||||
}
|
|
||||||
|
|
||||||
n.path = path[offset:i]
|
|
||||||
|
|
||||||
// first node: catchAll node with empty path
|
|
||||||
child := &node{
|
|
||||||
wildChild: true,
|
|
||||||
nType: catchAll,
|
|
||||||
maxParams: 1,
|
|
||||||
}
|
|
||||||
// update maxParams of the parent node
|
|
||||||
if n.maxParams < 1 {
|
|
||||||
n.maxParams = 1
|
|
||||||
}
|
|
||||||
n.children = []*node{child}
|
|
||||||
n.indices = string(path[i])
|
|
||||||
n = child
|
|
||||||
n.priority++
|
|
||||||
|
|
||||||
// second node: node holding the variable
|
|
||||||
child = &node{
|
|
||||||
path: path[i:],
|
|
||||||
nType: catchAll,
|
|
||||||
maxParams: 1,
|
|
||||||
handle: handle,
|
|
||||||
priority: 1,
|
|
||||||
}
|
|
||||||
n.children = []*node{child}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// insert remaining path part and handle to the leaf
|
|
||||||
n.path = path[offset:]
|
|
||||||
n.handle = handle
|
|
||||||
}
|
|
||||||
|
|
||||||
// getValue returns the handle registered with the given path (key). The
// values of wildcards are saved to a map.
// If no handle can be found, a TSR (trailing slash redirect) recommendation is
// made if a handle exists with an extra (without the) trailing slash for the
// given path.
func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) {
walk: // outer loop for walking the tree
	for {
		if len(path) > len(n.path) {
			if path[:len(n.path)] == n.path {
				// Consume the matched prefix before descending.
				path = path[len(n.path):]
				// If this node does not have a wildcard (param or catchAll)
				// child, we can just look up the next child node and continue
				// to walk down the tree
				if !n.wildChild {
					c := path[0]
					for i := 0; i < len(n.indices); i++ {
						if c == n.indices[i] {
							n = n.children[i]
							continue walk
						}
					}

					// Nothing found.
					// We can recommend to redirect to the same URL without a
					// trailing slash if a leaf exists for that path.
					tsr = (path == "/" && n.handle != nil)
					return

				}

				// handle wildcard child (a wildChild node has exactly one
				// child, which is either a param or a catchAll node)
				n = n.children[0]
				switch n.nType {
				case param:
					// find param end (either '/' or path end)
					end := 0
					for end < len(path) && path[end] != '/' {
						end++
					}

					// save param value
					if p == nil {
						// lazy allocation
						p = make(Params, 0, n.maxParams)
					}
					i := len(p)
					p = p[:i+1] // expand slice within preallocated capacity
					p[i].Key = n.path[1:]
					p[i].Value = path[:end]

					// we need to go deeper!
					if end < len(path) {
						if len(n.children) > 0 {
							path = path[end:]
							n = n.children[0]
							continue walk
						}

						// ... but we can't; recommend TSR when only a
						// trailing slash remains after the param segment
						tsr = (len(path) == end+1)
						return
					}

					if handle = n.handle; handle != nil {
						return
					} else if len(n.children) == 1 {
						// No handle found. Check if a handle for this path + a
						// trailing slash exists for TSR recommendation
						n = n.children[0]
						tsr = (n.path == "/" && n.handle != nil)
					}

					return

				case catchAll:
					// save param value
					if p == nil {
						// lazy allocation
						p = make(Params, 0, n.maxParams)
					}
					i := len(p)
					p = p[:i+1] // expand slice within preallocated capacity
					// n.path has the form "/*name"; skip the leading "/*"
					p[i].Key = n.path[2:]
					p[i].Value = path

					handle = n.handle
					return

				default:
					panic("invalid node type")
				}
			}
		} else if path == n.path {
			// We should have reached the node containing the handle.
			// Check if this node has a handle registered.
			if handle = n.handle; handle != nil {
				return
			}

			if path == "/" && n.wildChild && n.nType != root {
				tsr = true
				return
			}

			// No handle found. Check if a handle for this path + a
			// trailing slash exists for trailing slash recommendation
			for i := 0; i < len(n.indices); i++ {
				if n.indices[i] == '/' {
					n = n.children[i]
					tsr = (len(n.path) == 1 && n.handle != nil) ||
						(n.nType == catchAll && n.children[0].handle != nil)
					return
				}
			}

			return
		}

		// Nothing found. We can recommend to redirect to the same URL with an
		// extra trailing slash if a leaf exists for that path
		tsr = (path == "/") ||
			(len(n.path) == len(path)+1 && n.path[len(path)] == '/' &&
				path == n.path[:len(n.path)-1] && n.handle != nil)
		return
	}
}
|
|
||||||
|
|
||||||
// Makes a case-insensitive lookup of the given path and tries to find a handler.
|
|
||||||
// It can optionally also fix trailing slashes.
|
|
||||||
// It returns the case-corrected path and a bool indicating whether the lookup
|
|
||||||
// was successful.
|
|
||||||
func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) {
|
|
||||||
return n.findCaseInsensitivePathRec(
|
|
||||||
path,
|
|
||||||
make([]byte, 0, len(path)+1), // preallocate enough memory for new path
|
|
||||||
[4]byte{}, // empty rune buffer
|
|
||||||
fixTrailingSlash,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// shiftNRuneBytes shifts the bytes of a (partially consumed) UTF-8 rune
// buffer left by n positions, zero-filling on the right. Shifting by an
// out-of-range amount (negative or >= 4) yields an all-zero buffer.
func shiftNRuneBytes(rb [4]byte, n int) [4]byte {
	var out [4]byte
	if n >= 0 && n < len(rb) {
		// copy zero-fills the tail implicitly, since out starts zeroed
		copy(out[:], rb[n:])
	}
	return out
}
|
|
||||||
|
|
||||||
// findCaseInsensitivePathRec is the recursive case-insensitive lookup
// function used by n.findCaseInsensitivePath. ciPath accumulates the
// case-corrected output path; rb buffers the remaining bytes of the UTF-8
// rune currently being matched across node boundaries.
func (n *node) findCaseInsensitivePathRec(path string, ciPath []byte, rb [4]byte, fixTrailingSlash bool) ([]byte, bool) {
	npLen := len(n.path)

walk: // outer loop for walking the tree
	for len(path) >= npLen && (npLen == 0 || strings.EqualFold(path[1:npLen], n.path[1:])) {
		// add common prefix to result

		oldPath := path
		path = path[npLen:]
		ciPath = append(ciPath, n.path...)

		if len(path) > 0 {
			// If this node does not have a wildcard (param or catchAll) child,
			// we can just look up the next child node and continue to walk down
			// the tree
			if !n.wildChild {
				// skip rune bytes already processed
				rb = shiftNRuneBytes(rb, npLen)

				if rb[0] != 0 {
					// old rune not finished
					for i := 0; i < len(n.indices); i++ {
						if n.indices[i] == rb[0] {
							// continue with child node
							n = n.children[i]
							npLen = len(n.path)
							continue walk
						}
					}
				} else {
					// process a new rune
					var rv rune

					// find rune start
					// runes are up to 4 byte long,
					// -4 would definitely be another rune
					var off int
					for max := min(npLen, 3); off < max; off++ {
						if i := npLen - off; utf8.RuneStart(oldPath[i]) {
							// read rune from cached path
							rv, _ = utf8.DecodeRuneInString(oldPath[i:])
							break
						}
					}

					// calculate lowercase bytes of current rune
					lo := unicode.ToLower(rv)
					utf8.EncodeRune(rb[:], lo)

					// skip already processed bytes
					rb = shiftNRuneBytes(rb, off)

					for i := 0; i < len(n.indices); i++ {
						// lowercase matches
						if n.indices[i] == rb[0] {
							// must use a recursive approach since both the
							// uppercase byte and the lowercase byte might exist
							// as an index
							if out, found := n.children[i].findCaseInsensitivePathRec(
								path, ciPath, rb, fixTrailingSlash,
							); found {
								return out, true
							}
							break
						}
					}

					// if we found no match, the same for the uppercase rune,
					// if it differs
					if up := unicode.ToUpper(rv); up != lo {
						utf8.EncodeRune(rb[:], up)
						rb = shiftNRuneBytes(rb, off)

						for i, c := 0, rb[0]; i < len(n.indices); i++ {
							// uppercase matches
							if n.indices[i] == c {
								// continue with child node
								n = n.children[i]
								npLen = len(n.path)
								continue walk
							}
						}
					}
				}

				// Nothing found. We can recommend to redirect to the same URL
				// without a trailing slash if a leaf exists for that path
				return ciPath, (fixTrailingSlash && path == "/" && n.handle != nil)
			}

			// handle wildcard child (single param or catchAll child)
			n = n.children[0]
			switch n.nType {
			case param:
				// find param end (either '/' or path end)
				k := 0
				for k < len(path) && path[k] != '/' {
					k++
				}

				// add param value to case insensitive path
				ciPath = append(ciPath, path[:k]...)

				// we need to go deeper!
				if k < len(path) {
					if len(n.children) > 0 {
						// continue with child node
						n = n.children[0]
						npLen = len(n.path)
						path = path[k:]
						continue
					}

					// ... but we can't
					if fixTrailingSlash && len(path) == k+1 {
						return ciPath, true
					}
					return ciPath, false
				}

				if n.handle != nil {
					return ciPath, true
				} else if fixTrailingSlash && len(n.children) == 1 {
					// No handle found. Check if a handle for this path + a
					// trailing slash exists
					n = n.children[0]
					if n.path == "/" && n.handle != nil {
						return append(ciPath, '/'), true
					}
				}
				return ciPath, false

			case catchAll:
				// catchAll consumes the rest of the path verbatim
				return append(ciPath, path...), true

			default:
				panic("invalid node type")
			}
		} else {
			// We should have reached the node containing the handle.
			// Check if this node has a handle registered.
			if n.handle != nil {
				return ciPath, true
			}

			// No handle found.
			// Try to fix the path by adding a trailing slash
			if fixTrailingSlash {
				for i := 0; i < len(n.indices); i++ {
					if n.indices[i] == '/' {
						n = n.children[i]
						if (len(n.path) == 1 && n.handle != nil) ||
							(n.nType == catchAll && n.children[0].handle != nil) {
							return append(ciPath, '/'), true
						}
						return ciPath, false
					}
				}
			}
			return ciPath, false
		}
	}

	// Nothing found.
	// Try to fix the path by adding / removing a trailing slash
	if fixTrailingSlash {
		if path == "/" {
			return ciPath, true
		}
		if len(path)+1 == npLen && n.path[len(path)] == '/' &&
			strings.EqualFold(path[1:], n.path[1:len(path)]) && n.handle != nil {
			return append(ciPath, n.path...), true
		}
	}
	return ciPath, false
}
|
|
||||||
1
vendor/github.com/opencontainers/go-digest/.mailmap
generated
vendored
Normal file
1
vendor/github.com/opencontainers/go-digest/.mailmap
generated
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Stephen J Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
|
||||||
12
vendor/github.com/opencontainers/go-digest/.pullapprove.yml
generated
vendored
Normal file
12
vendor/github.com/opencontainers/go-digest/.pullapprove.yml
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
approve_by_comment: true
|
||||||
|
approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
|
||||||
|
reject_regex: ^Rejected
|
||||||
|
reset_on_push: true
|
||||||
|
author_approval: ignored
|
||||||
|
signed_off_by:
|
||||||
|
required: true
|
||||||
|
reviewers:
|
||||||
|
teams:
|
||||||
|
- go-digest-maintainers
|
||||||
|
name: default
|
||||||
|
required: 2
|
||||||
4
vendor/github.com/opencontainers/go-digest/.travis.yml
generated
vendored
Normal file
4
vendor/github.com/opencontainers/go-digest/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.7
|
||||||
|
- master
|
||||||
72
vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
generated
vendored
Normal file
72
vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
# Contributing to Docker open source projects
|
||||||
|
|
||||||
|
Want to hack on this project? Awesome! Here are instructions to get you started.
|
||||||
|
|
||||||
|
This project is a part of the [Docker](https://www.docker.com) project, and follows
|
||||||
|
the same rules and principles. If you're already familiar with the way
|
||||||
|
Docker does things, you'll feel right at home.
|
||||||
|
|
||||||
|
Otherwise, go read Docker's
|
||||||
|
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
|
||||||
|
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
|
||||||
|
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
|
||||||
|
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
|
||||||
|
|
||||||
|
For an in-depth description of our contribution process, visit the
|
||||||
|
contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
|
||||||
|
|
||||||
|
### Sign your work
|
||||||
|
|
||||||
|
The sign-off is a simple line at the end of the explanation for the patch. Your
|
||||||
|
signature certifies that you wrote the patch or otherwise have the right to pass
|
||||||
|
it on as an open-source patch. The rules are pretty simple: if you can certify
|
||||||
|
the below (from [developercertificate.org](http://developercertificate.org/)):
|
||||||
|
|
||||||
|
```
|
||||||
|
Developer Certificate of Origin
|
||||||
|
Version 1.1
|
||||||
|
|
||||||
|
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||||
|
1 Letterman Drive
|
||||||
|
Suite D4700
|
||||||
|
San Francisco, CA, 94129
|
||||||
|
|
||||||
|
Everyone is permitted to copy and distribute verbatim copies of this
|
||||||
|
license document, but changing it is not allowed.
|
||||||
|
|
||||||
|
|
||||||
|
Developer's Certificate of Origin 1.1
|
||||||
|
|
||||||
|
By making a contribution to this project, I certify that:
|
||||||
|
|
||||||
|
(a) The contribution was created in whole or in part by me and I
|
||||||
|
have the right to submit it under the open source license
|
||||||
|
indicated in the file; or
|
||||||
|
|
||||||
|
(b) The contribution is based upon previous work that, to the best
|
||||||
|
of my knowledge, is covered under an appropriate open source
|
||||||
|
license and I have the right under that license to submit that
|
||||||
|
work with modifications, whether created in whole or in part
|
||||||
|
by me, under the same open source license (unless I am
|
||||||
|
permitted to submit under a different license), as indicated
|
||||||
|
in the file; or
|
||||||
|
|
||||||
|
(c) The contribution was provided directly to me by some other
|
||||||
|
person who certified (a), (b) or (c) and I have not modified
|
||||||
|
it.
|
||||||
|
|
||||||
|
(d) I understand and agree that this project and the contribution
|
||||||
|
are public and that a record of the contribution (including all
|
||||||
|
personal information I submit with it, including my sign-off) is
|
||||||
|
maintained indefinitely and may be redistributed consistent with
|
||||||
|
this project or the open source license(s) involved.
|
||||||
|
```
|
||||||
|
|
||||||
|
Then you just add a line to every git commit message:
|
||||||
|
|
||||||
|
Signed-off-by: Joe Smith <joe.smith@email.com>
|
||||||
|
|
||||||
|
Use your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||||
|
|
||||||
|
If you set your `user.name` and `user.email` git configs, you can sign your
|
||||||
|
commit automatically with `git commit -s`.
|
||||||
191
vendor/github.com/opencontainers/go-digest/LICENSE.code
generated
vendored
Normal file
191
vendor/github.com/opencontainers/go-digest/LICENSE.code
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
https://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2016 Docker, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
425
vendor/github.com/opencontainers/go-digest/LICENSE.docs
generated
vendored
Normal file
425
vendor/github.com/opencontainers/go-digest/LICENSE.docs
generated
vendored
Normal file
@@ -0,0 +1,425 @@
|
|||||||
|
Attribution-ShareAlike 4.0 International
|
||||||
|
|
||||||
|
=======================================================================
|
||||||
|
|
||||||
|
Creative Commons Corporation ("Creative Commons") is not a law firm and
|
||||||
|
does not provide legal services or legal advice. Distribution of
|
||||||
|
Creative Commons public licenses does not create a lawyer-client or
|
||||||
|
other relationship. Creative Commons makes its licenses and related
|
||||||
|
information available on an "as-is" basis. Creative Commons gives no
|
||||||
|
warranties regarding its licenses, any material licensed under their
|
||||||
|
terms and conditions, or any related information. Creative Commons
|
||||||
|
disclaims all liability for damages resulting from their use to the
|
||||||
|
fullest extent possible.
|
||||||
|
|
||||||
|
Using Creative Commons Public Licenses
|
||||||
|
|
||||||
|
Creative Commons public licenses provide a standard set of terms and
|
||||||
|
conditions that creators and other rights holders may use to share
|
||||||
|
original works of authorship and other material subject to copyright
|
||||||
|
and certain other rights specified in the public license below. The
|
||||||
|
following considerations are for informational purposes only, are not
|
||||||
|
exhaustive, and do not form part of our licenses.
|
||||||
|
|
||||||
|
Considerations for licensors: Our public licenses are
|
||||||
|
intended for use by those authorized to give the public
|
||||||
|
permission to use material in ways otherwise restricted by
|
||||||
|
copyright and certain other rights. Our licenses are
|
||||||
|
irrevocable. Licensors should read and understand the terms
|
||||||
|
and conditions of the license they choose before applying it.
|
||||||
|
Licensors should also secure all rights necessary before
|
||||||
|
applying our licenses so that the public can reuse the
|
||||||
|
material as expected. Licensors should clearly mark any
|
||||||
|
material not subject to the license. This includes other CC-
|
||||||
|
licensed material, or material used under an exception or
|
||||||
|
limitation to copyright. More considerations for licensors:
|
||||||
|
wiki.creativecommons.org/Considerations_for_licensors
|
||||||
|
|
||||||
|
Considerations for the public: By using one of our public
|
||||||
|
licenses, a licensor grants the public permission to use the
|
||||||
|
licensed material under specified terms and conditions. If
|
||||||
|
the licensor's permission is not necessary for any reason--for
|
||||||
|
example, because of any applicable exception or limitation to
|
||||||
|
copyright--then that use is not regulated by the license. Our
|
||||||
|
licenses grant only permissions under copyright and certain
|
||||||
|
other rights that a licensor has authority to grant. Use of
|
||||||
|
the licensed material may still be restricted for other
|
||||||
|
reasons, including because others have copyright or other
|
||||||
|
rights in the material. A licensor may make special requests,
|
||||||
|
such as asking that all changes be marked or described.
|
||||||
|
Although not required by our licenses, you are encouraged to
|
||||||
|
respect those requests where reasonable. More_considerations
|
||||||
|
for the public:
|
||||||
|
wiki.creativecommons.org/Considerations_for_licensees
|
||||||
|
|
||||||
|
=======================================================================
|
||||||
|
|
||||||
|
Creative Commons Attribution-ShareAlike 4.0 International Public
|
||||||
|
License
|
||||||
|
|
||||||
|
By exercising the Licensed Rights (defined below), You accept and agree
|
||||||
|
to be bound by the terms and conditions of this Creative Commons
|
||||||
|
Attribution-ShareAlike 4.0 International Public License ("Public
|
||||||
|
License"). To the extent this Public License may be interpreted as a
|
||||||
|
contract, You are granted the Licensed Rights in consideration of Your
|
||||||
|
acceptance of these terms and conditions, and the Licensor grants You
|
||||||
|
such rights in consideration of benefits the Licensor receives from
|
||||||
|
making the Licensed Material available under these terms and
|
||||||
|
conditions.
|
||||||
|
|
||||||
|
|
||||||
|
Section 1 -- Definitions.
|
||||||
|
|
||||||
|
a. Adapted Material means material subject to Copyright and Similar
|
||||||
|
Rights that is derived from or based upon the Licensed Material
|
||||||
|
and in which the Licensed Material is translated, altered,
|
||||||
|
arranged, transformed, or otherwise modified in a manner requiring
|
||||||
|
permission under the Copyright and Similar Rights held by the
|
||||||
|
Licensor. For purposes of this Public License, where the Licensed
|
||||||
|
Material is a musical work, performance, or sound recording,
|
||||||
|
Adapted Material is always produced where the Licensed Material is
|
||||||
|
synched in timed relation with a moving image.
|
||||||
|
|
||||||
|
b. Adapter's License means the license You apply to Your Copyright
|
||||||
|
and Similar Rights in Your contributions to Adapted Material in
|
||||||
|
accordance with the terms and conditions of this Public License.
|
||||||
|
|
||||||
|
c. BY-SA Compatible License means a license listed at
|
||||||
|
creativecommons.org/compatiblelicenses, approved by Creative
|
||||||
|
Commons as essentially the equivalent of this Public License.
|
||||||
|
|
||||||
|
d. Copyright and Similar Rights means copyright and/or similar rights
|
||||||
|
closely related to copyright including, without limitation,
|
||||||
|
performance, broadcast, sound recording, and Sui Generis Database
|
||||||
|
Rights, without regard to how the rights are labeled or
|
||||||
|
categorized. For purposes of this Public License, the rights
|
||||||
|
specified in Section 2(b)(1)-(2) are not Copyright and Similar
|
||||||
|
Rights.
|
||||||
|
|
||||||
|
e. Effective Technological Measures means those measures that, in the
|
||||||
|
absence of proper authority, may not be circumvented under laws
|
||||||
|
fulfilling obligations under Article 11 of the WIPO Copyright
|
||||||
|
Treaty adopted on December 20, 1996, and/or similar international
|
||||||
|
agreements.
|
||||||
|
|
||||||
|
f. Exceptions and Limitations means fair use, fair dealing, and/or
|
||||||
|
any other exception or limitation to Copyright and Similar Rights
|
||||||
|
that applies to Your use of the Licensed Material.
|
||||||
|
|
||||||
|
g. License Elements means the license attributes listed in the name
|
||||||
|
of a Creative Commons Public License. The License Elements of this
|
||||||
|
Public License are Attribution and ShareAlike.
|
||||||
|
|
||||||
|
h. Licensed Material means the artistic or literary work, database,
|
||||||
|
or other material to which the Licensor applied this Public
|
||||||
|
License.
|
||||||
|
|
||||||
|
i. Licensed Rights means the rights granted to You subject to the
|
||||||
|
terms and conditions of this Public License, which are limited to
|
||||||
|
all Copyright and Similar Rights that apply to Your use of the
|
||||||
|
Licensed Material and that the Licensor has authority to license.
|
||||||
|
|
||||||
|
j. Licensor means the individual(s) or entity(ies) granting rights
|
||||||
|
under this Public License.
|
||||||
|
|
||||||
|
k. Share means to provide material to the public by any means or
|
||||||
|
process that requires permission under the Licensed Rights, such
|
||||||
|
as reproduction, public display, public performance, distribution,
|
||||||
|
dissemination, communication, or importation, and to make material
|
||||||
|
available to the public including in ways that members of the
|
||||||
|
public may access the material from a place and at a time
|
||||||
|
individually chosen by them.
|
||||||
|
|
||||||
|
l. Sui Generis Database Rights means rights other than copyright
|
||||||
|
resulting from Directive 96/9/EC of the European Parliament and of
|
||||||
|
the Council of 11 March 1996 on the legal protection of databases,
|
||||||
|
as amended and/or succeeded, as well as other essentially
|
||||||
|
equivalent rights anywhere in the world.
|
||||||
|
|
||||||
|
m. You means the individual or entity exercising the Licensed Rights
|
||||||
|
under this Public License. Your has a corresponding meaning.
|
||||||
|
|
||||||
|
|
||||||
|
Section 2 -- Scope.
|
||||||
|
|
||||||
|
a. License grant.
|
||||||
|
|
||||||
|
1. Subject to the terms and conditions of this Public License,
|
||||||
|
the Licensor hereby grants You a worldwide, royalty-free,
|
||||||
|
non-sublicensable, non-exclusive, irrevocable license to
|
||||||
|
exercise the Licensed Rights in the Licensed Material to:
|
||||||
|
|
||||||
|
a. reproduce and Share the Licensed Material, in whole or
|
||||||
|
in part; and
|
||||||
|
|
||||||
|
b. produce, reproduce, and Share Adapted Material.
|
||||||
|
|
||||||
|
2. Exceptions and Limitations. For the avoidance of doubt, where
|
||||||
|
Exceptions and Limitations apply to Your use, this Public
|
||||||
|
License does not apply, and You do not need to comply with
|
||||||
|
its terms and conditions.
|
||||||
|
|
||||||
|
3. Term. The term of this Public License is specified in Section
|
||||||
|
6(a).
|
||||||
|
|
||||||
|
4. Media and formats; technical modifications allowed. The
|
||||||
|
Licensor authorizes You to exercise the Licensed Rights in
|
||||||
|
all media and formats whether now known or hereafter created,
|
||||||
|
and to make technical modifications necessary to do so. The
|
||||||
|
Licensor waives and/or agrees not to assert any right or
|
||||||
|
authority to forbid You from making technical modifications
|
||||||
|
necessary to exercise the Licensed Rights, including
|
||||||
|
technical modifications necessary to circumvent Effective
|
||||||
|
Technological Measures. For purposes of this Public License,
|
||||||
|
simply making modifications authorized by this Section 2(a)
|
||||||
|
(4) never produces Adapted Material.
|
||||||
|
|
||||||
|
5. Downstream recipients.
|
||||||
|
|
||||||
|
a. Offer from the Licensor -- Licensed Material. Every
|
||||||
|
recipient of the Licensed Material automatically
|
||||||
|
receives an offer from the Licensor to exercise the
|
||||||
|
Licensed Rights under the terms and conditions of this
|
||||||
|
Public License.
|
||||||
|
|
||||||
|
b. Additional offer from the Licensor -- Adapted Material.
|
||||||
|
Every recipient of Adapted Material from You
|
||||||
|
automatically receives an offer from the Licensor to
|
||||||
|
exercise the Licensed Rights in the Adapted Material
|
||||||
|
under the conditions of the Adapter's License You apply.
|
||||||
|
|
||||||
|
c. No downstream restrictions. You may not offer or impose
|
||||||
|
any additional or different terms or conditions on, or
|
||||||
|
apply any Effective Technological Measures to, the
|
||||||
|
Licensed Material if doing so restricts exercise of the
|
||||||
|
Licensed Rights by any recipient of the Licensed
|
||||||
|
Material.
|
||||||
|
|
||||||
|
6. No endorsement. Nothing in this Public License constitutes or
|
||||||
|
may be construed as permission to assert or imply that You
|
||||||
|
are, or that Your use of the Licensed Material is, connected
|
||||||
|
with, or sponsored, endorsed, or granted official status by,
|
||||||
|
the Licensor or others designated to receive attribution as
|
||||||
|
provided in Section 3(a)(1)(A)(i).
|
||||||
|
|
||||||
|
b. Other rights.
|
||||||
|
|
||||||
|
1. Moral rights, such as the right of integrity, are not
|
||||||
|
licensed under this Public License, nor are publicity,
|
||||||
|
privacy, and/or other similar personality rights; however, to
|
||||||
|
the extent possible, the Licensor waives and/or agrees not to
|
||||||
|
assert any such rights held by the Licensor to the limited
|
||||||
|
extent necessary to allow You to exercise the Licensed
|
||||||
|
Rights, but not otherwise.
|
||||||
|
|
||||||
|
2. Patent and trademark rights are not licensed under this
|
||||||
|
Public License.
|
||||||
|
|
||||||
|
3. To the extent possible, the Licensor waives any right to
|
||||||
|
collect royalties from You for the exercise of the Licensed
|
||||||
|
Rights, whether directly or through a collecting society
|
||||||
|
under any voluntary or waivable statutory or compulsory
|
||||||
|
licensing scheme. In all other cases the Licensor expressly
|
||||||
|
reserves any right to collect such royalties.
|
||||||
|
|
||||||
|
|
||||||
|
Section 3 -- License Conditions.
|
||||||
|
|
||||||
|
Your exercise of the Licensed Rights is expressly made subject to the
|
||||||
|
following conditions.
|
||||||
|
|
||||||
|
a. Attribution.
|
||||||
|
|
||||||
|
1. If You Share the Licensed Material (including in modified
|
||||||
|
form), You must:
|
||||||
|
|
||||||
|
a. retain the following if it is supplied by the Licensor
|
||||||
|
with the Licensed Material:
|
||||||
|
|
||||||
|
i. identification of the creator(s) of the Licensed
|
||||||
|
Material and any others designated to receive
|
||||||
|
attribution, in any reasonable manner requested by
|
||||||
|
the Licensor (including by pseudonym if
|
||||||
|
designated);
|
||||||
|
|
||||||
|
ii. a copyright notice;
|
||||||
|
|
||||||
|
iii. a notice that refers to this Public License;
|
||||||
|
|
||||||
|
iv. a notice that refers to the disclaimer of
|
||||||
|
warranties;
|
||||||
|
|
||||||
|
v. a URI or hyperlink to the Licensed Material to the
|
||||||
|
extent reasonably practicable;
|
||||||
|
|
||||||
|
b. indicate if You modified the Licensed Material and
|
||||||
|
retain an indication of any previous modifications; and
|
||||||
|
|
||||||
|
c. indicate the Licensed Material is licensed under this
|
||||||
|
Public License, and include the text of, or the URI or
|
||||||
|
hyperlink to, this Public License.
|
||||||
|
|
||||||
|
2. You may satisfy the conditions in Section 3(a)(1) in any
|
||||||
|
reasonable manner based on the medium, means, and context in
|
||||||
|
which You Share the Licensed Material. For example, it may be
|
||||||
|
reasonable to satisfy the conditions by providing a URI or
|
||||||
|
hyperlink to a resource that includes the required
|
||||||
|
information.
|
||||||
|
|
||||||
|
3. If requested by the Licensor, You must remove any of the
|
||||||
|
information required by Section 3(a)(1)(A) to the extent
|
||||||
|
reasonably practicable.
|
||||||
|
|
||||||
|
b. ShareAlike.
|
||||||
|
|
||||||
|
In addition to the conditions in Section 3(a), if You Share
|
||||||
|
Adapted Material You produce, the following conditions also apply.
|
||||||
|
|
||||||
|
1. The Adapter's License You apply must be a Creative Commons
|
||||||
|
license with the same License Elements, this version or
|
||||||
|
later, or a BY-SA Compatible License.
|
||||||
|
|
||||||
|
2. You must include the text of, or the URI or hyperlink to, the
|
||||||
|
Adapter's License You apply. You may satisfy this condition
|
||||||
|
in any reasonable manner based on the medium, means, and
|
||||||
|
context in which You Share Adapted Material.
|
||||||
|
|
||||||
|
3. You may not offer or impose any additional or different terms
|
||||||
|
or conditions on, or apply any Effective Technological
|
||||||
|
Measures to, Adapted Material that restrict exercise of the
|
||||||
|
rights granted under the Adapter's License You apply.
|
||||||
|
|
||||||
|
|
||||||
|
Section 4 -- Sui Generis Database Rights.
|
||||||
|
|
||||||
|
Where the Licensed Rights include Sui Generis Database Rights that
|
||||||
|
apply to Your use of the Licensed Material:
|
||||||
|
|
||||||
|
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
|
||||||
|
to extract, reuse, reproduce, and Share all or a substantial
|
||||||
|
portion of the contents of the database;
|
||||||
|
|
||||||
|
b. if You include all or a substantial portion of the database
|
||||||
|
contents in a database in which You have Sui Generis Database
|
||||||
|
Rights, then the database in which You have Sui Generis Database
|
||||||
|
Rights (but not its individual contents) is Adapted Material,
|
||||||
|
|
||||||
|
including for purposes of Section 3(b); and
|
||||||
|
c. You must comply with the conditions in Section 3(a) if You Share
|
||||||
|
all or a substantial portion of the contents of the database.
|
||||||
|
|
||||||
|
For the avoidance of doubt, this Section 4 supplements and does not
|
||||||
|
replace Your obligations under this Public License where the Licensed
|
||||||
|
Rights include other Copyright and Similar Rights.
|
||||||
|
|
||||||
|
|
||||||
|
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
|
||||||
|
|
||||||
|
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
|
||||||
|
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
|
||||||
|
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
|
||||||
|
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
|
||||||
|
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
|
||||||
|
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
||||||
|
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
|
||||||
|
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
|
||||||
|
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
|
||||||
|
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
|
||||||
|
|
||||||
|
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
|
||||||
|
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
|
||||||
|
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
|
||||||
|
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
|
||||||
|
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
|
||||||
|
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
|
||||||
|
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
|
||||||
|
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
|
||||||
|
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
|
||||||
|
|
||||||
|
c. The disclaimer of warranties and limitation of liability provided
|
||||||
|
above shall be interpreted in a manner that, to the extent
|
||||||
|
possible, most closely approximates an absolute disclaimer and
|
||||||
|
waiver of all liability.
|
||||||
|
|
||||||
|
|
||||||
|
Section 6 -- Term and Termination.
|
||||||
|
|
||||||
|
a. This Public License applies for the term of the Copyright and
|
||||||
|
Similar Rights licensed here. However, if You fail to comply with
|
||||||
|
this Public License, then Your rights under this Public License
|
||||||
|
terminate automatically.
|
||||||
|
|
||||||
|
b. Where Your right to use the Licensed Material has terminated under
|
||||||
|
Section 6(a), it reinstates:
|
||||||
|
|
||||||
|
1. automatically as of the date the violation is cured, provided
|
||||||
|
it is cured within 30 days of Your discovery of the
|
||||||
|
violation; or
|
||||||
|
|
||||||
|
2. upon express reinstatement by the Licensor.
|
||||||
|
|
||||||
|
For the avoidance of doubt, this Section 6(b) does not affect any
|
||||||
|
right the Licensor may have to seek remedies for Your violations
|
||||||
|
of this Public License.
|
||||||
|
|
||||||
|
c. For the avoidance of doubt, the Licensor may also offer the
|
||||||
|
Licensed Material under separate terms or conditions or stop
|
||||||
|
distributing the Licensed Material at any time; however, doing so
|
||||||
|
will not terminate this Public License.
|
||||||
|
|
||||||
|
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
|
||||||
|
License.
|
||||||
|
|
||||||
|
|
||||||
|
Section 7 -- Other Terms and Conditions.
|
||||||
|
|
||||||
|
a. The Licensor shall not be bound by any additional or different
|
||||||
|
terms or conditions communicated by You unless expressly agreed.
|
||||||
|
|
||||||
|
b. Any arrangements, understandings, or agreements regarding the
|
||||||
|
Licensed Material not stated herein are separate from and
|
||||||
|
independent of the terms and conditions of this Public License.
|
||||||
|
|
||||||
|
|
||||||
|
Section 8 -- Interpretation.
|
||||||
|
|
||||||
|
a. For the avoidance of doubt, this Public License does not, and
|
||||||
|
shall not be interpreted to, reduce, limit, restrict, or impose
|
||||||
|
conditions on any use of the Licensed Material that could lawfully
|
||||||
|
be made without permission under this Public License.
|
||||||
|
|
||||||
|
b. To the extent possible, if any provision of this Public License is
|
||||||
|
deemed unenforceable, it shall be automatically reformed to the
|
||||||
|
minimum extent necessary to make it enforceable. If the provision
|
||||||
|
cannot be reformed, it shall be severed from this Public License
|
||||||
|
without affecting the enforceability of the remaining terms and
|
||||||
|
conditions.
|
||||||
|
|
||||||
|
c. No term or condition of this Public License will be waived and no
|
||||||
|
failure to comply consented to unless expressly agreed to by the
|
||||||
|
Licensor.
|
||||||
|
|
||||||
|
d. Nothing in this Public License constitutes or may be interpreted
|
||||||
|
as a limitation upon, or waiver of, any privileges and immunities
|
||||||
|
that apply to the Licensor or You, including from the legal
|
||||||
|
processes of any jurisdiction or authority.
|
||||||
|
|
||||||
|
|
||||||
|
=======================================================================
|
||||||
|
|
||||||
|
Creative Commons is not a party to its public licenses.
|
||||||
|
Notwithstanding, Creative Commons may elect to apply one of its public
|
||||||
|
licenses to material it publishes and in those instances will be
|
||||||
|
considered the "Licensor." Except for the limited purpose of indicating
|
||||||
|
that material is shared under a Creative Commons public license or as
|
||||||
|
otherwise permitted by the Creative Commons policies published at
|
||||||
|
creativecommons.org/policies, Creative Commons does not authorize the
|
||||||
|
use of the trademark "Creative Commons" or any other trademark or logo
|
||||||
|
of Creative Commons without its prior written consent including,
|
||||||
|
without limitation, in connection with any unauthorized modifications
|
||||||
|
to any of its public licenses or any other arrangements,
|
||||||
|
understandings, or agreements concerning use of licensed material. For
|
||||||
|
the avoidance of doubt, this paragraph does not form part of the public
|
||||||
|
licenses.
|
||||||
|
|
||||||
|
Creative Commons may be contacted at creativecommons.org.
|
||||||
9
vendor/github.com/opencontainers/go-digest/MAINTAINERS
generated
vendored
Normal file
9
vendor/github.com/opencontainers/go-digest/MAINTAINERS
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)
|
||||||
|
Brandon Philips <brandon.philips@coreos.com> (@philips)
|
||||||
|
Brendan Burns <bburns@microsoft.com> (@brendandburns)
|
||||||
|
Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
|
||||||
|
Jason Bouzane <jbouzane@google.com> (@jbouzane)
|
||||||
|
John Starks <jostarks@microsoft.com> (@jstarks)
|
||||||
|
Jonathan Boulle <jon.boulle@coreos.com> (@jonboulle)
|
||||||
|
Stephen Day <stephen.day@docker.com> (@stevvooe)
|
||||||
|
Vincent Batts <vbatts@redhat.com> (@vbatts)
|
||||||
104
vendor/github.com/opencontainers/go-digest/README.md
generated
vendored
Normal file
104
vendor/github.com/opencontainers/go-digest/README.md
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
# go-digest
|
||||||
|
|
||||||
|
[](https://godoc.org/github.com/opencontainers/go-digest) [](https://goreportcard.com/report/github.com/opencontainers/go-digest) [](https://travis-ci.org/opencontainers/go-digest)
|
||||||
|
|
||||||
|
Common digest package used across the container ecosystem.
|
||||||
|
|
||||||
|
Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information.
|
||||||
|
|
||||||
|
# What is a digest?
|
||||||
|
|
||||||
|
A digest is just a hash.
|
||||||
|
|
||||||
|
The most common use case for a digest is to create a content
|
||||||
|
identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage)
|
||||||
|
systems:
|
||||||
|
|
||||||
|
```go
|
||||||
|
id := digest.FromBytes([]byte("my content"))
|
||||||
|
```
|
||||||
|
|
||||||
|
In the example above, the id can be used to uniquely identify
|
||||||
|
the byte slice "my content". This allows two disparate applications
|
||||||
|
to agree on a verifiable identifier without having to trust one
|
||||||
|
another.
|
||||||
|
|
||||||
|
An identifying digest can be verified, as follows:
|
||||||
|
|
||||||
|
```go
|
||||||
|
if id != digest.FromBytes([]byte("my content")) {
|
||||||
|
return errors.New("the content has changed!")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
A `Verifier` type can be used to handle cases where an `io.Reader`
|
||||||
|
makes more sense:
|
||||||
|
|
||||||
|
```go
|
||||||
|
rd := getContent()
|
||||||
|
verifier := id.Verifier()
|
||||||
|
io.Copy(verifier, rd)
|
||||||
|
|
||||||
|
if !verifier.Verified() {
|
||||||
|
return errors.New("the content has changed!")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this
|
||||||
|
can power a rich, safe, content distribution system.
|
||||||
|
|
||||||
|
# Usage
|
||||||
|
|
||||||
|
While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is
|
||||||
|
considered the best resource, a few important items need to be called
|
||||||
|
out when using this package.
|
||||||
|
|
||||||
|
1. Make sure to import the hash implementations into your application
|
||||||
|
or the package will panic. You should have something like the
|
||||||
|
following in the main (or other entrypoint) of your application:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
_ "crypto/sha256"
|
||||||
|
_ "crypto/sha512"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
This may seem inconvenient but it allows you replace the hash
|
||||||
|
implementations with others, such as https://github.com/stevvooe/resumable.
|
||||||
|
|
||||||
|
2. Even though `digest.Digest` may be assemable as a string, _always_
|
||||||
|
verify your input with `digest.Parse` or use `Digest.Validate`
|
||||||
|
when accepting untrusted input. While there are measures to
|
||||||
|
avoid common problems, this will ensure you have valid digests
|
||||||
|
in the rest of your application.
|
||||||
|
|
||||||
|
# Stability
|
||||||
|
|
||||||
|
The Go API, at this stage, is considered stable, unless otherwise noted.
|
||||||
|
|
||||||
|
As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).
|
||||||
|
|
||||||
|
# Contributing
|
||||||
|
|
||||||
|
This package is considered fairly complete. It has been in production
|
||||||
|
in thousands (millions?) of deployments and is fairly battle-hardened.
|
||||||
|
New additions will be met with skepticism. If you think there is a
|
||||||
|
missing feature, please file a bug clearly describing the problem and
|
||||||
|
the alternatives you tried before submitting a PR.
|
||||||
|
|
||||||
|
# Reporting security issues
|
||||||
|
|
||||||
|
Please DO NOT file a public issue, instead send your report privately to
|
||||||
|
security@opencontainers.org.
|
||||||
|
|
||||||
|
The maintainers take security seriously. If you discover a security issue,
|
||||||
|
please bring it to their attention right away!
|
||||||
|
|
||||||
|
If you are reporting a security issue, do not create an issue or file a pull
|
||||||
|
request on GitHub. Instead, disclose the issue responsibly by sending an email
|
||||||
|
to security@opencontainers.org (which is inhabited only by the maintainers of
|
||||||
|
the various OCI projects).
|
||||||
|
|
||||||
|
# Copyright and license
|
||||||
|
|
||||||
|
Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
|
||||||
192
vendor/github.com/opencontainers/go-digest/algorithm.go
generated
vendored
Normal file
192
vendor/github.com/opencontainers/go-digest/algorithm.go
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
// Copyright 2017 Docker, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package digest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Algorithm identifies and implementation of a digester by an identifier.
|
||||||
|
// Note the that this defines both the hash algorithm used and the string
|
||||||
|
// encoding.
|
||||||
|
type Algorithm string
|
||||||
|
|
||||||
|
// supported digest types
|
||||||
|
const (
|
||||||
|
SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
|
||||||
|
SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
|
||||||
|
SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
|
||||||
|
|
||||||
|
// Canonical is the primary digest algorithm used with the distribution
|
||||||
|
// project. Other digests may be used but this one is the primary storage
|
||||||
|
// digest.
|
||||||
|
Canonical = SHA256
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// TODO(stevvooe): Follow the pattern of the standard crypto package for
|
||||||
|
// registration of digests. Effectively, we are a registerable set and
|
||||||
|
// common symbol access.
|
||||||
|
|
||||||
|
// algorithms maps values to hash.Hash implementations. Other algorithms
|
||||||
|
// may be available but they cannot be calculated by the digest package.
|
||||||
|
algorithms = map[Algorithm]crypto.Hash{
|
||||||
|
SHA256: crypto.SHA256,
|
||||||
|
SHA384: crypto.SHA384,
|
||||||
|
SHA512: crypto.SHA512,
|
||||||
|
}
|
||||||
|
|
||||||
|
// anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
|
||||||
|
// Note that /A-F/ disallowed.
|
||||||
|
anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
|
||||||
|
SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
|
||||||
|
SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
|
||||||
|
SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Available returns true if the digest type is available for use. If this
|
||||||
|
// returns false, Digester and Hash will return nil.
|
||||||
|
func (a Algorithm) Available() bool {
|
||||||
|
h, ok := algorithms[a]
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// check availability of the hash, as well
|
||||||
|
return h.Available()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a Algorithm) String() string {
|
||||||
|
return string(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns number of bytes returned by the hash.
|
||||||
|
func (a Algorithm) Size() int {
|
||||||
|
h, ok := algorithms[a]
|
||||||
|
if !ok {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return h.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set implemented to allow use of Algorithm as a command line flag.
|
||||||
|
func (a *Algorithm) Set(value string) error {
|
||||||
|
if value == "" {
|
||||||
|
*a = Canonical
|
||||||
|
} else {
|
||||||
|
// just do a type conversion, support is queried with Available.
|
||||||
|
*a = Algorithm(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !a.Available() {
|
||||||
|
return ErrDigestUnsupported
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Digester returns a new digester for the specified algorithm. If the algorithm
|
||||||
|
// does not have a digester implementation, nil will be returned. This can be
|
||||||
|
// checked by calling Available before calling Digester.
|
||||||
|
func (a Algorithm) Digester() Digester {
|
||||||
|
return &digester{
|
||||||
|
alg: a,
|
||||||
|
hash: a.Hash(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hash returns a new hash as used by the algorithm. If not available, the
|
||||||
|
// method will panic. Check Algorithm.Available() before calling.
|
||||||
|
func (a Algorithm) Hash() hash.Hash {
|
||||||
|
if !a.Available() {
|
||||||
|
// Empty algorithm string is invalid
|
||||||
|
if a == "" {
|
||||||
|
panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE(stevvooe): A missing hash is usually a programming error that
|
||||||
|
// must be resolved at compile time. We don't import in the digest
|
||||||
|
// package to allow users to choose their hash implementation (such as
|
||||||
|
// when using stevvooe/resumable or a hardware accelerated package).
|
||||||
|
//
|
||||||
|
// Applications that may want to resolve the hash at runtime should
|
||||||
|
// call Algorithm.Available before call Algorithm.Hash().
|
||||||
|
panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
|
||||||
|
}
|
||||||
|
|
||||||
|
return algorithms[a].New()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
|
||||||
|
// the encoded portion of the digest.
|
||||||
|
func (a Algorithm) Encode(d []byte) string {
|
||||||
|
// TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
|
||||||
|
// add support for back registration, we can modify this accordingly.
|
||||||
|
return fmt.Sprintf("%x", d)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromReader returns the digest of the reader using the algorithm.
|
||||||
|
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
|
||||||
|
digester := a.Digester()
|
||||||
|
|
||||||
|
if _, err := io.Copy(digester.Hash(), rd); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return digester.Digest(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromBytes digests the input and returns a Digest.
|
||||||
|
func (a Algorithm) FromBytes(p []byte) Digest {
|
||||||
|
digester := a.Digester()
|
||||||
|
|
||||||
|
if _, err := digester.Hash().Write(p); err != nil {
|
||||||
|
// Writes to a Hash should never fail. None of the existing
|
||||||
|
// hash implementations in the stdlib or hashes vendored
|
||||||
|
// here can return errors from Write. Having a panic in this
|
||||||
|
// condition instead of having FromBytes return an error value
|
||||||
|
// avoids unnecessary error handling paths in all callers.
|
||||||
|
panic("write to hash function returned error: " + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return digester.Digest()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromString digests the string input and returns a Digest.
|
||||||
|
func (a Algorithm) FromString(s string) Digest {
|
||||||
|
return a.FromBytes([]byte(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate validates the encoded portion string
|
||||||
|
func (a Algorithm) Validate(encoded string) error {
|
||||||
|
r, ok := anchoredEncodedRegexps[a]
|
||||||
|
if !ok {
|
||||||
|
return ErrDigestUnsupported
|
||||||
|
}
|
||||||
|
// Digests much always be hex-encoded, ensuring that their hex portion will
|
||||||
|
// always be size*2
|
||||||
|
if a.Size()*2 != len(encoded) {
|
||||||
|
return ErrDigestInvalidLength
|
||||||
|
}
|
||||||
|
if r.MatchString(encoded) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return ErrDigestInvalidFormat
|
||||||
|
}
|
||||||
156
vendor/github.com/opencontainers/go-digest/digest.go
generated
vendored
Normal file
156
vendor/github.com/opencontainers/go-digest/digest.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
// Copyright 2017 Docker, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package digest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Digest allows simple protection of hex formatted digest strings, prefixed
|
||||||
|
// by their algorithm. Strings of type Digest have some guarantee of being in
|
||||||
|
// the correct format and it provides quick access to the components of a
|
||||||
|
// digest string.
|
||||||
|
//
|
||||||
|
// The following is an example of the contents of Digest types:
|
||||||
|
//
|
||||||
|
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
|
||||||
|
//
|
||||||
|
// This allows to abstract the digest behind this type and work only in those
|
||||||
|
// terms.
|
||||||
|
type Digest string
|
||||||
|
|
||||||
|
// NewDigest returns a Digest from alg and a hash.Hash object.
|
||||||
|
func NewDigest(alg Algorithm, h hash.Hash) Digest {
|
||||||
|
return NewDigestFromBytes(alg, h.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDigestFromBytes returns a new digest from the byte contents of p.
|
||||||
|
// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
|
||||||
|
// functions. This is also useful for rebuilding digests from binary
|
||||||
|
// serializations.
|
||||||
|
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
|
||||||
|
return NewDigestFromEncoded(alg, alg.Encode(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded.
|
||||||
|
func NewDigestFromHex(alg, hex string) Digest {
|
||||||
|
return NewDigestFromEncoded(Algorithm(alg), hex)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDigestFromEncoded returns a Digest from alg and the encoded digest.
|
||||||
|
func NewDigestFromEncoded(alg Algorithm, encoded string) Digest {
|
||||||
|
return Digest(fmt.Sprintf("%s:%s", alg, encoded))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DigestRegexp matches valid digest types.
|
||||||
|
var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`)
|
||||||
|
|
||||||
|
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
|
||||||
|
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrDigestInvalidFormat returned when digest format invalid.
|
||||||
|
ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
|
||||||
|
|
||||||
|
// ErrDigestInvalidLength returned when digest has invalid length.
|
||||||
|
ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
|
||||||
|
|
||||||
|
// ErrDigestUnsupported returned when the digest algorithm is unsupported.
|
||||||
|
ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Parse parses s and returns the validated digest object. An error will
|
||||||
|
// be returned if the format is invalid.
|
||||||
|
func Parse(s string) (Digest, error) {
|
||||||
|
d := Digest(s)
|
||||||
|
return d, d.Validate()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromReader consumes the content of rd until io.EOF, returning canonical digest.
|
||||||
|
func FromReader(rd io.Reader) (Digest, error) {
|
||||||
|
return Canonical.FromReader(rd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromBytes digests the input and returns a Digest.
|
||||||
|
func FromBytes(p []byte) Digest {
|
||||||
|
return Canonical.FromBytes(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromString digests the input and returns a Digest.
|
||||||
|
func FromString(s string) Digest {
|
||||||
|
return Canonical.FromString(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks that the contents of d is a valid digest, returning an
|
||||||
|
// error if not.
|
||||||
|
func (d Digest) Validate() error {
|
||||||
|
s := string(d)
|
||||||
|
i := strings.Index(s, ":")
|
||||||
|
if i <= 0 || i+1 == len(s) {
|
||||||
|
return ErrDigestInvalidFormat
|
||||||
|
}
|
||||||
|
algorithm, encoded := Algorithm(s[:i]), s[i+1:]
|
||||||
|
if !algorithm.Available() {
|
||||||
|
if !DigestRegexpAnchored.MatchString(s) {
|
||||||
|
return ErrDigestInvalidFormat
|
||||||
|
}
|
||||||
|
return ErrDigestUnsupported
|
||||||
|
}
|
||||||
|
return algorithm.Validate(encoded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Algorithm returns the algorithm portion of the digest. This will panic if
|
||||||
|
// the underlying digest is not in a valid format.
|
||||||
|
func (d Digest) Algorithm() Algorithm {
|
||||||
|
return Algorithm(d[:d.sepIndex()])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verifier returns a writer object that can be used to verify a stream of
|
||||||
|
// content against the digest. If the digest is invalid, the method will panic.
|
||||||
|
func (d Digest) Verifier() Verifier {
|
||||||
|
return hashVerifier{
|
||||||
|
hash: d.Algorithm().Hash(),
|
||||||
|
digest: d,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encoded returns the encoded portion of the digest. This will panic if the
|
||||||
|
// underlying digest is not in a valid format.
|
||||||
|
func (d Digest) Encoded() string {
|
||||||
|
return string(d[d.sepIndex()+1:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hex is deprecated. Please use Digest.Encoded.
|
||||||
|
func (d Digest) Hex() string {
|
||||||
|
return d.Encoded()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d Digest) String() string {
|
||||||
|
return string(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d Digest) sepIndex() int {
|
||||||
|
i := strings.Index(string(d), ":")
|
||||||
|
|
||||||
|
if i < 0 {
|
||||||
|
panic(fmt.Sprintf("no ':' separator in digest %q", d))
|
||||||
|
}
|
||||||
|
|
||||||
|
return i
|
||||||
|
}
|
||||||
39
vendor/github.com/opencontainers/go-digest/digester.go
generated
vendored
Normal file
39
vendor/github.com/opencontainers/go-digest/digester.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
// Copyright 2017 Docker, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package digest
|
||||||
|
|
||||||
|
import "hash"
|
||||||
|
|
||||||
|
// Digester calculates the digest of written data. Writes should go directly
|
||||||
|
// to the return value of Hash, while calling Digest will return the current
|
||||||
|
// value of the digest.
|
||||||
|
type Digester interface {
|
||||||
|
Hash() hash.Hash // provides direct access to underlying hash instance.
|
||||||
|
Digest() Digest
|
||||||
|
}
|
||||||
|
|
||||||
|
// digester provides a simple digester definition that embeds a hasher.
|
||||||
|
type digester struct {
|
||||||
|
alg Algorithm
|
||||||
|
hash hash.Hash
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *digester) Hash() hash.Hash {
|
||||||
|
return d.hash
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *digester) Digest() Digest {
|
||||||
|
return NewDigest(d.alg, d.hash)
|
||||||
|
}
|
||||||
56
vendor/github.com/opencontainers/go-digest/doc.go
generated
vendored
Normal file
56
vendor/github.com/opencontainers/go-digest/doc.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
// Copyright 2017 Docker, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package digest provides a generalized type to opaquely represent message
|
||||||
|
// digests and their operations within the registry. The Digest type is
|
||||||
|
// designed to serve as a flexible identifier in a content-addressable system.
|
||||||
|
// More importantly, it provides tools and wrappers to work with
|
||||||
|
// hash.Hash-based digests with little effort.
|
||||||
|
//
|
||||||
|
// Basics
|
||||||
|
//
|
||||||
|
// The format of a digest is simply a string with two parts, dubbed the
|
||||||
|
// "algorithm" and the "digest", separated by a colon:
|
||||||
|
//
|
||||||
|
// <algorithm>:<digest>
|
||||||
|
//
|
||||||
|
// An example of a sha256 digest representation follows:
|
||||||
|
//
|
||||||
|
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
|
||||||
|
//
|
||||||
|
// In this case, the string "sha256" is the algorithm and the hex bytes are
|
||||||
|
// the "digest".
|
||||||
|
//
|
||||||
|
// Because the Digest type is simply a string, once a valid Digest is
|
||||||
|
// obtained, comparisons are cheap, quick and simple to express with the
|
||||||
|
// standard equality operator.
|
||||||
|
//
|
||||||
|
// Verification
|
||||||
|
//
|
||||||
|
// The main benefit of using the Digest type is simple verification against a
|
||||||
|
// given digest. The Verifier interface, modeled after the stdlib hash.Hash
|
||||||
|
// interface, provides a common write sink for digest verification. After
|
||||||
|
// writing is complete, calling the Verifier.Verified method will indicate
|
||||||
|
// whether or not the stream of bytes matches the target digest.
|
||||||
|
//
|
||||||
|
// Missing Features
|
||||||
|
//
|
||||||
|
// In addition to the above, we intend to add the following features to this
|
||||||
|
// package:
|
||||||
|
//
|
||||||
|
// 1. A Digester type that supports write sink digest calculation.
|
||||||
|
//
|
||||||
|
// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
|
||||||
|
//
|
||||||
|
package digest
|
||||||
45
vendor/github.com/opencontainers/go-digest/verifiers.go
generated
vendored
Normal file
45
vendor/github.com/opencontainers/go-digest/verifiers.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
// Copyright 2017 Docker, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package digest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Verifier presents a general verification interface to be used with message
|
||||||
|
// digests and other byte stream verifications. Users instantiate a Verifier
|
||||||
|
// from one of the various methods, write the data under test to it then check
|
||||||
|
// the result with the Verified method.
|
||||||
|
type Verifier interface {
|
||||||
|
io.Writer
|
||||||
|
|
||||||
|
// Verified will return true if the content written to Verifier matches
|
||||||
|
// the digest.
|
||||||
|
Verified() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type hashVerifier struct {
|
||||||
|
digest Digest
|
||||||
|
hash hash.Hash
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hv hashVerifier) Write(p []byte) (n int, err error) {
|
||||||
|
return hv.hash.Write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hv hashVerifier) Verified() bool {
|
||||||
|
return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
|
||||||
|
}
|
||||||
14
vendor/github.com/suborbital/vektor/vk/README.md
generated
vendored
14
vendor/github.com/suborbital/vektor/vk/README.md
generated
vendored
@@ -1,14 +0,0 @@
|
|||||||
# vektor API
|
|
||||||
|
|
||||||
`vk` is the vektor component that allows for easy development of API servers in Go.
|
|
||||||
|
|
||||||
Features:
|
|
||||||
|
|
||||||
- HTTPS by default using LetsEncrypt
|
|
||||||
- Easy configuration of CORS
|
|
||||||
- Built in logging
|
|
||||||
- Authentication plug-in point
|
|
||||||
- Fast HTTP router built in
|
|
||||||
|
|
||||||
Planned:
|
|
||||||
- Rate limiter
|
|
||||||
76
vendor/github.com/suborbital/vektor/vk/context.go
generated
vendored
76
vendor/github.com/suborbital/vektor/vk/context.go
generated
vendored
@@ -1,76 +0,0 @@
|
|||||||
package vk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/julienschmidt/httprouter"
|
|
||||||
"github.com/suborbital/vektor/vlog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ctxKey is a type to represent a key in the Ctx context.
|
|
||||||
type ctxKey string
|
|
||||||
|
|
||||||
// Ctx serves a similar purpose to context.Context, but has some typed fields
|
|
||||||
type Ctx struct {
|
|
||||||
Context context.Context
|
|
||||||
Log *vlog.Logger
|
|
||||||
Params httprouter.Params
|
|
||||||
RespHeaders http.Header
|
|
||||||
requestID string
|
|
||||||
scope interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCtx creates a new Ctx
|
|
||||||
func NewCtx(log *vlog.Logger, params httprouter.Params, headers http.Header) *Ctx {
|
|
||||||
ctx := &Ctx{
|
|
||||||
Context: context.Background(),
|
|
||||||
Log: log,
|
|
||||||
Params: params,
|
|
||||||
RespHeaders: headers,
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctx
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets a value on the Ctx's embedded Context (a la key/value store)
|
|
||||||
func (c *Ctx) Set(key string, val interface{}) {
|
|
||||||
realKey := ctxKey(key)
|
|
||||||
c.Context = context.WithValue(c.Context, realKey, val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get gets a value from the Ctx's embedded Context (a la key/value store)
|
|
||||||
func (c *Ctx) Get(key string) interface{} {
|
|
||||||
realKey := ctxKey(key)
|
|
||||||
val := c.Context.Value(realKey)
|
|
||||||
|
|
||||||
return val
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseScope sets an object to be the scope of the request, including setting the logger's scope
|
|
||||||
// the scope can be retrieved later with the Scope() method
|
|
||||||
func (c *Ctx) UseScope(scope interface{}) {
|
|
||||||
c.Log = c.Log.CreateScoped(scope)
|
|
||||||
|
|
||||||
c.scope = scope
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scope retrieves the context's scope
|
|
||||||
func (c *Ctx) Scope() interface{} {
|
|
||||||
return c.scope
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseRequestID is a setter for the request ID
|
|
||||||
func (c *Ctx) UseRequestID(id string) {
|
|
||||||
c.requestID = id
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequestID returns the request ID of the current request, generating one if none exists.
|
|
||||||
func (c *Ctx) RequestID() string {
|
|
||||||
if c.requestID == "" {
|
|
||||||
c.requestID = uuid.New().String()
|
|
||||||
}
|
|
||||||
|
|
||||||
return c.requestID
|
|
||||||
}
|
|
||||||
91
vendor/github.com/suborbital/vektor/vk/error.go
generated
vendored
91
vendor/github.com/suborbital/vektor/vk/error.go
generated
vendored
@@ -1,91 +0,0 @@
|
|||||||
package vk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/suborbital/vektor/vlog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Error is an interface representing a failed request
|
|
||||||
type Error interface {
|
|
||||||
Error() string // this ensures all Errors will also conform to the normal error interface
|
|
||||||
|
|
||||||
Message() string
|
|
||||||
Status() int
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorResponse is a concrete implementation of Error,
|
|
||||||
// representing a failed HTTP request
|
|
||||||
type ErrorResponse struct {
|
|
||||||
StatusCode int `json:"status"`
|
|
||||||
MessageText string `json:"message"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns a full error string
|
|
||||||
func (e *ErrorResponse) Error() string {
|
|
||||||
return fmt.Sprintf("%d: %s", e.StatusCode, e.MessageText)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Status returns the error status code
|
|
||||||
func (e *ErrorResponse) Status() int {
|
|
||||||
return e.StatusCode
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message returns the error's message
|
|
||||||
func (e *ErrorResponse) Message() string {
|
|
||||||
return e.MessageText
|
|
||||||
}
|
|
||||||
|
|
||||||
// Err returns an error with status and message
|
|
||||||
func Err(status int, message string) Error {
|
|
||||||
e := &ErrorResponse{
|
|
||||||
StatusCode: status,
|
|
||||||
MessageText: message,
|
|
||||||
}
|
|
||||||
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// E is Err for those who like terse code
|
|
||||||
func E(status int, message string) Error {
|
|
||||||
return Err(status, message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrap wraps an error in vk.Error
|
|
||||||
func Wrap(status int, err error) Error {
|
|
||||||
return Err(status, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
genericErrorResponseBytes = []byte("Internal Server Error")
|
|
||||||
genericErrorResponseCode = 500
|
|
||||||
)
|
|
||||||
|
|
||||||
// converts _something_ into bytes, best it can:
|
|
||||||
// if data is Error type, returns (status, {status: status, message: message})
|
|
||||||
// if other error, returns (500, []byte(err.Error()))
|
|
||||||
func errorOrOtherToBytes(l *vlog.Logger, err error) (int, []byte, contentType) {
|
|
||||||
statusCode := genericErrorResponseCode
|
|
||||||
|
|
||||||
// first, check if it's vk.Error interface type, and unpack it for further processing
|
|
||||||
if e, ok := err.(Error); ok {
|
|
||||||
statusCode = e.Status() // grab this in case anything fails
|
|
||||||
|
|
||||||
errResp := Err(e.Status(), e.Message()) // create a concrete instance that can be marshalled
|
|
||||||
|
|
||||||
errJSON, marshalErr := json.Marshal(errResp)
|
|
||||||
if marshalErr != nil {
|
|
||||||
// any failure results in the generic response body being used
|
|
||||||
l.ErrorString("failed to marshal vk.Error:", marshalErr.Error(), "original error:", err.Error())
|
|
||||||
|
|
||||||
return statusCode, genericErrorResponseBytes, contentTypeTextPlain
|
|
||||||
}
|
|
||||||
|
|
||||||
return statusCode, errJSON, contentTypeJSON
|
|
||||||
}
|
|
||||||
|
|
||||||
l.Warn("redacting potential unsafe error response, original error:", err.Error())
|
|
||||||
|
|
||||||
return statusCode, genericErrorResponseBytes, contentTypeTextPlain
|
|
||||||
}
|
|
||||||
140
vendor/github.com/suborbital/vektor/vk/group.go
generated
vendored
140
vendor/github.com/suborbital/vektor/vk/group.go
generated
vendored
@@ -1,140 +0,0 @@
|
|||||||
package vk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RouteGroup represents a group of routes
|
|
||||||
type RouteGroup struct {
|
|
||||||
prefix string
|
|
||||||
routes []routeHandler
|
|
||||||
middleware []Middleware
|
|
||||||
afterware []Afterware
|
|
||||||
}
|
|
||||||
|
|
||||||
type routeHandler struct {
|
|
||||||
Method string
|
|
||||||
Path string
|
|
||||||
Handler HandlerFunc
|
|
||||||
}
|
|
||||||
|
|
||||||
// Group creates a group of routes with a common prefix and middlewares
|
|
||||||
func Group(prefix string) *RouteGroup {
|
|
||||||
rg := &RouteGroup{
|
|
||||||
prefix: prefix,
|
|
||||||
routes: []routeHandler{},
|
|
||||||
middleware: []Middleware{},
|
|
||||||
afterware: []Afterware{},
|
|
||||||
}
|
|
||||||
|
|
||||||
return rg
|
|
||||||
}
|
|
||||||
|
|
||||||
// GET is a shortcut for server.Handle(http.MethodGet, path, handler)
|
|
||||||
func (g *RouteGroup) GET(path string, handler HandlerFunc) {
|
|
||||||
g.addRouteHandler(http.MethodGet, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HEAD is a shortcut for server.Handle(http.MethodHead, path, handler)
|
|
||||||
func (g *RouteGroup) HEAD(path string, handler HandlerFunc) {
|
|
||||||
g.addRouteHandler(http.MethodHead, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OPTIONS is a shortcut for server.Handle(http.MethodOptions, path, handler)
|
|
||||||
func (g *RouteGroup) OPTIONS(path string, handler HandlerFunc) {
|
|
||||||
g.addRouteHandler(http.MethodOptions, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// POST is a shortcut for server.Handle(http.MethodPost, path, handler)
|
|
||||||
func (g *RouteGroup) POST(path string, handler HandlerFunc) {
|
|
||||||
g.addRouteHandler(http.MethodPost, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PUT is a shortcut for server.Handle(http.MethodPut, path, handler)
|
|
||||||
func (g *RouteGroup) PUT(path string, handler HandlerFunc) {
|
|
||||||
g.addRouteHandler(http.MethodPut, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PATCH is a shortcut for server.Handle(http.MethodPatch, path, handler)
|
|
||||||
func (g *RouteGroup) PATCH(path string, handler HandlerFunc) {
|
|
||||||
g.addRouteHandler(http.MethodPatch, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DELETE is a shortcut for server.Handle(http.MethodDelete, path, handler)
|
|
||||||
func (g *RouteGroup) DELETE(path string, handler HandlerFunc) {
|
|
||||||
g.addRouteHandler(http.MethodDelete, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle adds a route to be handled
|
|
||||||
func (g *RouteGroup) Handle(method, path string, handler HandlerFunc) {
|
|
||||||
g.addRouteHandler(method, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddGroup adds a group of routes to this group as a subgroup.
|
|
||||||
// the subgroup's prefix is added to all of the routes it contains,
|
|
||||||
// with the resulting path being "/group.prefix/subgroup.prefix/route/path/here"
|
|
||||||
func (g *RouteGroup) AddGroup(group *RouteGroup) {
|
|
||||||
g.routes = append(g.routes, group.routeHandlers()...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Before adds middleware to the group, which are applied to every handler in the group (called before the handler)
|
|
||||||
func (g *RouteGroup) Before(middleware ...Middleware) *RouteGroup {
|
|
||||||
g.middleware = append(g.middleware, middleware...)
|
|
||||||
|
|
||||||
return g
|
|
||||||
}
|
|
||||||
|
|
||||||
// After adds afterware to the group, which are applied to every handler in the group (called after the handler)
|
|
||||||
func (g *RouteGroup) After(afterware ...Afterware) *RouteGroup {
|
|
||||||
g.afterware = append(g.afterware, afterware...)
|
|
||||||
|
|
||||||
return g
|
|
||||||
}
|
|
||||||
|
|
||||||
// routeHandlers computes the "full" path for each handler, and creates
|
|
||||||
// a HandlerFunc that chains together the group's middlewares
|
|
||||||
// before calling the inner HandlerFunc. It can be called 'recursively'
|
|
||||||
// since groups can be added to groups
|
|
||||||
func (g *RouteGroup) routeHandlers() []routeHandler {
|
|
||||||
routes := make([]routeHandler, len(g.routes))
|
|
||||||
|
|
||||||
for i, r := range g.routes {
|
|
||||||
fullPath := fmt.Sprintf("%s%s", ensureLeadingSlash(g.prefix), ensureLeadingSlash(r.Path))
|
|
||||||
augR := routeHandler{
|
|
||||||
Method: r.Method,
|
|
||||||
Path: fullPath,
|
|
||||||
Handler: augmentHandler(r.Handler, g.middleware, g.afterware),
|
|
||||||
}
|
|
||||||
|
|
||||||
routes[i] = augR
|
|
||||||
}
|
|
||||||
|
|
||||||
return routes
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *RouteGroup) addRouteHandler(method string, path string, handler HandlerFunc) {
|
|
||||||
rh := routeHandler{
|
|
||||||
Method: method,
|
|
||||||
Path: path,
|
|
||||||
Handler: handler,
|
|
||||||
}
|
|
||||||
|
|
||||||
g.routes = append(g.routes, rh)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *RouteGroup) routePrefix() string {
|
|
||||||
return g.prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
func ensureLeadingSlash(path string) string {
|
|
||||||
if path == "" {
|
|
||||||
// handle the "root group" case
|
|
||||||
return ""
|
|
||||||
} else if !strings.HasPrefix(path, "/") {
|
|
||||||
path = "/" + path
|
|
||||||
}
|
|
||||||
|
|
||||||
return path
|
|
||||||
}
|
|
||||||
80
vendor/github.com/suborbital/vektor/vk/middleware.go
generated
vendored
80
vendor/github.com/suborbital/vektor/vk/middleware.go
generated
vendored
@@ -1,80 +0,0 @@
|
|||||||
package vk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Middleware represents a handler that runs on a request before reaching its handler
|
|
||||||
type Middleware func(*http.Request, *Ctx) error
|
|
||||||
|
|
||||||
// Afterware represents a handler that runs on a request after the handler has dealt with the request
|
|
||||||
type Afterware func(*http.Request, *Ctx)
|
|
||||||
|
|
||||||
// ContentTypeMiddleware allows the content-type to be set
|
|
||||||
func ContentTypeMiddleware(contentType string) Middleware {
|
|
||||||
return func(r *http.Request, ctx *Ctx) error {
|
|
||||||
ctx.RespHeaders.Set(contentTypeHeaderKey, contentType)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CORSMiddleware enables CORS with the given domain for a route
|
|
||||||
// pass "*" to allow all domains, or empty string to allow none
|
|
||||||
func CORSMiddleware(domain string) Middleware {
|
|
||||||
return func(r *http.Request, ctx *Ctx) error {
|
|
||||||
enableCors(ctx, domain)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CORSHandler enables CORS for a route
|
|
||||||
// pass "*" to allow all domains, or empty string to allow none
|
|
||||||
func CORSHandler(domain string) HandlerFunc {
|
|
||||||
return func(r *http.Request, ctx *Ctx) (interface{}, error) {
|
|
||||||
enableCors(ctx, domain)
|
|
||||||
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func enableCors(ctx *Ctx, domain string) {
|
|
||||||
if domain != "" {
|
|
||||||
ctx.RespHeaders.Set("Access-Control-Allow-Origin", domain)
|
|
||||||
ctx.RespHeaders.Set("X-Requested-With", "XMLHttpRequest")
|
|
||||||
ctx.RespHeaders.Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, Authorization, cache-control")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func loggerMiddleware() Middleware {
|
|
||||||
return func(r *http.Request, ctx *Ctx) error {
|
|
||||||
ctx.Log.Info(r.Method, r.URL.String())
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// generate a HandlerFunc that passes the request through a set of Middleware first and Afterware after
|
|
||||||
func augmentHandler(inner HandlerFunc, middleware []Middleware, afterware []Afterware) HandlerFunc {
|
|
||||||
return func(r *http.Request, ctx *Ctx) (interface{}, error) {
|
|
||||||
defer func() {
|
|
||||||
// run the afterware (which cannot affect the response)
|
|
||||||
// even if something in the request chain fails
|
|
||||||
for _, a := range afterware {
|
|
||||||
a(r, ctx)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// run the middleware (which can error to stop progression)
|
|
||||||
for _, m := range middleware {
|
|
||||||
if err := m(r, ctx); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := inner(r, ctx)
|
|
||||||
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
73
vendor/github.com/suborbital/vektor/vk/optionmodifiers.go
generated
vendored
73
vendor/github.com/suborbital/vektor/vk/optionmodifiers.go
generated
vendored
@@ -1,73 +0,0 @@
|
|||||||
package vk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/suborbital/vektor/vlog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OptionsModifier takes an options struct and returns a modified Options struct
|
|
||||||
type OptionsModifier func(*Options)
|
|
||||||
|
|
||||||
// UseDomain sets the server to use a particular domain for TLS
|
|
||||||
func UseDomain(domain string) OptionsModifier {
|
|
||||||
return func(o *Options) {
|
|
||||||
o.Domain = domain
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseTLSConfig sets a TLS config that will be used for HTTPS
|
|
||||||
// This will take precedence over the Domain option in all cases
|
|
||||||
func UseTLSConfig(config *tls.Config) OptionsModifier {
|
|
||||||
return func(o *Options) {
|
|
||||||
o.TLSConfig = config
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseTLSPort sets the HTTPS port to be used:
|
|
||||||
func UseTLSPort(port int) OptionsModifier {
|
|
||||||
return func(o *Options) {
|
|
||||||
o.TLSPort = port
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseHTTPPort sets the HTTP port to be used:
|
|
||||||
// If domain is set, HTTP port will be used for LetsEncrypt challenge server
|
|
||||||
// If domain is NOT set, this option will put VK in insecure HTTP mode
|
|
||||||
func UseHTTPPort(port int) OptionsModifier {
|
|
||||||
return func(o *Options) {
|
|
||||||
o.HTTPPort = port
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseLogger allows a custom logger to be used
|
|
||||||
func UseLogger(logger *vlog.Logger) OptionsModifier {
|
|
||||||
return func(o *Options) {
|
|
||||||
o.Logger = logger
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseAppName allows an app name to be set (for vanity only, really....)
|
|
||||||
func UseAppName(name string) OptionsModifier {
|
|
||||||
return func(o *Options) {
|
|
||||||
o.AppName = name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseEnvPrefix uses the provided env prefix (default VK) when looking up other options such as `VK_HTTP_PORT`
|
|
||||||
func UseEnvPrefix(prefix string) OptionsModifier {
|
|
||||||
return func(o *Options) {
|
|
||||||
o.EnvPrefix = prefix
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseInspector sets a function that will be allowed to inspect every HTTP request
|
|
||||||
// before it reaches VK's internal router, but cannot modify said request or affect
|
|
||||||
// the handling of said request in any way. Use at your own risk, as it may introduce
|
|
||||||
// performance issues if not used correctly.
|
|
||||||
func UseInspector(isp func(http.Request)) OptionsModifier {
|
|
||||||
return func(o *Options) {
|
|
||||||
o.PreRouterInspector = isp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
95
vendor/github.com/suborbital/vektor/vk/options.go
generated
vendored
95
vendor/github.com/suborbital/vektor/vk/options.go
generated
vendored
@@ -1,95 +0,0 @@
|
|||||||
package vk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/tls"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sethvargo/go-envconfig"
|
|
||||||
"github.com/suborbital/vektor/vlog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Options are the available options for Server
|
|
||||||
type Options struct {
|
|
||||||
AppName string `env:"_APP_NAME"`
|
|
||||||
Domain string `env:"_DOMAIN"`
|
|
||||||
HTTPPort int `env:"_HTTP_PORT"`
|
|
||||||
TLSPort int `env:"_TLS_PORT"`
|
|
||||||
TLSConfig *tls.Config `env:"-"`
|
|
||||||
EnvPrefix string `env:"-"`
|
|
||||||
Logger *vlog.Logger `env:"-"`
|
|
||||||
|
|
||||||
PreRouterInspector func(http.Request) `env:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func newOptsWithModifiers(mods ...OptionsModifier) *Options {
|
|
||||||
options := &Options{}
|
|
||||||
// loop through the provided options and apply the
|
|
||||||
// modifier function to the options object
|
|
||||||
for _, mod := range mods {
|
|
||||||
mod(options)
|
|
||||||
}
|
|
||||||
|
|
||||||
envPrefix := defaultEnvPrefix
|
|
||||||
if options.EnvPrefix != "" {
|
|
||||||
envPrefix = options.EnvPrefix
|
|
||||||
}
|
|
||||||
|
|
||||||
options.finalize(envPrefix)
|
|
||||||
|
|
||||||
return options
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShouldUseTLS returns true if domain is set and/or TLS is configured
|
|
||||||
func (o *Options) ShouldUseTLS() bool {
|
|
||||||
return o.Domain != "" || o.TLSConfig != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPPortSet returns true if the HTTP port is set
|
|
||||||
func (o *Options) HTTPPortSet() bool {
|
|
||||||
return o.HTTPPort != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShouldUseHTTP returns true if insecure HTTP should be used
|
|
||||||
func (o *Options) ShouldUseHTTP() bool {
|
|
||||||
return !o.ShouldUseTLS() && o.HTTPPortSet()
|
|
||||||
}
|
|
||||||
|
|
||||||
// finalize "locks in" the options by overriding any existing options with the version from the environment, and setting the default logger if needed
|
|
||||||
func (o *Options) finalize(prefix string) {
|
|
||||||
if o.Logger == nil {
|
|
||||||
o.Logger = vlog.Default(vlog.EnvPrefix(prefix))
|
|
||||||
}
|
|
||||||
|
|
||||||
// if no inspector was set, create an empty one
|
|
||||||
if o.PreRouterInspector == nil {
|
|
||||||
o.PreRouterInspector = func(_ http.Request) {}
|
|
||||||
}
|
|
||||||
|
|
||||||
envOpts := Options{}
|
|
||||||
if err := envconfig.ProcessWith(context.Background(), &envOpts, envconfig.PrefixLookuper(prefix, envconfig.OsLookuper())); err != nil {
|
|
||||||
o.Logger.Error(errors.Wrap(err, "[vk] failed to ProcessWith environment config"))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
o.replaceFieldsIfNeeded(&envOpts)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Options) replaceFieldsIfNeeded(replacement *Options) {
|
|
||||||
if replacement.AppName != "" {
|
|
||||||
o.AppName = replacement.AppName
|
|
||||||
}
|
|
||||||
|
|
||||||
if replacement.Domain != "" {
|
|
||||||
o.Domain = replacement.Domain
|
|
||||||
}
|
|
||||||
|
|
||||||
if replacement.HTTPPort != 0 {
|
|
||||||
o.HTTPPort = replacement.HTTPPort
|
|
||||||
}
|
|
||||||
|
|
||||||
if replacement.TLSPort != 0 {
|
|
||||||
o.TLSPort = replacement.TLSPort
|
|
||||||
}
|
|
||||||
}
|
|
||||||
77
vendor/github.com/suborbital/vektor/vk/response.go
generated
vendored
77
vendor/github.com/suborbital/vektor/vk/response.go
generated
vendored
@@ -1,77 +0,0 @@
|
|||||||
package vk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/suborbital/vektor/vlog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Response represents a non-error HTTP response
|
|
||||||
type Response struct {
|
|
||||||
status int
|
|
||||||
body interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Respond returns a filled-in response
|
|
||||||
func Respond(status int, body interface{}) Response {
|
|
||||||
r := Response{
|
|
||||||
status: status,
|
|
||||||
body: body,
|
|
||||||
}
|
|
||||||
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// R is `Respond` for those who prefer terse code
|
|
||||||
func R(status int, body interface{}) Response {
|
|
||||||
return Respond(status, body)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: add convenience helpers for status codes
|
|
||||||
|
|
||||||
const (
|
|
||||||
contentTypeJSON contentType = "application/json"
|
|
||||||
contentTypeTextPlain contentType = "text/plain"
|
|
||||||
contentTypeOctetStream contentType = "application/octet-stream"
|
|
||||||
)
|
|
||||||
|
|
||||||
// converts _something_ into bytes, best it can:
|
|
||||||
// if data is Response type, returns (status, body processed as below)
|
|
||||||
// if bytes, return (200, bytes)
|
|
||||||
// if string, return (200, []byte(string))
|
|
||||||
// if struct, return (200, json(struct))
|
|
||||||
// otherwise, return (500, nil)
|
|
||||||
func responseOrOtherToBytes(l *vlog.Logger, data interface{}) (int, []byte, contentType) {
|
|
||||||
if data == nil {
|
|
||||||
return http.StatusNoContent, []byte{}, contentTypeTextPlain
|
|
||||||
}
|
|
||||||
|
|
||||||
statusCode := http.StatusOK
|
|
||||||
realData := data
|
|
||||||
|
|
||||||
// first, check if it's response type, and unpack it for further processing
|
|
||||||
if r, ok := data.(Response); ok {
|
|
||||||
statusCode = r.status
|
|
||||||
realData = r.body
|
|
||||||
}
|
|
||||||
|
|
||||||
// if data is []byte or string, return it as-is
|
|
||||||
if b, ok := realData.([]byte); ok {
|
|
||||||
return statusCode, b, contentTypeOctetStream
|
|
||||||
} else if s, ok := realData.(string); ok {
|
|
||||||
return statusCode, []byte(s), contentTypeTextPlain
|
|
||||||
}
|
|
||||||
|
|
||||||
// otherwise, assume it's a struct of some kind,
|
|
||||||
// so JSON marshal it and return it
|
|
||||||
json, err := json.Marshal(realData)
|
|
||||||
if err != nil {
|
|
||||||
l.Error(errors.Wrap(err, "failed to Marshal response struct"))
|
|
||||||
|
|
||||||
return genericErrorResponseCode, []byte(genericErrorResponseBytes), contentTypeTextPlain
|
|
||||||
}
|
|
||||||
|
|
||||||
return statusCode, json, contentTypeJSON
|
|
||||||
}
|
|
||||||
144
vendor/github.com/suborbital/vektor/vk/router.go
generated
vendored
144
vendor/github.com/suborbital/vektor/vk/router.go
generated
vendored
@@ -1,144 +0,0 @@
|
|||||||
package vk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/julienschmidt/httprouter"
|
|
||||||
"github.com/suborbital/vektor/vlog"
|
|
||||||
)
|
|
||||||
|
|
||||||
const contentTypeHeaderKey = "Content-Type"
|
|
||||||
|
|
||||||
// used internally to convey content types
|
|
||||||
type contentType string
|
|
||||||
|
|
||||||
// HandlerFunc is the vk version of http.HandlerFunc
|
|
||||||
// instead of exposing the ResponseWriter, the function instead returns
|
|
||||||
// an object and an error, which are handled as described in `With` below
|
|
||||||
type HandlerFunc func(*http.Request, *Ctx) (interface{}, error)
|
|
||||||
|
|
||||||
// Router handles the responses on behalf of the server
|
|
||||||
type Router struct {
|
|
||||||
*RouteGroup // the "root" RouteGroup that is mounted at server start
|
|
||||||
hrouter *httprouter.Router // the internal 'actual' router
|
|
||||||
finalizeOnce sync.Once // ensure that the root only gets mounted once
|
|
||||||
|
|
||||||
log *vlog.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
type defaultScope struct {
|
|
||||||
RequestID string `json:"request_id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRouter creates a new Router
|
|
||||||
func NewRouter(logger *vlog.Logger) *Router {
|
|
||||||
// add the logger middleware
|
|
||||||
middleware := []Middleware{loggerMiddleware()}
|
|
||||||
|
|
||||||
r := &Router{
|
|
||||||
RouteGroup: Group("").Before(middleware...),
|
|
||||||
hrouter: httprouter.New(),
|
|
||||||
finalizeOnce: sync.Once{},
|
|
||||||
log: logger,
|
|
||||||
}
|
|
||||||
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleHTTP handles a classic Go HTTP handlerFunc
|
|
||||||
func (rt *Router) HandleHTTP(method, path string, handler http.HandlerFunc) {
|
|
||||||
rt.hrouter.Handle(method, path, func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
|
||||||
handler(w, r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Finalize mounts the root group to prepare the Router to handle requests
|
|
||||||
func (rt *Router) Finalize() {
|
|
||||||
rt.finalizeOnce.Do(func() {
|
|
||||||
rt.mountGroup(rt.RouteGroup)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
//ServeHTTP serves HTTP requests
|
|
||||||
func (rt *Router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// check to see if the router has a handler for this path
|
|
||||||
handler, params, _ := rt.hrouter.Lookup(r.Method, r.URL.Path)
|
|
||||||
|
|
||||||
if handler != nil {
|
|
||||||
handler(w, r, params)
|
|
||||||
} else {
|
|
||||||
rt.log.Debug("not handled:", r.Method, r.URL.String())
|
|
||||||
|
|
||||||
// let httprouter handle the fallthrough cases
|
|
||||||
rt.hrouter.ServeHTTP(w, r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// mountGroup adds a group of handlers to the httprouter
|
|
||||||
func (rt *Router) mountGroup(group *RouteGroup) {
|
|
||||||
for _, r := range group.routeHandlers() {
|
|
||||||
rt.log.Debug("mounting route", r.Method, r.Path)
|
|
||||||
rt.hrouter.Handle(r.Method, r.Path, rt.handleWrap(r.Handler))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleWrap returns an httprouter.Handle that uses the `inner` vk.HandleFunc to handle the request
|
|
||||||
//
|
|
||||||
// inner returns a body and an error;
|
|
||||||
// the body can can be:
|
|
||||||
// - a vk.Response object (status and body are written to w)
|
|
||||||
// - []byte (written directly to w, status 200)
|
|
||||||
// - a struct (marshalled to JSON and written to w, status 200)
|
|
||||||
//
|
|
||||||
// the error can be:
|
|
||||||
// - a vk.Error type (status and message are written to w)
|
|
||||||
// - any other error object (status 500 and error.Error() are written to w)
|
|
||||||
//
|
|
||||||
func (rt *Router) handleWrap(inner HandlerFunc) httprouter.Handle {
|
|
||||||
return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
|
||||||
var status int
|
|
||||||
var body []byte
|
|
||||||
var detectedCType contentType
|
|
||||||
|
|
||||||
// create a context handleWrap the configured logger
|
|
||||||
// (and use the ctx.Log for all remaining logging
|
|
||||||
// in case a scope was set on it)
|
|
||||||
ctx := NewCtx(rt.log, params, w.Header())
|
|
||||||
ctx.UseScope(defaultScope{ctx.RequestID()})
|
|
||||||
|
|
||||||
resp, err := inner(r, ctx)
|
|
||||||
if err != nil {
|
|
||||||
status, body, detectedCType = errorOrOtherToBytes(ctx.Log, err)
|
|
||||||
} else {
|
|
||||||
status, body, detectedCType = responseOrOtherToBytes(ctx.Log, resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if anything in the handler chain set the content type
|
|
||||||
// header, and only use the auto-detected value if it wasn't
|
|
||||||
headerCType := w.Header().Get(contentTypeHeaderKey)
|
|
||||||
shouldSetCType := headerCType == ""
|
|
||||||
|
|
||||||
ctx.Log.Debug("post-handler contenttype:", string(headerCType))
|
|
||||||
|
|
||||||
// if no contentType was set in the middleware chain,
|
|
||||||
// then set it here based on the type detected
|
|
||||||
if shouldSetCType {
|
|
||||||
ctx.Log.Debug("setting auto-detected contenttype:", string(detectedCType))
|
|
||||||
w.Header().Set(contentTypeHeaderKey, string(detectedCType))
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(status)
|
|
||||||
w.Write(body)
|
|
||||||
|
|
||||||
ctx.Log.Info(r.Method, r.URL.String(), fmt.Sprintf("completed (%d: %s)", status, http.StatusText(status)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// canHandle returns true if there's a registered handler that can
|
|
||||||
// handle the method and path provided or not
|
|
||||||
func (rt *Router) canHandle(method, path string) bool {
|
|
||||||
handler, _, _ := rt.hrouter.Lookup(method, path)
|
|
||||||
return handler != nil
|
|
||||||
}
|
|
||||||
289
vendor/github.com/suborbital/vektor/vk/server.go
generated
vendored
289
vendor/github.com/suborbital/vektor/vk/server.go
generated
vendored
@@ -1,289 +0,0 @@
|
|||||||
package vk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/acme/autocert"
|
|
||||||
)
|
|
||||||
|
|
||||||
const defaultEnvPrefix = "VK"
|
|
||||||
|
|
||||||
// Server represents a vektor API server
|
|
||||||
type Server struct {
|
|
||||||
router *Router
|
|
||||||
lock sync.RWMutex
|
|
||||||
started atomic.Value
|
|
||||||
|
|
||||||
server *http.Server
|
|
||||||
options *Options
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new vektor API server
|
|
||||||
func New(opts ...OptionsModifier) *Server {
|
|
||||||
options := newOptsWithModifiers(opts...)
|
|
||||||
|
|
||||||
router := NewRouter(options.Logger)
|
|
||||||
|
|
||||||
s := &Server{
|
|
||||||
router: router,
|
|
||||||
lock: sync.RWMutex{},
|
|
||||||
started: atomic.Value{},
|
|
||||||
options: options,
|
|
||||||
}
|
|
||||||
|
|
||||||
s.started.Store(false)
|
|
||||||
|
|
||||||
// yes this creates a circular reference,
|
|
||||||
// but the VK server and HTTP server are
|
|
||||||
// extremely tightly wound together so
|
|
||||||
// we have to make this compromise
|
|
||||||
s.server = createGoServer(options, s)
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start starts the server listening
|
|
||||||
func (s *Server) Start() error {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
err := errors.New("server already started")
|
|
||||||
s.options.Logger.Error(err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// lock the router modifiers (GET, POST etc.)
|
|
||||||
s.started.Store(true)
|
|
||||||
|
|
||||||
// mount the root set of routes before starting
|
|
||||||
s.router.Finalize()
|
|
||||||
|
|
||||||
if s.options.AppName != "" {
|
|
||||||
s.options.Logger.Info("starting", s.options.AppName, "...")
|
|
||||||
}
|
|
||||||
|
|
||||||
s.options.Logger.Info("serving on", s.server.Addr)
|
|
||||||
|
|
||||||
if !s.options.HTTPPortSet() && !s.options.ShouldUseTLS() {
|
|
||||||
s.options.Logger.ErrorString("domain and HTTP port options are both unset, server will start up but fail to acquire a certificate. reconfigure and restart")
|
|
||||||
} else if s.options.ShouldUseHTTP() {
|
|
||||||
return s.server.ListenAndServe()
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.server.ListenAndServeTLS("", "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestStart "starts" the server for automated testing with vtest
|
|
||||||
func (s *Server) TestStart() error {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
err := errors.New("server already started")
|
|
||||||
s.options.Logger.Error(err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// lock the router modifiers (GET, POST etc.)
|
|
||||||
s.started.Store(true)
|
|
||||||
|
|
||||||
// mount the root set of routes before starting
|
|
||||||
s.router.Finalize()
|
|
||||||
|
|
||||||
if s.options.AppName != "" {
|
|
||||||
s.options.Logger.Info("starting", s.options.AppName, "in Test Mode...")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeHTTP serves HTTP requests using the internal router while allowing
|
|
||||||
// said router to be swapped out underneath at any time in a thread-safe way
|
|
||||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// run the inspector with a dereferenced pointer
|
|
||||||
// so that it can view but not change said request
|
|
||||||
//
|
|
||||||
// we intentionally run this before the lock as it's
|
|
||||||
// possible the inspector may trigger a router-swap
|
|
||||||
// and that would cause a nasty deadlock
|
|
||||||
s.options.PreRouterInspector(*r)
|
|
||||||
|
|
||||||
// now lock to ensure the router isn't being swapped
|
|
||||||
// out from underneath us while we're serving this req
|
|
||||||
s.lock.RLock()
|
|
||||||
defer s.lock.RUnlock()
|
|
||||||
|
|
||||||
s.router.ServeHTTP(w, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SwapRouter allows swapping VK's router out in realtime while
|
|
||||||
// continuing to serve requests in the background
|
|
||||||
func (s *Server) SwapRouter(router *Router) {
|
|
||||||
router.Finalize()
|
|
||||||
|
|
||||||
// lock after Finalizing the router so
|
|
||||||
// the lock is released as quickly as possible
|
|
||||||
s.lock.Lock()
|
|
||||||
defer s.lock.Unlock()
|
|
||||||
|
|
||||||
s.router = router
|
|
||||||
}
|
|
||||||
|
|
||||||
// CanHandle returns true if the server can handle a given method and path
|
|
||||||
func (s *Server) CanHandle(method, path string) bool {
|
|
||||||
s.lock.RLock()
|
|
||||||
defer s.lock.RUnlock()
|
|
||||||
|
|
||||||
return s.router.canHandle(method, path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GET is a shortcut for router.Handle(http.MethodGet, path, handle)
|
|
||||||
func (s *Server) GET(path string, handler HandlerFunc) {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.router.GET(path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HEAD is a shortcut for router.Handle(http.MethodHead, path, handle)
|
|
||||||
func (s *Server) HEAD(path string, handler HandlerFunc) {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.router.HEAD(path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OPTIONS is a shortcut for router.Handle(http.MethodOptions, path, handle)
|
|
||||||
func (s *Server) OPTIONS(path string, handler HandlerFunc) {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.router.OPTIONS(path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// POST is a shortcut for router.Handle(http.MethodPost, path, handle)
|
|
||||||
func (s *Server) POST(path string, handler HandlerFunc) {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.router.POST(path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PUT is a shortcut for router.Handle(http.MethodPut, path, handle)
|
|
||||||
func (s *Server) PUT(path string, handler HandlerFunc) {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.router.PUT(path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PATCH is a shortcut for router.Handle(http.MethodPatch, path, handle)
|
|
||||||
func (s *Server) PATCH(path string, handler HandlerFunc) {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.router.PATCH(path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DELETE is a shortcut for router.Handle(http.MethodDelete, path, handle)
|
|
||||||
func (s *Server) DELETE(path string, handler HandlerFunc) {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.router.DELETE(path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle adds a route to be handled
|
|
||||||
func (s *Server) Handle(method, path string, handler HandlerFunc) {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.router.Handle(method, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddGroup adds a RouteGroup to be handled
|
|
||||||
func (s *Server) AddGroup(group *RouteGroup) {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.router.AddGroup(group)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleHTTP allows vk to handle a standard http.HandlerFunc
|
|
||||||
func (s *Server) HandleHTTP(method, path string, handler http.HandlerFunc) {
|
|
||||||
if s.started.Load().(bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.router.HandleHTTP(method, path, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func createGoServer(options *Options, handler http.Handler) *http.Server {
|
|
||||||
if useHTTP := options.ShouldUseHTTP(); useHTTP {
|
|
||||||
return goHTTPServerWithPort(options, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
return goTLSServerWithDomain(options, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func goTLSServerWithDomain(options *Options, handler http.Handler) *http.Server {
|
|
||||||
if options.TLSConfig != nil {
|
|
||||||
options.Logger.Info("configured for HTTPS with custom configuration")
|
|
||||||
} else if options.Domain != "" {
|
|
||||||
options.Logger.Info("configured for HTTPS using domain", options.Domain)
|
|
||||||
}
|
|
||||||
|
|
||||||
tlsConfig := options.TLSConfig
|
|
||||||
|
|
||||||
if tlsConfig == nil {
|
|
||||||
m := &autocert.Manager{
|
|
||||||
Cache: autocert.DirCache("~/.autocert"),
|
|
||||||
Prompt: autocert.AcceptTOS,
|
|
||||||
HostPolicy: autocert.HostWhitelist(options.Domain),
|
|
||||||
}
|
|
||||||
|
|
||||||
addr := fmt.Sprintf(":%d", options.HTTPPort)
|
|
||||||
if options.HTTPPort == 0 {
|
|
||||||
addr = ":8080"
|
|
||||||
}
|
|
||||||
|
|
||||||
options.Logger.Info("serving TLS challenges on", addr)
|
|
||||||
|
|
||||||
go http.ListenAndServe(addr, m.HTTPHandler(nil))
|
|
||||||
|
|
||||||
tlsConfig = &tls.Config{GetCertificate: m.GetCertificate}
|
|
||||||
}
|
|
||||||
|
|
||||||
addr := fmt.Sprintf(":%d", options.TLSPort)
|
|
||||||
if options.TLSPort == 0 {
|
|
||||||
addr = ":443"
|
|
||||||
}
|
|
||||||
|
|
||||||
s := &http.Server{
|
|
||||||
Addr: addr,
|
|
||||||
TLSConfig: tlsConfig,
|
|
||||||
Handler: handler,
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func goHTTPServerWithPort(options *Options, handler http.Handler) *http.Server {
|
|
||||||
options.Logger.Warn("configured to use HTTP with no TLS")
|
|
||||||
|
|
||||||
s := &http.Server{
|
|
||||||
Addr: fmt.Sprintf(":%d", options.HTTPPort),
|
|
||||||
Handler: handler,
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
1102
vendor/golang.org/x/crypto/acme/acme.go
generated
vendored
1102
vendor/golang.org/x/crypto/acme/acme.go
generated
vendored
File diff suppressed because it is too large
Load Diff
1249
vendor/golang.org/x/crypto/acme/autocert/autocert.go
generated
vendored
1249
vendor/golang.org/x/crypto/acme/autocert/autocert.go
generated
vendored
File diff suppressed because it is too large
Load Diff
136
vendor/golang.org/x/crypto/acme/autocert/cache.go
generated
vendored
136
vendor/golang.org/x/crypto/acme/autocert/cache.go
generated
vendored
@@ -1,136 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package autocert
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrCacheMiss is returned when a certificate is not found in cache.
|
|
||||||
var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss")
|
|
||||||
|
|
||||||
// Cache is used by Manager to store and retrieve previously obtained certificates
|
|
||||||
// and other account data as opaque blobs.
|
|
||||||
//
|
|
||||||
// Cache implementations should not rely on the key naming pattern. Keys can
|
|
||||||
// include any printable ASCII characters, except the following: \/:*?"<>|
|
|
||||||
type Cache interface {
|
|
||||||
// Get returns a certificate data for the specified key.
|
|
||||||
// If there's no such key, Get returns ErrCacheMiss.
|
|
||||||
Get(ctx context.Context, key string) ([]byte, error)
|
|
||||||
|
|
||||||
// Put stores the data in the cache under the specified key.
|
|
||||||
// Underlying implementations may use any data storage format,
|
|
||||||
// as long as the reverse operation, Get, results in the original data.
|
|
||||||
Put(ctx context.Context, key string, data []byte) error
|
|
||||||
|
|
||||||
// Delete removes a certificate data from the cache under the specified key.
|
|
||||||
// If there's no such key in the cache, Delete returns nil.
|
|
||||||
Delete(ctx context.Context, key string) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirCache implements Cache using a directory on the local filesystem.
|
|
||||||
// If the directory does not exist, it will be created with 0700 permissions.
|
|
||||||
type DirCache string
|
|
||||||
|
|
||||||
// Get reads a certificate data from the specified file name.
|
|
||||||
func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) {
|
|
||||||
name = filepath.Join(string(d), name)
|
|
||||||
var (
|
|
||||||
data []byte
|
|
||||||
err error
|
|
||||||
done = make(chan struct{})
|
|
||||||
)
|
|
||||||
go func() {
|
|
||||||
data, err = ioutil.ReadFile(name)
|
|
||||||
close(done)
|
|
||||||
}()
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil, ctx.Err()
|
|
||||||
case <-done:
|
|
||||||
}
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil, ErrCacheMiss
|
|
||||||
}
|
|
||||||
return data, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put writes the certificate data to the specified file name.
|
|
||||||
// The file will be created with 0600 permissions.
|
|
||||||
func (d DirCache) Put(ctx context.Context, name string, data []byte) error {
|
|
||||||
if err := os.MkdirAll(string(d), 0700); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
done := make(chan struct{})
|
|
||||||
var err error
|
|
||||||
go func() {
|
|
||||||
defer close(done)
|
|
||||||
var tmp string
|
|
||||||
if tmp, err = d.writeTempFile(name, data); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer os.Remove(tmp)
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
// Don't overwrite the file if the context was canceled.
|
|
||||||
default:
|
|
||||||
newName := filepath.Join(string(d), name)
|
|
||||||
err = os.Rename(tmp, newName)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
case <-done:
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes the specified file name.
|
|
||||||
func (d DirCache) Delete(ctx context.Context, name string) error {
|
|
||||||
name = filepath.Join(string(d), name)
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
done = make(chan struct{})
|
|
||||||
)
|
|
||||||
go func() {
|
|
||||||
err = os.Remove(name)
|
|
||||||
close(done)
|
|
||||||
}()
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
case <-done:
|
|
||||||
}
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeTempFile writes b to a temporary file, closes the file and returns its path.
|
|
||||||
func (d DirCache) writeTempFile(prefix string, b []byte) (name string, reterr error) {
|
|
||||||
// TempFile uses 0600 permissions
|
|
||||||
f, err := ioutil.TempFile(string(d), prefix)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if reterr != nil {
|
|
||||||
os.Remove(f.Name())
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
if _, err := f.Write(b); err != nil {
|
|
||||||
f.Close()
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return f.Name(), f.Close()
|
|
||||||
}
|
|
||||||
155
vendor/golang.org/x/crypto/acme/autocert/listener.go
generated
vendored
155
vendor/golang.org/x/crypto/acme/autocert/listener.go
generated
vendored
@@ -1,155 +0,0 @@
|
|||||||
// Copyright 2017 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package autocert
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewListener returns a net.Listener that listens on the standard TLS
|
|
||||||
// port (443) on all interfaces and returns *tls.Conn connections with
|
|
||||||
// LetsEncrypt certificates for the provided domain or domains.
|
|
||||||
//
|
|
||||||
// It enables one-line HTTPS servers:
|
|
||||||
//
|
|
||||||
// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler))
|
|
||||||
//
|
|
||||||
// NewListener is a convenience function for a common configuration.
|
|
||||||
// More complex or custom configurations can use the autocert.Manager
|
|
||||||
// type instead.
|
|
||||||
//
|
|
||||||
// Use of this function implies acceptance of the LetsEncrypt Terms of
|
|
||||||
// Service. If domains is not empty, the provided domains are passed
|
|
||||||
// to HostWhitelist. If domains is empty, the listener will do
|
|
||||||
// LetsEncrypt challenges for any requested domain, which is not
|
|
||||||
// recommended.
|
|
||||||
//
|
|
||||||
// Certificates are cached in a "golang-autocert" directory under an
|
|
||||||
// operating system-specific cache or temp directory. This may not
|
|
||||||
// be suitable for servers spanning multiple machines.
|
|
||||||
//
|
|
||||||
// The returned listener uses a *tls.Config that enables HTTP/2, and
|
|
||||||
// should only be used with servers that support HTTP/2.
|
|
||||||
//
|
|
||||||
// The returned Listener also enables TCP keep-alives on the accepted
|
|
||||||
// connections. The returned *tls.Conn are returned before their TLS
|
|
||||||
// handshake has completed.
|
|
||||||
func NewListener(domains ...string) net.Listener {
|
|
||||||
m := &Manager{
|
|
||||||
Prompt: AcceptTOS,
|
|
||||||
}
|
|
||||||
if len(domains) > 0 {
|
|
||||||
m.HostPolicy = HostWhitelist(domains...)
|
|
||||||
}
|
|
||||||
dir := cacheDir()
|
|
||||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
|
||||||
log.Printf("warning: autocert.NewListener not using a cache: %v", err)
|
|
||||||
} else {
|
|
||||||
m.Cache = DirCache(dir)
|
|
||||||
}
|
|
||||||
return m.Listener()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Listener listens on the standard TLS port (443) on all interfaces
|
|
||||||
// and returns a net.Listener returning *tls.Conn connections.
|
|
||||||
//
|
|
||||||
// The returned listener uses a *tls.Config that enables HTTP/2, and
|
|
||||||
// should only be used with servers that support HTTP/2.
|
|
||||||
//
|
|
||||||
// The returned Listener also enables TCP keep-alives on the accepted
|
|
||||||
// connections. The returned *tls.Conn are returned before their TLS
|
|
||||||
// handshake has completed.
|
|
||||||
//
|
|
||||||
// Unlike NewListener, it is the caller's responsibility to initialize
|
|
||||||
// the Manager m's Prompt, Cache, HostPolicy, and other desired options.
|
|
||||||
func (m *Manager) Listener() net.Listener {
|
|
||||||
ln := &listener{
|
|
||||||
conf: m.TLSConfig(),
|
|
||||||
}
|
|
||||||
ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443")
|
|
||||||
return ln
|
|
||||||
}
|
|
||||||
|
|
||||||
type listener struct {
|
|
||||||
conf *tls.Config
|
|
||||||
|
|
||||||
tcpListener net.Listener
|
|
||||||
tcpListenErr error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ln *listener) Accept() (net.Conn, error) {
|
|
||||||
if ln.tcpListenErr != nil {
|
|
||||||
return nil, ln.tcpListenErr
|
|
||||||
}
|
|
||||||
conn, err := ln.tcpListener.Accept()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
tcpConn := conn.(*net.TCPConn)
|
|
||||||
|
|
||||||
// Because Listener is a convenience function, help out with
|
|
||||||
// this too. This is not possible for the caller to set once
|
|
||||||
// we return a *tcp.Conn wrapping an inaccessible net.Conn.
|
|
||||||
// If callers don't want this, they can do things the manual
|
|
||||||
// way and tweak as needed. But this is what net/http does
|
|
||||||
// itself, so copy that. If net/http changes, we can change
|
|
||||||
// here too.
|
|
||||||
tcpConn.SetKeepAlive(true)
|
|
||||||
tcpConn.SetKeepAlivePeriod(3 * time.Minute)
|
|
||||||
|
|
||||||
return tls.Server(tcpConn, ln.conf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ln *listener) Addr() net.Addr {
|
|
||||||
if ln.tcpListener != nil {
|
|
||||||
return ln.tcpListener.Addr()
|
|
||||||
}
|
|
||||||
// net.Listen failed. Return something non-nil in case callers
|
|
||||||
// call Addr before Accept:
|
|
||||||
return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ln *listener) Close() error {
|
|
||||||
if ln.tcpListenErr != nil {
|
|
||||||
return ln.tcpListenErr
|
|
||||||
}
|
|
||||||
return ln.tcpListener.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func homeDir() string {
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
|
|
||||||
}
|
|
||||||
if h := os.Getenv("HOME"); h != "" {
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
return "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
func cacheDir() string {
|
|
||||||
const base = "golang-autocert"
|
|
||||||
switch runtime.GOOS {
|
|
||||||
case "darwin":
|
|
||||||
return filepath.Join(homeDir(), "Library", "Caches", base)
|
|
||||||
case "windows":
|
|
||||||
for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} {
|
|
||||||
if v := os.Getenv(ev); v != "" {
|
|
||||||
return filepath.Join(v, base)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Worst case:
|
|
||||||
return filepath.Join(homeDir(), base)
|
|
||||||
}
|
|
||||||
if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" {
|
|
||||||
return filepath.Join(xdg, base)
|
|
||||||
}
|
|
||||||
return filepath.Join(homeDir(), ".cache", base)
|
|
||||||
}
|
|
||||||
141
vendor/golang.org/x/crypto/acme/autocert/renewal.go
generated
vendored
141
vendor/golang.org/x/crypto/acme/autocert/renewal.go
generated
vendored
@@ -1,141 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package autocert
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// renewJitter is the maximum deviation from Manager.RenewBefore.
|
|
||||||
const renewJitter = time.Hour
|
|
||||||
|
|
||||||
// domainRenewal tracks the state used by the periodic timers
|
|
||||||
// renewing a single domain's cert.
|
|
||||||
type domainRenewal struct {
|
|
||||||
m *Manager
|
|
||||||
ck certKey
|
|
||||||
key crypto.Signer
|
|
||||||
|
|
||||||
timerMu sync.Mutex
|
|
||||||
timer *time.Timer
|
|
||||||
}
|
|
||||||
|
|
||||||
// start starts a cert renewal timer at the time
|
|
||||||
// defined by the certificate expiration time exp.
|
|
||||||
//
|
|
||||||
// If the timer is already started, calling start is a noop.
|
|
||||||
func (dr *domainRenewal) start(exp time.Time) {
|
|
||||||
dr.timerMu.Lock()
|
|
||||||
defer dr.timerMu.Unlock()
|
|
||||||
if dr.timer != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
dr.timer = time.AfterFunc(dr.next(exp), dr.renew)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stop stops the cert renewal timer.
|
|
||||||
// If the timer is already stopped, calling stop is a noop.
|
|
||||||
func (dr *domainRenewal) stop() {
|
|
||||||
dr.timerMu.Lock()
|
|
||||||
defer dr.timerMu.Unlock()
|
|
||||||
if dr.timer == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
dr.timer.Stop()
|
|
||||||
dr.timer = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// renew is called periodically by a timer.
|
|
||||||
// The first renew call is kicked off by dr.start.
|
|
||||||
func (dr *domainRenewal) renew() {
|
|
||||||
dr.timerMu.Lock()
|
|
||||||
defer dr.timerMu.Unlock()
|
|
||||||
if dr.timer == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
|
|
||||||
defer cancel()
|
|
||||||
// TODO: rotate dr.key at some point?
|
|
||||||
next, err := dr.do(ctx)
|
|
||||||
if err != nil {
|
|
||||||
next = renewJitter / 2
|
|
||||||
next += time.Duration(pseudoRand.int63n(int64(next)))
|
|
||||||
}
|
|
||||||
dr.timer = time.AfterFunc(next, dr.renew)
|
|
||||||
testDidRenewLoop(next, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateState locks and replaces the relevant Manager.state item with the given
|
|
||||||
// state. It additionally updates dr.key with the given state's key.
|
|
||||||
func (dr *domainRenewal) updateState(state *certState) {
|
|
||||||
dr.m.stateMu.Lock()
|
|
||||||
defer dr.m.stateMu.Unlock()
|
|
||||||
dr.key = state.key
|
|
||||||
dr.m.state[dr.ck] = state
|
|
||||||
}
|
|
||||||
|
|
||||||
// do is similar to Manager.createCert but it doesn't lock a Manager.state item.
|
|
||||||
// Instead, it requests a new certificate independently and, upon success,
|
|
||||||
// replaces dr.m.state item with a new one and updates cache for the given domain.
|
|
||||||
//
|
|
||||||
// It may lock and update the Manager.state if the expiration date of the currently
|
|
||||||
// cached cert is far enough in the future.
|
|
||||||
//
|
|
||||||
// The returned value is a time interval after which the renewal should occur again.
|
|
||||||
func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) {
|
|
||||||
// a race is likely unavoidable in a distributed environment
|
|
||||||
// but we try nonetheless
|
|
||||||
if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil {
|
|
||||||
next := dr.next(tlscert.Leaf.NotAfter)
|
|
||||||
if next > dr.m.renewBefore()+renewJitter {
|
|
||||||
signer, ok := tlscert.PrivateKey.(crypto.Signer)
|
|
||||||
if ok {
|
|
||||||
state := &certState{
|
|
||||||
key: signer,
|
|
||||||
cert: tlscert.Certificate,
|
|
||||||
leaf: tlscert.Leaf,
|
|
||||||
}
|
|
||||||
dr.updateState(state)
|
|
||||||
return next, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.ck)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
state := &certState{
|
|
||||||
key: dr.key,
|
|
||||||
cert: der,
|
|
||||||
leaf: leaf,
|
|
||||||
}
|
|
||||||
tlscert, err := state.tlscert()
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
if err := dr.m.cachePut(ctx, dr.ck, tlscert); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
dr.updateState(state)
|
|
||||||
return dr.next(leaf.NotAfter), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dr *domainRenewal) next(expiry time.Time) time.Duration {
|
|
||||||
d := expiry.Sub(dr.m.now()) - dr.m.renewBefore()
|
|
||||||
// add a bit of randomness to renew deadline
|
|
||||||
n := pseudoRand.int63n(int64(renewJitter))
|
|
||||||
d -= time.Duration(n)
|
|
||||||
if d < 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
var testDidRenewLoop = func(next time.Duration, err error) {}
|
|
||||||
325
vendor/golang.org/x/crypto/acme/http.go
generated
vendored
325
vendor/golang.org/x/crypto/acme/http.go
generated
vendored
@@ -1,325 +0,0 @@
|
|||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package acme
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto"
|
|
||||||
"crypto/rand"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"math/big"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// retryTimer encapsulates common logic for retrying unsuccessful requests.
|
|
||||||
// It is not safe for concurrent use.
|
|
||||||
type retryTimer struct {
|
|
||||||
// backoffFn provides backoff delay sequence for retries.
|
|
||||||
// See Client.RetryBackoff doc comment.
|
|
||||||
backoffFn func(n int, r *http.Request, res *http.Response) time.Duration
|
|
||||||
// n is the current retry attempt.
|
|
||||||
n int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *retryTimer) inc() {
|
|
||||||
t.n++
|
|
||||||
}
|
|
||||||
|
|
||||||
// backoff pauses the current goroutine as described in Client.RetryBackoff.
|
|
||||||
func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error {
|
|
||||||
d := t.backoffFn(t.n, r, res)
|
|
||||||
if d <= 0 {
|
|
||||||
return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n)
|
|
||||||
}
|
|
||||||
wakeup := time.NewTimer(d)
|
|
||||||
defer wakeup.Stop()
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
case <-wakeup.C:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) retryTimer() *retryTimer {
|
|
||||||
f := c.RetryBackoff
|
|
||||||
if f == nil {
|
|
||||||
f = defaultBackoff
|
|
||||||
}
|
|
||||||
return &retryTimer{backoffFn: f}
|
|
||||||
}
|
|
||||||
|
|
||||||
// defaultBackoff provides default Client.RetryBackoff implementation
|
|
||||||
// using a truncated exponential backoff algorithm,
|
|
||||||
// as described in Client.RetryBackoff.
|
|
||||||
//
|
|
||||||
// The n argument is always bounded between 1 and 30.
|
|
||||||
// The returned value is always greater than 0.
|
|
||||||
func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
|
|
||||||
const max = 10 * time.Second
|
|
||||||
var jitter time.Duration
|
|
||||||
if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
|
|
||||||
// Set the minimum to 1ms to avoid a case where
|
|
||||||
// an invalid Retry-After value is parsed into 0 below,
|
|
||||||
// resulting in the 0 returned value which would unintentionally
|
|
||||||
// stop the retries.
|
|
||||||
jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
|
|
||||||
}
|
|
||||||
if v, ok := res.Header["Retry-After"]; ok {
|
|
||||||
return retryAfter(v[0]) + jitter
|
|
||||||
}
|
|
||||||
|
|
||||||
if n < 1 {
|
|
||||||
n = 1
|
|
||||||
}
|
|
||||||
if n > 30 {
|
|
||||||
n = 30
|
|
||||||
}
|
|
||||||
d := time.Duration(1<<uint(n-1))*time.Second + jitter
|
|
||||||
if d > max {
|
|
||||||
return max
|
|
||||||
}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryAfter parses a Retry-After HTTP header value,
|
|
||||||
// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
|
|
||||||
// It returns zero value if v cannot be parsed.
|
|
||||||
func retryAfter(v string) time.Duration {
|
|
||||||
if i, err := strconv.Atoi(v); err == nil {
|
|
||||||
return time.Duration(i) * time.Second
|
|
||||||
}
|
|
||||||
t, err := http.ParseTime(v)
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return t.Sub(timeNow())
|
|
||||||
}
|
|
||||||
|
|
||||||
// resOkay is a function that reports whether the provided response is okay.
|
|
||||||
// It is expected to keep the response body unread.
|
|
||||||
type resOkay func(*http.Response) bool
|
|
||||||
|
|
||||||
// wantStatus returns a function which reports whether the code
|
|
||||||
// matches the status code of a response.
|
|
||||||
func wantStatus(codes ...int) resOkay {
|
|
||||||
return func(res *http.Response) bool {
|
|
||||||
for _, code := range codes {
|
|
||||||
if code == res.StatusCode {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// get issues an unsigned GET request to the specified URL.
|
|
||||||
// It returns a non-error value only when ok reports true.
|
|
||||||
//
|
|
||||||
// get retries unsuccessful attempts according to c.RetryBackoff
|
|
||||||
// until the context is done or a non-retriable error is received.
|
|
||||||
func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
|
|
||||||
retry := c.retryTimer()
|
|
||||||
for {
|
|
||||||
req, err := http.NewRequest("GET", url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
res, err := c.doNoRetry(ctx, req)
|
|
||||||
switch {
|
|
||||||
case err != nil:
|
|
||||||
return nil, err
|
|
||||||
case ok(res):
|
|
||||||
return res, nil
|
|
||||||
case isRetriable(res.StatusCode):
|
|
||||||
retry.inc()
|
|
||||||
resErr := responseError(res)
|
|
||||||
res.Body.Close()
|
|
||||||
// Ignore the error value from retry.backoff
|
|
||||||
// and return the one from last retry, as received from the CA.
|
|
||||||
if retry.backoff(ctx, req, res) != nil {
|
|
||||||
return nil, resErr
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
defer res.Body.Close()
|
|
||||||
return nil, responseError(res)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// postAsGet is POST-as-GET, a replacement for GET in RFC8555
|
|
||||||
// as described in https://tools.ietf.org/html/rfc8555#section-6.3.
|
|
||||||
// It makes a POST request in KID form with zero JWS payload.
|
|
||||||
// See nopayload doc comments in jws.go.
|
|
||||||
func (c *Client) postAsGet(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
|
|
||||||
return c.post(ctx, nil, url, noPayload, ok)
|
|
||||||
}
|
|
||||||
|
|
||||||
// post issues a signed POST request in JWS format using the provided key
|
|
||||||
// to the specified URL. If key is nil, c.Key is used instead.
|
|
||||||
// It returns a non-error value only when ok reports true.
|
|
||||||
//
|
|
||||||
// post retries unsuccessful attempts according to c.RetryBackoff
|
|
||||||
// until the context is done or a non-retriable error is received.
|
|
||||||
// It uses postNoRetry to make individual requests.
|
|
||||||
func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) {
|
|
||||||
retry := c.retryTimer()
|
|
||||||
for {
|
|
||||||
res, req, err := c.postNoRetry(ctx, key, url, body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if ok(res) {
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
resErr := responseError(res)
|
|
||||||
res.Body.Close()
|
|
||||||
switch {
|
|
||||||
// Check for bad nonce before isRetriable because it may have been returned
|
|
||||||
// with an unretriable response code such as 400 Bad Request.
|
|
||||||
case isBadNonce(resErr):
|
|
||||||
// Consider any previously stored nonce values to be invalid.
|
|
||||||
c.clearNonces()
|
|
||||||
case !isRetriable(res.StatusCode):
|
|
||||||
return nil, resErr
|
|
||||||
}
|
|
||||||
retry.inc()
|
|
||||||
// Ignore the error value from retry.backoff
|
|
||||||
// and return the one from last retry, as received from the CA.
|
|
||||||
if err := retry.backoff(ctx, req, res); err != nil {
|
|
||||||
return nil, resErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// postNoRetry signs the body with the given key and POSTs it to the provided url.
|
|
||||||
// It is used by c.post to retry unsuccessful attempts.
|
|
||||||
// The body argument must be JSON-serializable.
|
|
||||||
//
|
|
||||||
// If key argument is nil, c.Key is used to sign the request.
|
|
||||||
// If key argument is nil and c.accountKID returns a non-zero keyID,
|
|
||||||
// the request is sent in KID form. Otherwise, JWK form is used.
|
|
||||||
//
|
|
||||||
// In practice, when interfacing with RFC-compliant CAs most requests are sent in KID form
|
|
||||||
// and JWK is used only when KID is unavailable: new account endpoint and certificate
|
|
||||||
// revocation requests authenticated by a cert key.
|
|
||||||
// See jwsEncodeJSON for other details.
|
|
||||||
func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) {
|
|
||||||
kid := noKeyID
|
|
||||||
if key == nil {
|
|
||||||
if c.Key == nil {
|
|
||||||
return nil, nil, errors.New("acme: Client.Key must be populated to make POST requests")
|
|
||||||
}
|
|
||||||
key = c.Key
|
|
||||||
kid = c.accountKID(ctx)
|
|
||||||
}
|
|
||||||
nonce, err := c.popNonce(ctx, url)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
b, err := jwsEncodeJSON(body, key, kid, nonce, url)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest("POST", url, bytes.NewReader(b))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/jose+json")
|
|
||||||
res, err := c.doNoRetry(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
c.addNonce(res.Header)
|
|
||||||
return res, req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// doNoRetry issues a request req, replacing its context (if any) with ctx.
|
|
||||||
func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) {
|
|
||||||
req.Header.Set("User-Agent", c.userAgent())
|
|
||||||
res, err := c.httpClient().Do(req.WithContext(ctx))
|
|
||||||
if err != nil {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
// Prefer the unadorned context error.
|
|
||||||
// (The acme package had tests assuming this, previously from ctxhttp's
|
|
||||||
// behavior, predating net/http supporting contexts natively)
|
|
||||||
// TODO(bradfitz): reconsider this in the future. But for now this
|
|
||||||
// requires no test updates.
|
|
||||||
return nil, ctx.Err()
|
|
||||||
default:
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) httpClient() *http.Client {
|
|
||||||
if c.HTTPClient != nil {
|
|
||||||
return c.HTTPClient
|
|
||||||
}
|
|
||||||
return http.DefaultClient
|
|
||||||
}
|
|
||||||
|
|
||||||
// packageVersion is the version of the module that contains this package, for
|
|
||||||
// sending as part of the User-Agent header. It's set in version_go112.go.
|
|
||||||
var packageVersion string
|
|
||||||
|
|
||||||
// userAgent returns the User-Agent header value. It includes the package name,
|
|
||||||
// the module version (if available), and the c.UserAgent value (if set).
|
|
||||||
func (c *Client) userAgent() string {
|
|
||||||
ua := "golang.org/x/crypto/acme"
|
|
||||||
if packageVersion != "" {
|
|
||||||
ua += "@" + packageVersion
|
|
||||||
}
|
|
||||||
if c.UserAgent != "" {
|
|
||||||
ua = c.UserAgent + " " + ua
|
|
||||||
}
|
|
||||||
return ua
|
|
||||||
}
|
|
||||||
|
|
||||||
// isBadNonce reports whether err is an ACME "badnonce" error.
|
|
||||||
func isBadNonce(err error) bool {
|
|
||||||
// According to the spec badNonce is urn:ietf:params:acme:error:badNonce.
|
|
||||||
// However, ACME servers in the wild return their versions of the error.
|
|
||||||
// See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4
|
|
||||||
// and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66.
|
|
||||||
ae, ok := err.(*Error)
|
|
||||||
return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce")
|
|
||||||
}
|
|
||||||
|
|
||||||
// isRetriable reports whether a request can be retried
|
|
||||||
// based on the response status code.
|
|
||||||
//
|
|
||||||
// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code.
|
|
||||||
// Callers should parse the response and check with isBadNonce.
|
|
||||||
func isRetriable(code int) bool {
|
|
||||||
return code <= 399 || code >= 500 || code == http.StatusTooManyRequests
|
|
||||||
}
|
|
||||||
|
|
||||||
// responseError creates an error of Error type from resp.
|
|
||||||
func responseError(resp *http.Response) error {
|
|
||||||
// don't care if ReadAll returns an error:
|
|
||||||
// json.Unmarshal will fail in that case anyway
|
|
||||||
b, _ := ioutil.ReadAll(resp.Body)
|
|
||||||
e := &wireError{Status: resp.StatusCode}
|
|
||||||
if err := json.Unmarshal(b, e); err != nil {
|
|
||||||
// this is not a regular error response:
|
|
||||||
// populate detail with anything we received,
|
|
||||||
// e.Status will already contain HTTP response code value
|
|
||||||
e.Detail = string(b)
|
|
||||||
if e.Detail == "" {
|
|
||||||
e.Detail = resp.Status
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return e.error(resp.Header)
|
|
||||||
}
|
|
||||||
229
vendor/golang.org/x/crypto/acme/jws.go
generated
vendored
229
vendor/golang.org/x/crypto/acme/jws.go
generated
vendored
@@ -1,229 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package acme
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/sha256"
|
|
||||||
_ "crypto/sha512" // need for EC keys
|
|
||||||
"encoding/asn1"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
)
|
|
||||||
|
|
||||||
// keyID is the account identity provided by a CA during registration.
|
|
||||||
type keyID string
|
|
||||||
|
|
||||||
// noKeyID indicates that jwsEncodeJSON should compute and use JWK instead of a KID.
|
|
||||||
// See jwsEncodeJSON for details.
|
|
||||||
const noKeyID = keyID("")
|
|
||||||
|
|
||||||
// noPayload indicates jwsEncodeJSON will encode zero-length octet string
|
|
||||||
// in a JWS request. This is called POST-as-GET in RFC 8555 and is used to make
|
|
||||||
// authenticated GET requests via POSTing with an empty payload.
|
|
||||||
// See https://tools.ietf.org/html/rfc8555#section-6.3 for more details.
|
|
||||||
const noPayload = ""
|
|
||||||
|
|
||||||
// jsonWebSignature can be easily serialized into a JWS following
|
|
||||||
// https://tools.ietf.org/html/rfc7515#section-3.2.
|
|
||||||
type jsonWebSignature struct {
|
|
||||||
Protected string `json:"protected"`
|
|
||||||
Payload string `json:"payload"`
|
|
||||||
Sig string `json:"signature"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// jwsEncodeJSON signs claimset using provided key and a nonce.
|
|
||||||
// The result is serialized in JSON format containing either kid or jwk
|
|
||||||
// fields based on the provided keyID value.
|
|
||||||
//
|
|
||||||
// If kid is non-empty, its quoted value is inserted in the protected head
|
|
||||||
// as "kid" field value. Otherwise, JWK is computed using jwkEncode and inserted
|
|
||||||
// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive.
|
|
||||||
//
|
|
||||||
// See https://tools.ietf.org/html/rfc7515#section-7.
|
|
||||||
func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid keyID, nonce, url string) ([]byte, error) {
|
|
||||||
alg, sha := jwsHasher(key.Public())
|
|
||||||
if alg == "" || !sha.Available() {
|
|
||||||
return nil, ErrUnsupportedKey
|
|
||||||
}
|
|
||||||
var phead string
|
|
||||||
switch kid {
|
|
||||||
case noKeyID:
|
|
||||||
jwk, err := jwkEncode(key.Public())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
phead = fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q,"url":%q}`, alg, jwk, nonce, url)
|
|
||||||
default:
|
|
||||||
phead = fmt.Sprintf(`{"alg":%q,"kid":%q,"nonce":%q,"url":%q}`, alg, kid, nonce, url)
|
|
||||||
}
|
|
||||||
phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
|
|
||||||
var payload string
|
|
||||||
if claimset != noPayload {
|
|
||||||
cs, err := json.Marshal(claimset)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
payload = base64.RawURLEncoding.EncodeToString(cs)
|
|
||||||
}
|
|
||||||
hash := sha.New()
|
|
||||||
hash.Write([]byte(phead + "." + payload))
|
|
||||||
sig, err := jwsSign(key, sha, hash.Sum(nil))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
enc := jsonWebSignature{
|
|
||||||
Protected: phead,
|
|
||||||
Payload: payload,
|
|
||||||
Sig: base64.RawURLEncoding.EncodeToString(sig),
|
|
||||||
}
|
|
||||||
return json.Marshal(&enc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// jwsWithMAC creates and signs a JWS using the given key and the HS256
|
|
||||||
// algorithm. kid and url are included in the protected header. rawPayload
|
|
||||||
// should not be base64-URL-encoded.
|
|
||||||
func jwsWithMAC(key []byte, kid, url string, rawPayload []byte) (*jsonWebSignature, error) {
|
|
||||||
if len(key) == 0 {
|
|
||||||
return nil, errors.New("acme: cannot sign JWS with an empty MAC key")
|
|
||||||
}
|
|
||||||
header := struct {
|
|
||||||
Algorithm string `json:"alg"`
|
|
||||||
KID string `json:"kid"`
|
|
||||||
URL string `json:"url,omitempty"`
|
|
||||||
}{
|
|
||||||
// Only HMAC-SHA256 is supported.
|
|
||||||
Algorithm: "HS256",
|
|
||||||
KID: kid,
|
|
||||||
URL: url,
|
|
||||||
}
|
|
||||||
rawProtected, err := json.Marshal(header)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
protected := base64.RawURLEncoding.EncodeToString(rawProtected)
|
|
||||||
payload := base64.RawURLEncoding.EncodeToString(rawPayload)
|
|
||||||
|
|
||||||
h := hmac.New(sha256.New, key)
|
|
||||||
if _, err := h.Write([]byte(protected + "." + payload)); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
mac := h.Sum(nil)
|
|
||||||
|
|
||||||
return &jsonWebSignature{
|
|
||||||
Protected: protected,
|
|
||||||
Payload: payload,
|
|
||||||
Sig: base64.RawURLEncoding.EncodeToString(mac),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// jwkEncode encodes public part of an RSA or ECDSA key into a JWK.
|
|
||||||
// The result is also suitable for creating a JWK thumbprint.
|
|
||||||
// https://tools.ietf.org/html/rfc7517
|
|
||||||
func jwkEncode(pub crypto.PublicKey) (string, error) {
|
|
||||||
switch pub := pub.(type) {
|
|
||||||
case *rsa.PublicKey:
|
|
||||||
// https://tools.ietf.org/html/rfc7518#section-6.3.1
|
|
||||||
n := pub.N
|
|
||||||
e := big.NewInt(int64(pub.E))
|
|
||||||
// Field order is important.
|
|
||||||
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
|
|
||||||
return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`,
|
|
||||||
base64.RawURLEncoding.EncodeToString(e.Bytes()),
|
|
||||||
base64.RawURLEncoding.EncodeToString(n.Bytes()),
|
|
||||||
), nil
|
|
||||||
case *ecdsa.PublicKey:
|
|
||||||
// https://tools.ietf.org/html/rfc7518#section-6.2.1
|
|
||||||
p := pub.Curve.Params()
|
|
||||||
n := p.BitSize / 8
|
|
||||||
if p.BitSize%8 != 0 {
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
x := pub.X.Bytes()
|
|
||||||
if n > len(x) {
|
|
||||||
x = append(make([]byte, n-len(x)), x...)
|
|
||||||
}
|
|
||||||
y := pub.Y.Bytes()
|
|
||||||
if n > len(y) {
|
|
||||||
y = append(make([]byte, n-len(y)), y...)
|
|
||||||
}
|
|
||||||
// Field order is important.
|
|
||||||
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
|
|
||||||
return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`,
|
|
||||||
p.Name,
|
|
||||||
base64.RawURLEncoding.EncodeToString(x),
|
|
||||||
base64.RawURLEncoding.EncodeToString(y),
|
|
||||||
), nil
|
|
||||||
}
|
|
||||||
return "", ErrUnsupportedKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// jwsSign signs the digest using the given key.
|
|
||||||
// The hash is unused for ECDSA keys.
|
|
||||||
func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) {
|
|
||||||
switch pub := key.Public().(type) {
|
|
||||||
case *rsa.PublicKey:
|
|
||||||
return key.Sign(rand.Reader, digest, hash)
|
|
||||||
case *ecdsa.PublicKey:
|
|
||||||
sigASN1, err := key.Sign(rand.Reader, digest, hash)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var rs struct{ R, S *big.Int }
|
|
||||||
if _, err := asn1.Unmarshal(sigASN1, &rs); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
rb, sb := rs.R.Bytes(), rs.S.Bytes()
|
|
||||||
size := pub.Params().BitSize / 8
|
|
||||||
if size%8 > 0 {
|
|
||||||
size++
|
|
||||||
}
|
|
||||||
sig := make([]byte, size*2)
|
|
||||||
copy(sig[size-len(rb):], rb)
|
|
||||||
copy(sig[size*2-len(sb):], sb)
|
|
||||||
return sig, nil
|
|
||||||
}
|
|
||||||
return nil, ErrUnsupportedKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// jwsHasher indicates suitable JWS algorithm name and a hash function
|
|
||||||
// to use for signing a digest with the provided key.
|
|
||||||
// It returns ("", 0) if the key is not supported.
|
|
||||||
func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) {
|
|
||||||
switch pub := pub.(type) {
|
|
||||||
case *rsa.PublicKey:
|
|
||||||
return "RS256", crypto.SHA256
|
|
||||||
case *ecdsa.PublicKey:
|
|
||||||
switch pub.Params().Name {
|
|
||||||
case "P-256":
|
|
||||||
return "ES256", crypto.SHA256
|
|
||||||
case "P-384":
|
|
||||||
return "ES384", crypto.SHA384
|
|
||||||
case "P-521":
|
|
||||||
return "ES512", crypto.SHA512
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// JWKThumbprint creates a JWK thumbprint out of pub
|
|
||||||
// as specified in https://tools.ietf.org/html/rfc7638.
|
|
||||||
func JWKThumbprint(pub crypto.PublicKey) (string, error) {
|
|
||||||
jwk, err := jwkEncode(pub)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
b := sha256.Sum256([]byte(jwk))
|
|
||||||
return base64.RawURLEncoding.EncodeToString(b[:]), nil
|
|
||||||
}
|
|
||||||
412
vendor/golang.org/x/crypto/acme/rfc8555.go
generated
vendored
412
vendor/golang.org/x/crypto/acme/rfc8555.go
generated
vendored
@@ -1,412 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package acme
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"encoding/pem"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DeactivateReg permanently disables an existing account associated with c.Key.
|
|
||||||
// A deactivated account can no longer request certificate issuance or access
|
|
||||||
// resources related to the account, such as orders or authorizations.
|
|
||||||
//
|
|
||||||
// It only works with CAs implementing RFC 8555.
|
|
||||||
func (c *Client) DeactivateReg(ctx context.Context) error {
|
|
||||||
url := string(c.accountKID(ctx))
|
|
||||||
if url == "" {
|
|
||||||
return ErrNoAccount
|
|
||||||
}
|
|
||||||
req := json.RawMessage(`{"status": "deactivated"}`)
|
|
||||||
res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
res.Body.Close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// registerRFC is equivalent to c.Register but for CAs implementing RFC 8555.
|
|
||||||
// It expects c.Discover to have already been called.
|
|
||||||
func (c *Client) registerRFC(ctx context.Context, acct *Account, prompt func(tosURL string) bool) (*Account, error) {
|
|
||||||
c.cacheMu.Lock() // guard c.kid access
|
|
||||||
defer c.cacheMu.Unlock()
|
|
||||||
|
|
||||||
req := struct {
|
|
||||||
TermsAgreed bool `json:"termsOfServiceAgreed,omitempty"`
|
|
||||||
Contact []string `json:"contact,omitempty"`
|
|
||||||
ExternalAccountBinding *jsonWebSignature `json:"externalAccountBinding,omitempty"`
|
|
||||||
}{
|
|
||||||
Contact: acct.Contact,
|
|
||||||
}
|
|
||||||
if c.dir.Terms != "" {
|
|
||||||
req.TermsAgreed = prompt(c.dir.Terms)
|
|
||||||
}
|
|
||||||
|
|
||||||
// set 'externalAccountBinding' field if requested
|
|
||||||
if acct.ExternalAccountBinding != nil {
|
|
||||||
eabJWS, err := c.encodeExternalAccountBinding(acct.ExternalAccountBinding)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("acme: failed to encode external account binding: %v", err)
|
|
||||||
}
|
|
||||||
req.ExternalAccountBinding = eabJWS
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := c.post(ctx, c.Key, c.dir.RegURL, req, wantStatus(
|
|
||||||
http.StatusOK, // account with this key already registered
|
|
||||||
http.StatusCreated, // new account created
|
|
||||||
))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer res.Body.Close()
|
|
||||||
a, err := responseAccount(res)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Cache Account URL even if we return an error to the caller.
|
|
||||||
// It is by all means a valid and usable "kid" value for future requests.
|
|
||||||
c.kid = keyID(a.URI)
|
|
||||||
if res.StatusCode == http.StatusOK {
|
|
||||||
return nil, ErrAccountAlreadyExists
|
|
||||||
}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// encodeExternalAccountBinding will encode an external account binding stanza
|
|
||||||
// as described in https://tools.ietf.org/html/rfc8555#section-7.3.4.
|
|
||||||
func (c *Client) encodeExternalAccountBinding(eab *ExternalAccountBinding) (*jsonWebSignature, error) {
|
|
||||||
jwk, err := jwkEncode(c.Key.Public())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return jwsWithMAC(eab.Key, eab.KID, c.dir.RegURL, []byte(jwk))
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateRegRFC is equivalent to c.UpdateReg but for CAs implementing RFC 8555.
|
|
||||||
// It expects c.Discover to have already been called.
|
|
||||||
func (c *Client) updateRegRFC(ctx context.Context, a *Account) (*Account, error) {
|
|
||||||
url := string(c.accountKID(ctx))
|
|
||||||
if url == "" {
|
|
||||||
return nil, ErrNoAccount
|
|
||||||
}
|
|
||||||
req := struct {
|
|
||||||
Contact []string `json:"contact,omitempty"`
|
|
||||||
}{
|
|
||||||
Contact: a.Contact,
|
|
||||||
}
|
|
||||||
res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
return responseAccount(res)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getRegRFC is equivalent to c.GetReg but for CAs implementing RFC 8555.
// It expects c.Discover to have already been called.
func (c *Client) getRegRFC(ctx context.Context) (*Account, error) {
	// onlyReturnExisting makes the newAccount endpoint behave as a lookup
	// instead of a registration; see RFC 8555 section 7.3.1.
	req := json.RawMessage(`{"onlyReturnExisting": true}`)
	res, err := c.post(ctx, c.Key, c.dir.RegURL, req, wantStatus(http.StatusOK))
	// Translate the CA's "account does not exist" problem into the
	// package-level sentinel so callers can compare against ErrNoAccount.
	if e, ok := err.(*Error); ok && e.ProblemType == "urn:ietf:params:acme:error:accountDoesNotExist" {
		return nil, ErrNoAccount
	}
	if err != nil {
		return nil, err
	}

	defer res.Body.Close()
	return responseAccount(res)
}
|
|
||||||
|
|
||||||
func responseAccount(res *http.Response) (*Account, error) {
|
|
||||||
var v struct {
|
|
||||||
Status string
|
|
||||||
Contact []string
|
|
||||||
Orders string
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
|
|
||||||
return nil, fmt.Errorf("acme: invalid account response: %v", err)
|
|
||||||
}
|
|
||||||
return &Account{
|
|
||||||
URI: res.Header.Get("Location"),
|
|
||||||
Status: v.Status,
|
|
||||||
Contact: v.Contact,
|
|
||||||
OrdersURL: v.Orders,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AuthorizeOrder initiates the order-based application for certificate issuance,
// as opposed to pre-authorization in Authorize.
// It is only supported by CAs implementing RFC 8555.
//
// The caller then needs to fetch each authorization with GetAuthorization,
// identify those with StatusPending status and fulfill a challenge using Accept.
// Once all authorizations are satisfied, the caller will typically want to poll
// order status using WaitOrder until it's in StatusReady state.
// To finalize the order and obtain a certificate, the caller submits a CSR with CreateOrderCert.
func (c *Client) AuthorizeOrder(ctx context.Context, id []AuthzID, opt ...OrderOption) (*Order, error) {
	// Discover also primes the directory, whose OrderURL is needed below.
	dir, err := c.Discover(ctx)
	if err != nil {
		return nil, err
	}

	// Wire form of the newOrder request; see RFC 8555 section 7.4.
	req := struct {
		Identifiers []wireAuthzID `json:"identifiers"`
		NotBefore   string        `json:"notBefore,omitempty"`
		NotAfter    string        `json:"notAfter,omitempty"`
	}{}
	for _, v := range id {
		req.Identifiers = append(req.Identifiers, wireAuthzID{
			Type:  v.Type,
			Value: v.Value,
		})
	}
	// Apply caller options. OrderOption is a sealed interface, so any
	// unknown concrete type can only originate from this package.
	for _, o := range opt {
		switch o := o.(type) {
		case orderNotBeforeOpt:
			req.NotBefore = time.Time(o).Format(time.RFC3339)
		case orderNotAfterOpt:
			req.NotAfter = time.Time(o).Format(time.RFC3339)
		default:
			// Package's fault if we let this happen.
			panic(fmt.Sprintf("unsupported order option type %T", o))
		}
	}

	res, err := c.post(ctx, nil, dir.OrderURL, req, wantStatus(http.StatusCreated))
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	return responseOrder(res)
}
|
|
||||||
|
|
||||||
// GetOrder retrieves an order identified by the given URL.
// For orders created with AuthorizeOrder, the url value is Order.URI.
//
// If a caller needs to poll an order until its status is final,
// see the WaitOrder method.
func (c *Client) GetOrder(ctx context.Context, url string) (*Order, error) {
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}

	// POST-as-GET per RFC 8555 section 6.3: an authenticated fetch.
	res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK))
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	return responseOrder(res)
}
|
|
||||||
|
|
||||||
// WaitOrder polls an order from the given URL until it is in one of the final states,
// StatusReady, StatusValid or StatusInvalid, the CA responded with a non-retryable error
// or the context is done.
//
// It returns a non-nil Order only if its Status is StatusReady or StatusValid.
// In all other cases WaitOrder returns an error.
// If the Status is StatusInvalid, the returned error is of type *OrderError.
func (c *Client) WaitOrder(ctx context.Context, url string) (*Order, error) {
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}
	for {
		res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK))
		if err != nil {
			return nil, err
		}
		o, err := responseOrder(res)
		// Close eagerly: defer would pile up bodies across iterations.
		// The header is still readable after Close for Retry-After below.
		res.Body.Close()
		switch {
		case err != nil:
			// Skip and retry.
		case o.Status == StatusInvalid:
			return nil, &OrderError{OrderURL: o.URI, Status: o.Status}
		case o.Status == StatusReady || o.Status == StatusValid:
			return o, nil
		}

		d := retryAfter(res.Header.Get("Retry-After"))
		if d == 0 {
			// Default retry-after.
			// Same reasoning as in WaitAuthorization.
			d = time.Second
		}
		t := time.NewTimer(d)
		select {
		case <-ctx.Done():
			t.Stop()
			return nil, ctx.Err()
		case <-t.C:
			// Retry.
		}
	}
}
|
|
||||||
|
|
||||||
func responseOrder(res *http.Response) (*Order, error) {
|
|
||||||
var v struct {
|
|
||||||
Status string
|
|
||||||
Expires time.Time
|
|
||||||
Identifiers []wireAuthzID
|
|
||||||
NotBefore time.Time
|
|
||||||
NotAfter time.Time
|
|
||||||
Error *wireError
|
|
||||||
Authorizations []string
|
|
||||||
Finalize string
|
|
||||||
Certificate string
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
|
|
||||||
return nil, fmt.Errorf("acme: error reading order: %v", err)
|
|
||||||
}
|
|
||||||
o := &Order{
|
|
||||||
URI: res.Header.Get("Location"),
|
|
||||||
Status: v.Status,
|
|
||||||
Expires: v.Expires,
|
|
||||||
NotBefore: v.NotBefore,
|
|
||||||
NotAfter: v.NotAfter,
|
|
||||||
AuthzURLs: v.Authorizations,
|
|
||||||
FinalizeURL: v.Finalize,
|
|
||||||
CertURL: v.Certificate,
|
|
||||||
}
|
|
||||||
for _, id := range v.Identifiers {
|
|
||||||
o.Identifiers = append(o.Identifiers, AuthzID{Type: id.Type, Value: id.Value})
|
|
||||||
}
|
|
||||||
if v.Error != nil {
|
|
||||||
o.Error = v.Error.error(nil /* headers */)
|
|
||||||
}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateOrderCert submits the CSR (Certificate Signing Request) to a CA at the specified URL.
// The URL is the FinalizeURL field of an Order created with AuthorizeOrder.
//
// If the bundle argument is true, the returned value also contain the CA (issuer)
// certificate chain. Otherwise, only a leaf certificate is returned.
// The returned URL can be used to re-fetch the certificate using FetchCert.
//
// This method is only supported by CAs implementing RFC 8555. See CreateCert for pre-RFC CAs.
//
// CreateOrderCert returns an error if the CA's response is unreasonably large.
// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features.
func (c *Client) CreateOrderCert(ctx context.Context, url string, csr []byte, bundle bool) (der [][]byte, certURL string, err error) {
	if _, err := c.Discover(ctx); err != nil { // required by c.accountKID
		return nil, "", err
	}

	// RFC describes this as "finalize order" request.
	req := struct {
		CSR string `json:"csr"`
	}{
		CSR: base64.RawURLEncoding.EncodeToString(csr),
	}
	res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK))
	if err != nil {
		return nil, "", err
	}
	defer res.Body.Close()
	o, err := responseOrder(res)
	if err != nil {
		return nil, "", err
	}

	// Wait for CA to issue the cert if they haven't.
	if o.Status != StatusValid {
		o, err = c.WaitOrder(ctx, o.URI)
	}
	if err != nil {
		return nil, "", err
	}
	// The only acceptable status post finalize and WaitOrder is "valid".
	if o.Status != StatusValid {
		return nil, "", &OrderError{OrderURL: o.URI, Status: o.Status}
	}
	crt, err := c.fetchCertRFC(ctx, o.CertURL, bundle)
	return crt, o.CertURL, err
}
|
|
||||||
|
|
||||||
// fetchCertRFC downloads issued certificate from the given URL.
// It expects the CA to respond with PEM-encoded certificate chain.
//
// The URL argument is the CertURL field of Order.
func (c *Client) fetchCertRFC(ctx context.Context, url string, bundle bool) ([][]byte, error) {
	res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK))
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	// Get all the bytes up to a sane maximum.
	// Account very roughly for base64 overhead.
	// Reading max+1 lets us distinguish "exactly max" from "truncated".
	const max = maxCertChainSize + maxCertChainSize/33
	b, err := ioutil.ReadAll(io.LimitReader(res.Body, max+1))
	if err != nil {
		return nil, fmt.Errorf("acme: fetch cert response stream: %v", err)
	}
	if len(b) > max {
		return nil, errors.New("acme: certificate chain is too big")
	}

	// Decode PEM chain. pem.Decode consumes one block per call and
	// returns the remainder, so b shrinks on each iteration.
	var chain [][]byte
	for {
		var p *pem.Block
		p, b = pem.Decode(b)
		if p == nil {
			break
		}
		if p.Type != "CERTIFICATE" {
			return nil, fmt.Errorf("acme: invalid PEM cert type %q", p.Type)
		}

		chain = append(chain, p.Bytes)
		// Without bundling, the first (leaf) certificate is all we need.
		if !bundle {
			return chain, nil
		}
		if len(chain) > maxChainLen {
			return nil, errors.New("acme: certificate chain is too long")
		}
	}
	if len(chain) == 0 {
		return nil, errors.New("acme: certificate chain is empty")
	}
	return chain, nil
}
|
|
||||||
|
|
||||||
// revokeCertRFC sends a cert revocation request in either JWK form when key is
// non-nil or KID form otherwise.
//
// cert is the DER encoding of the certificate to revoke; it is sent
// base64url-encoded per RFC 8555 section 7.6.
func (c *Client) revokeCertRFC(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error {
	req := &struct {
		Cert   string `json:"certificate"`
		Reason int    `json:"reason"`
	}{
		Cert:   base64.RawURLEncoding.EncodeToString(cert),
		Reason: int(reason),
	}
	res, err := c.post(ctx, key, c.dir.RevokeURL, req, wantStatus(http.StatusOK))
	if err != nil {
		if isAlreadyRevoked(err) {
			// Assume it is not an error to revoke an already revoked cert.
			return nil
		}
		return err
	}
	defer res.Body.Close()
	return nil
}
|
|
||||||
|
|
||||||
func isAlreadyRevoked(err error) bool {
|
|
||||||
e, ok := err.(*Error)
|
|
||||||
return ok && e.ProblemType == "urn:ietf:params:acme:error:alreadyRevoked"
|
|
||||||
}
|
|
||||||
622
vendor/golang.org/x/crypto/acme/types.go
generated
vendored
622
vendor/golang.org/x/crypto/acme/types.go
generated
vendored
@@ -1,622 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package acme
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"crypto/x509"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ACME status values of Account, Order, Authorization and Challenge objects.
// See https://tools.ietf.org/html/rfc8555#section-7.1.6 for details.
// These are the raw wire strings; they are compared directly against the
// Status fields returned by the CA.
const (
	StatusDeactivated = "deactivated"
	StatusExpired     = "expired"
	StatusInvalid     = "invalid"
	StatusPending     = "pending"
	StatusProcessing  = "processing"
	StatusReady       = "ready"
	StatusRevoked     = "revoked"
	StatusUnknown     = "unknown"
	StatusValid       = "valid"
)
|
|
||||||
|
|
||||||
// CRLReasonCode identifies the reason for a certificate revocation.
type CRLReasonCode int

// CRL reason codes as defined in RFC 5280.
// Value 7 is intentionally absent: it is unassigned in RFC 5280.
const (
	CRLReasonUnspecified          CRLReasonCode = 0
	CRLReasonKeyCompromise        CRLReasonCode = 1
	CRLReasonCACompromise         CRLReasonCode = 2
	CRLReasonAffiliationChanged   CRLReasonCode = 3
	CRLReasonSuperseded           CRLReasonCode = 4
	CRLReasonCessationOfOperation CRLReasonCode = 5
	CRLReasonCertificateHold      CRLReasonCode = 6
	CRLReasonRemoveFromCRL        CRLReasonCode = 8
	CRLReasonPrivilegeWithdrawn   CRLReasonCode = 9
	CRLReasonAACompromise         CRLReasonCode = 10
)
|
|
||||||
|
|
||||||
// Sentinel errors returned by Client methods. They are returned unwrapped,
// so callers may compare with == (or errors.Is).
var (
	// ErrUnsupportedKey is returned when an unsupported key type is encountered.
	ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported")

	// ErrAccountAlreadyExists indicates that the Client's key has already been registered
	// with the CA. It is returned by Register method.
	ErrAccountAlreadyExists = errors.New("acme: account already exists")

	// ErrNoAccount indicates that the Client's key has not been registered with the CA.
	ErrNoAccount = errors.New("acme: account does not exist")
)
|
|
||||||
|
|
||||||
// A Subproblem describes an ACME subproblem as reported in an Error.
type Subproblem struct {
	// Type is a URI reference that identifies the problem type,
	// typically in a "urn:acme:error:xxx" form.
	Type string
	// Detail is a human-readable explanation specific to this occurrence of the problem.
	Detail string
	// Instance indicates a URL that the client should direct a human user to visit
	// in order for instructions on how to agree to the updated Terms of Service.
	// In such an event CA sets StatusCode to 403, Type to
	// "urn:ietf:params:acme:error:userActionRequired", and adds a Link header with relation
	// "terms-of-service" containing the latest TOS URL.
	Instance string
	// Identifier may contain the ACME identifier that the error is for.
	Identifier *AuthzID
}

// String formats the subproblem as "type: [identType: identValue] detail",
// omitting the bracketed identifier when none is present.
func (sp Subproblem) String() string {
	str := fmt.Sprintf("%s: ", sp.Type)
	if sp.Identifier != nil {
		str += fmt.Sprintf("[%s: %s] ", sp.Identifier.Type, sp.Identifier.Value)
	}
	str += sp.Detail
	return str
}
|
|
||||||
|
|
||||||
// Error is an ACME error, defined in Problem Details for HTTP APIs doc
// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem.
type Error struct {
	// StatusCode is The HTTP status code generated by the origin server.
	StatusCode int
	// ProblemType is a URI reference that identifies the problem type,
	// typically in a "urn:acme:error:xxx" form.
	ProblemType string
	// Detail is a human-readable explanation specific to this occurrence of the problem.
	Detail string
	// Instance indicates a URL that the client should direct a human user to visit
	// in order for instructions on how to agree to the updated Terms of Service.
	// In such an event CA sets StatusCode to 403, ProblemType to
	// "urn:ietf:params:acme:error:userActionRequired" and a Link header with relation
	// "terms-of-service" containing the latest TOS URL.
	Instance string
	// Header is the original server error response headers.
	// It may be nil.
	Header http.Header
	// Subproblems may contain more detailed information about the individual problems
	// that caused the error. This field is only sent by RFC 8555 compatible ACME
	// servers. Defined in RFC 8555 Section 6.7.1.
	Subproblems []Subproblem
}
|
|
||||||
|
|
||||||
func (e *Error) Error() string {
|
|
||||||
str := fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail)
|
|
||||||
if len(e.Subproblems) > 0 {
|
|
||||||
str += fmt.Sprintf("; subproblems:")
|
|
||||||
for _, sp := range e.Subproblems {
|
|
||||||
str += fmt.Sprintf("\n\t%s", sp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return str
|
|
||||||
}
|
|
||||||
|
|
||||||
// AuthorizationError indicates that an authorization for an identifier
// did not succeed.
// It contains all errors from Challenge items of the failed Authorization.
type AuthorizationError struct {
	// URI uniquely identifies the failed Authorization.
	URI string

	// Identifier is an AuthzID.Value of the failed Authorization.
	Identifier string

	// Errors is a collection of non-nil error values of Challenge items
	// of the failed Authorization.
	Errors []error
}

// Error joins all challenge errors with "; ", prefixing the identifier
// when one is known.
func (a *AuthorizationError) Error() string {
	e := make([]string, len(a.Errors))
	for i, err := range a.Errors {
		e[i] = err.Error()
	}

	if a.Identifier != "" {
		return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; "))
	}

	return fmt.Sprintf("acme: authorization error: %s", strings.Join(e, "; "))
}
|
|
||||||
|
|
||||||
// OrderError is returned from Client's order related methods.
// It indicates the order is unusable and the clients should start over with
// AuthorizeOrder.
//
// The clients can still fetch the order object from CA using GetOrder
// to inspect its state.
type OrderError struct {
	// OrderURL identifies the unusable order; it is the Order's URI field.
	OrderURL string
	// Status is the order status that made the order unusable.
	Status string
}

// Error describes the order and the status it was left in.
func (oe *OrderError) Error() string {
	return fmt.Sprintf("acme: order %s status: %s", oe.OrderURL, oe.Status)
}
|
|
||||||
|
|
||||||
// RateLimit reports whether err represents a rate limit error and
|
|
||||||
// any Retry-After duration returned by the server.
|
|
||||||
//
|
|
||||||
// See the following for more details on rate limiting:
|
|
||||||
// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6
|
|
||||||
func RateLimit(err error) (time.Duration, bool) {
|
|
||||||
e, ok := err.(*Error)
|
|
||||||
if !ok {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
// Some CA implementations may return incorrect values.
|
|
||||||
// Use case-insensitive comparison.
|
|
||||||
if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
if e.Header == nil {
|
|
||||||
return 0, true
|
|
||||||
}
|
|
||||||
return retryAfter(e.Header.Get("Retry-After")), true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Account is a user account. It is associated with a private key.
// Non-RFC 8555 fields are empty when interfacing with a compliant CA.
type Account struct {
	// URI is the account unique ID, which is also a URL used to retrieve
	// account data from the CA.
	// When interfacing with RFC 8555-compliant CAs, URI is the "kid" field
	// value in JWS signed requests.
	URI string

	// Contact is a slice of contact info used during registration.
	// See https://tools.ietf.org/html/rfc8555#section-7.3 for supported
	// formats.
	Contact []string

	// Status indicates current account status as returned by the CA.
	// Possible values are StatusValid, StatusDeactivated, and StatusRevoked.
	Status string

	// OrdersURL is a URL from which a list of orders submitted by this account
	// can be fetched.
	OrdersURL string

	// AgreedTerms is the URI of the terms the user has agreed to.
	// A value not matching CurrentTerms indicates that the user hasn't agreed
	// to the actual Terms of Service of the CA.
	//
	// It is non-RFC 8555 compliant. Package users can store the ToS they agree to
	// during Client's Register call in the prompt callback function.
	AgreedTerms string

	// CurrentTerms is the URI of the actual terms of a CA.
	//
	// It is non-RFC 8555 compliant. Use Directory's Terms field.
	// When a CA updates their terms and requires an account agreement,
	// a URL at which instructions to do so is available in Error's Instance field.
	CurrentTerms string

	// Authz is the authorization URL used to initiate a new authz flow.
	//
	// It is non-RFC 8555 compliant. Use Directory's AuthzURL or OrderURL.
	Authz string

	// Authorizations is a URI from which a list of authorizations
	// granted to this account can be fetched via a GET request.
	//
	// It is non-RFC 8555 compliant and is obsoleted by OrdersURL.
	Authorizations string

	// Certificates is a URI from which a list of certificates
	// issued for this account can be fetched via a GET request.
	//
	// It is non-RFC 8555 compliant and is obsoleted by OrdersURL.
	Certificates string

	// ExternalAccountBinding represents an arbitrary binding to an account of
	// the CA which the ACME server is tied to.
	// See https://tools.ietf.org/html/rfc8555#section-7.3.4 for more details.
	ExternalAccountBinding *ExternalAccountBinding
}
|
|
||||||
|
|
||||||
// ExternalAccountBinding contains the data needed to form a request with
// an external account binding.
// See https://tools.ietf.org/html/rfc8555#section-7.3.4 for more details.
type ExternalAccountBinding struct {
	// KID is the Key ID of the symmetric MAC key that the CA provides to
	// identify an external account from ACME.
	KID string

	// Key is the bytes of the symmetric key that the CA provides to identify
	// the account. Key must correspond to the KID.
	Key []byte
}

// String renders the binding with the MAC key redacted, so that the
// secret cannot leak through logging or %v formatting.
func (e *ExternalAccountBinding) String() string {
	return fmt.Sprintf("&{KID: %q, Key: redacted}", e.KID)
}
|
|
||||||
|
|
||||||
// Directory is ACME server discovery data.
// See https://tools.ietf.org/html/rfc8555#section-7.1.1 for more details.
type Directory struct {
	// NonceURL indicates an endpoint where to fetch fresh nonce values from.
	NonceURL string

	// RegURL is an account endpoint URL, allowing for creating new accounts.
	// Pre-RFC 8555 CAs also allow modifying existing accounts at this URL.
	RegURL string

	// OrderURL is used to initiate the certificate issuance flow
	// as described in RFC 8555.
	OrderURL string

	// AuthzURL is used to initiate identifier pre-authorization flow.
	// Empty string indicates the flow is unsupported by the CA.
	AuthzURL string

	// CertURL is a new certificate issuance endpoint URL.
	// It is non-RFC 8555 compliant and is obsoleted by OrderURL.
	CertURL string

	// RevokeURL is used to initiate a certificate revocation flow.
	RevokeURL string

	// KeyChangeURL allows to perform account key rollover flow.
	KeyChangeURL string

	// Terms is a URI identifying the current terms of service.
	Terms string

	// Website is an HTTP or HTTPS URL locating a website
	// providing more information about the ACME server.
	Website string

	// CAA consists of lowercase hostname elements, which the ACME server
	// recognises as referring to itself for the purposes of CAA record validation
	// as defined in RFC6844.
	CAA []string

	// ExternalAccountRequired indicates that the CA requires for all account-related
	// requests to include external account binding information.
	ExternalAccountRequired bool
}
|
|
||||||
|
|
||||||
// rfcCompliant reports whether the ACME server implements RFC 8555.
// Note that some servers may have incomplete RFC implementation
// even if the returned value is true.
// If rfcCompliant reports false, the server most likely implements draft-02.
//
// The newOrder endpoint is mandatory in RFC 8555 and absent from draft-02
// directories, which makes its presence a usable discriminator.
func (d *Directory) rfcCompliant() bool {
	return d.OrderURL != ""
}
|
|
||||||
|
|
||||||
// Order represents a client's request for a certificate.
// It tracks the request flow progress through to issuance.
type Order struct {
	// URI uniquely identifies an order.
	URI string

	// Status represents the current status of the order.
	// It indicates which action the client should take.
	//
	// Possible values are StatusPending, StatusReady, StatusProcessing, StatusValid and StatusInvalid.
	// Pending means the CA does not believe that the client has fulfilled the requirements.
	// Ready indicates that the client has fulfilled all the requirements and can submit a CSR
	// to obtain a certificate. This is done with Client's CreateOrderCert.
	// Processing means the certificate is being issued.
	// Valid indicates the CA has issued the certificate. It can be downloaded
	// from the Order's CertURL. This is done with Client's FetchCert.
	// Invalid means the certificate will not be issued. Users should consider this order
	// abandoned.
	Status string

	// Expires is the timestamp after which CA considers this order invalid.
	Expires time.Time

	// Identifiers contains all identifier objects which the order pertains to.
	Identifiers []AuthzID

	// NotBefore is the requested value of the notBefore field in the certificate.
	NotBefore time.Time

	// NotAfter is the requested value of the notAfter field in the certificate.
	NotAfter time.Time

	// AuthzURLs represents authorizations to complete before a certificate
	// for identifiers specified in the order can be issued.
	// It also contains unexpired authorizations that the client has completed
	// in the past.
	//
	// Authorization objects can be fetched using Client's GetAuthorization method.
	//
	// The required authorizations are dictated by CA policies.
	// There may not be a 1:1 relationship between the identifiers and required authorizations.
	// Required authorizations can be identified by their StatusPending status.
	//
	// For orders in the StatusValid or StatusInvalid state these are the authorizations
	// which were completed.
	AuthzURLs []string

	// FinalizeURL is the endpoint at which a CSR is submitted to obtain a certificate
	// once all the authorizations are satisfied.
	FinalizeURL string

	// CertURL points to the certificate that has been issued in response to this order.
	CertURL string

	// Error is the error that occurred while processing the order,
	// as received from the CA, if any.
	Error *Error
}
|
|
||||||
|
|
||||||
// OrderOption allows customizing Client.AuthorizeOrder call.
// The unexported method seals the interface: only option types declared
// in this package can satisfy it.
type OrderOption interface {
	privateOrderOpt()
}

// WithOrderNotBefore sets order's NotBefore field.
func WithOrderNotBefore(t time.Time) OrderOption {
	return orderNotBeforeOpt(t)
}

// WithOrderNotAfter sets order's NotAfter field.
func WithOrderNotAfter(t time.Time) OrderOption {
	return orderNotAfterOpt(t)
}

// orderNotBeforeOpt carries the NotBefore value; AuthorizeOrder recovers
// it via a type switch.
type orderNotBeforeOpt time.Time

func (orderNotBeforeOpt) privateOrderOpt() {}

// orderNotAfterOpt carries the NotAfter value; AuthorizeOrder recovers
// it via a type switch.
type orderNotAfterOpt time.Time

func (orderNotAfterOpt) privateOrderOpt() {}
|
|
||||||
|
|
||||||
// Authorization encodes an authorization response.
type Authorization struct {
	// URI uniquely identifies an authorization.
	URI string

	// Status is the current status of an authorization.
	// Possible values are StatusPending, StatusValid, StatusInvalid, StatusDeactivated,
	// StatusExpired and StatusRevoked.
	Status string

	// Identifier is what the account is authorized to represent.
	Identifier AuthzID

	// Expires is the timestamp after which the CA considers the authorization invalid.
	Expires time.Time

	// Wildcard is true for authorizations of a wildcard domain name.
	Wildcard bool

	// Challenges that the client needs to fulfill in order to prove possession
	// of the identifier (for pending authorizations).
	// For valid authorizations, the challenge that was validated.
	// For invalid authorizations, the challenge that was attempted and failed.
	//
	// RFC 8555 compatible CAs require users to fulfill only one of the challenges.
	Challenges []*Challenge

	// Combinations is a collection of sets of challenges, each of which would
	// be sufficient to prove possession of the identifier.
	// Clients must complete a set of challenges that covers at least one set.
	// Challenges are identified by their indices in the challenges array.
	// If this field is empty, the client needs to complete all challenges.
	//
	// This field is unused in RFC 8555.
	Combinations [][]int
}
|
|
||||||
|
|
||||||
// AuthzID is an identifier that an account is authorized to represent.
type AuthzID struct {
	Type  string // The type of identifier, "dns" or "ip".
	Value string // The identifier itself, e.g. "example.org".
}
|
|
||||||
|
|
||||||
// DomainIDs creates a slice of AuthzID with "dns" identifier type.
|
|
||||||
func DomainIDs(names ...string) []AuthzID {
|
|
||||||
a := make([]AuthzID, len(names))
|
|
||||||
for i, v := range names {
|
|
||||||
a[i] = AuthzID{Type: "dns", Value: v}
|
|
||||||
}
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
|
|
||||||
// IPIDs creates a slice of AuthzID with "ip" identifier type.
|
|
||||||
// Each element of addr is textual form of an address as defined
|
|
||||||
// in RFC1123 Section 2.1 for IPv4 and in RFC5952 Section 4 for IPv6.
|
|
||||||
func IPIDs(addr ...string) []AuthzID {
|
|
||||||
a := make([]AuthzID, len(addr))
|
|
||||||
for i, v := range addr {
|
|
||||||
a[i] = AuthzID{Type: "ip", Value: v}
|
|
||||||
}
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
|
|
||||||
// wireAuthzID is ACME JSON representation of authorization identifier objects.
|
|
||||||
type wireAuthzID struct {
|
|
||||||
Type string `json:"type"`
|
|
||||||
Value string `json:"value"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// wireAuthz is ACME JSON representation of Authorization objects.
|
|
||||||
type wireAuthz struct {
|
|
||||||
Identifier wireAuthzID
|
|
||||||
Status string
|
|
||||||
Expires time.Time
|
|
||||||
Wildcard bool
|
|
||||||
Challenges []wireChallenge
|
|
||||||
Combinations [][]int
|
|
||||||
Error *wireError
|
|
||||||
}
|
|
||||||
|
|
||||||
func (z *wireAuthz) authorization(uri string) *Authorization {
|
|
||||||
a := &Authorization{
|
|
||||||
URI: uri,
|
|
||||||
Status: z.Status,
|
|
||||||
Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value},
|
|
||||||
Expires: z.Expires,
|
|
||||||
Wildcard: z.Wildcard,
|
|
||||||
Challenges: make([]*Challenge, len(z.Challenges)),
|
|
||||||
Combinations: z.Combinations, // shallow copy
|
|
||||||
}
|
|
||||||
for i, v := range z.Challenges {
|
|
||||||
a.Challenges[i] = v.challenge()
|
|
||||||
}
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
|
|
||||||
func (z *wireAuthz) error(uri string) *AuthorizationError {
|
|
||||||
err := &AuthorizationError{
|
|
||||||
URI: uri,
|
|
||||||
Identifier: z.Identifier.Value,
|
|
||||||
}
|
|
||||||
|
|
||||||
if z.Error != nil {
|
|
||||||
err.Errors = append(err.Errors, z.Error.error(nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, raw := range z.Challenges {
|
|
||||||
if raw.Error != nil {
|
|
||||||
err.Errors = append(err.Errors, raw.Error.error(nil))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Challenge encodes a returned CA challenge.
|
|
||||||
// Its Error field may be non-nil if the challenge is part of an Authorization
|
|
||||||
// with StatusInvalid.
|
|
||||||
type Challenge struct {
|
|
||||||
// Type is the challenge type, e.g. "http-01", "tls-alpn-01", "dns-01".
|
|
||||||
Type string
|
|
||||||
|
|
||||||
// URI is where a challenge response can be posted to.
|
|
||||||
URI string
|
|
||||||
|
|
||||||
// Token is a random value that uniquely identifies the challenge.
|
|
||||||
Token string
|
|
||||||
|
|
||||||
// Status identifies the status of this challenge.
|
|
||||||
// In RFC 8555, possible values are StatusPending, StatusProcessing, StatusValid,
|
|
||||||
// and StatusInvalid.
|
|
||||||
Status string
|
|
||||||
|
|
||||||
// Validated is the time at which the CA validated this challenge.
|
|
||||||
// Always zero value in pre-RFC 8555.
|
|
||||||
Validated time.Time
|
|
||||||
|
|
||||||
// Error indicates the reason for an authorization failure
|
|
||||||
// when this challenge was used.
|
|
||||||
// The type of a non-nil value is *Error.
|
|
||||||
Error error
|
|
||||||
}
|
|
||||||
|
|
||||||
// wireChallenge is ACME JSON challenge representation.
|
|
||||||
type wireChallenge struct {
|
|
||||||
URL string `json:"url"` // RFC
|
|
||||||
URI string `json:"uri"` // pre-RFC
|
|
||||||
Type string
|
|
||||||
Token string
|
|
||||||
Status string
|
|
||||||
Validated time.Time
|
|
||||||
Error *wireError
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *wireChallenge) challenge() *Challenge {
|
|
||||||
v := &Challenge{
|
|
||||||
URI: c.URL,
|
|
||||||
Type: c.Type,
|
|
||||||
Token: c.Token,
|
|
||||||
Status: c.Status,
|
|
||||||
}
|
|
||||||
if v.URI == "" {
|
|
||||||
v.URI = c.URI // c.URL was empty; use legacy
|
|
||||||
}
|
|
||||||
if v.Status == "" {
|
|
||||||
v.Status = StatusPending
|
|
||||||
}
|
|
||||||
if c.Error != nil {
|
|
||||||
v.Error = c.Error.error(nil)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// wireError is a subset of fields of the Problem Details object
|
|
||||||
// as described in https://tools.ietf.org/html/rfc7807#section-3.1.
|
|
||||||
type wireError struct {
|
|
||||||
Status int
|
|
||||||
Type string
|
|
||||||
Detail string
|
|
||||||
Instance string
|
|
||||||
Subproblems []Subproblem
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *wireError) error(h http.Header) *Error {
|
|
||||||
err := &Error{
|
|
||||||
StatusCode: e.Status,
|
|
||||||
ProblemType: e.Type,
|
|
||||||
Detail: e.Detail,
|
|
||||||
Instance: e.Instance,
|
|
||||||
Header: h,
|
|
||||||
Subproblems: e.Subproblems,
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// CertOption is an optional argument type for the TLS ChallengeCert methods for
|
|
||||||
// customizing a temporary certificate for TLS-based challenges.
|
|
||||||
type CertOption interface {
|
|
||||||
privateCertOpt()
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithKey creates an option holding a private/public key pair.
|
|
||||||
// The private part signs a certificate, and the public part represents the signee.
|
|
||||||
func WithKey(key crypto.Signer) CertOption {
|
|
||||||
return &certOptKey{key}
|
|
||||||
}
|
|
||||||
|
|
||||||
type certOptKey struct {
|
|
||||||
key crypto.Signer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*certOptKey) privateCertOpt() {}
|
|
||||||
|
|
||||||
// WithTemplate creates an option for specifying a certificate template.
|
|
||||||
// See x509.CreateCertificate for template usage details.
|
|
||||||
//
|
|
||||||
// In TLS ChallengeCert methods, the template is also used as parent,
|
|
||||||
// resulting in a self-signed certificate.
|
|
||||||
// The DNSNames field of t is always overwritten for tls-sni challenge certs.
|
|
||||||
func WithTemplate(t *x509.Certificate) CertOption {
|
|
||||||
return (*certOptTemplate)(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
type certOptTemplate x509.Certificate
|
|
||||||
|
|
||||||
func (*certOptTemplate) privateCertOpt() {}
|
|
||||||
28
vendor/golang.org/x/crypto/acme/version_go112.go
generated
vendored
28
vendor/golang.org/x/crypto/acme/version_go112.go
generated
vendored
@@ -1,28 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
//go:build go1.12
|
|
||||||
// +build go1.12
|
|
||||||
|
|
||||||
package acme
|
|
||||||
|
|
||||||
import "runtime/debug"
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// Set packageVersion if the binary was built in modules mode and x/crypto
|
|
||||||
// was not replaced with a different module.
|
|
||||||
info, ok := debug.ReadBuildInfo()
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, m := range info.Deps {
|
|
||||||
if m.Path != "golang.org/x/crypto" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if m.Replace == nil {
|
|
||||||
packageVersion = m.Version
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
4
vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS
generated
vendored
Normal file
4
vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
# See the OWNERS docs at https://go.k8s.io/owners
|
||||||
|
|
||||||
|
approvers:
|
||||||
|
- feature-approvers
|
||||||
39
vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
generated
vendored
Normal file
39
vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package features
|
||||||
|
|
||||||
|
import (
|
||||||
|
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||||
|
"k8s.io/component-base/featuregate"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Every feature gate should add method here following this template:
|
||||||
|
//
|
||||||
|
// // owner: @username
|
||||||
|
// // alpha: v1.4
|
||||||
|
// MyFeature() bool
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
|
||||||
|
// To add a new feature, define a key for it above and add it here. The features will be
|
||||||
|
// available throughout Kubernetes binaries.
|
||||||
|
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{}
|
||||||
202
vendor/k8s.io/apiserver/LICENSE
generated
vendored
Normal file
202
vendor/k8s.io/apiserver/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
4
vendor/k8s.io/apiserver/pkg/features/OWNERS
generated
vendored
Normal file
4
vendor/k8s.io/apiserver/pkg/features/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
# See the OWNERS docs at https://go.k8s.io/owners
|
||||||
|
|
||||||
|
approvers:
|
||||||
|
- feature-approvers
|
||||||
178
vendor/k8s.io/apiserver/pkg/features/kube_features.go
generated
vendored
Normal file
178
vendor/k8s.io/apiserver/pkg/features/kube_features.go
generated
vendored
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package features
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/util/runtime"
|
||||||
|
|
||||||
|
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||||
|
"k8s.io/component-base/featuregate"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Every feature gate should add method here following this template:
|
||||||
|
//
|
||||||
|
// // owner: @username
|
||||||
|
// // alpha: v1.4
|
||||||
|
// MyFeature() bool
|
||||||
|
|
||||||
|
// owner: @tallclair
|
||||||
|
// alpha: v1.5
|
||||||
|
// beta: v1.6
|
||||||
|
// deprecated: v1.18
|
||||||
|
//
|
||||||
|
// StreamingProxyRedirects controls whether the apiserver should intercept (and follow)
|
||||||
|
// redirects from the backend (Kubelet) for streaming requests (exec/attach/port-forward).
|
||||||
|
//
|
||||||
|
// This feature is deprecated, and will be removed in v1.22.
|
||||||
|
StreamingProxyRedirects featuregate.Feature = "StreamingProxyRedirects"
|
||||||
|
|
||||||
|
// owner: @tallclair
|
||||||
|
// alpha: v1.12
|
||||||
|
// beta: v1.14
|
||||||
|
//
|
||||||
|
// ValidateProxyRedirects controls whether the apiserver should validate that redirects are only
|
||||||
|
// followed to the same host. Only used if StreamingProxyRedirects is enabled.
|
||||||
|
ValidateProxyRedirects featuregate.Feature = "ValidateProxyRedirects"
|
||||||
|
|
||||||
|
// owner: @tallclair
|
||||||
|
// alpha: v1.7
|
||||||
|
// beta: v1.8
|
||||||
|
// GA: v1.12
|
||||||
|
//
|
||||||
|
// AdvancedAuditing enables a much more general API auditing pipeline, which includes support for
|
||||||
|
// pluggable output backends and an audit policy specifying how different requests should be
|
||||||
|
// audited.
|
||||||
|
AdvancedAuditing featuregate.Feature = "AdvancedAuditing"
|
||||||
|
|
||||||
|
// owner: @pbarker
|
||||||
|
// alpha: v1.13
|
||||||
|
//
|
||||||
|
// DynamicAuditing enables configuration of audit policy and webhook backends through an
|
||||||
|
// AuditSink API object.
|
||||||
|
DynamicAuditing featuregate.Feature = "DynamicAuditing"
|
||||||
|
|
||||||
|
// owner: @ilackams
|
||||||
|
// alpha: v1.7
|
||||||
|
//
|
||||||
|
// Enables compression of REST responses (GET and LIST only)
|
||||||
|
APIResponseCompression featuregate.Feature = "APIResponseCompression"
|
||||||
|
|
||||||
|
// owner: @smarterclayton
|
||||||
|
// alpha: v1.8
|
||||||
|
// beta: v1.9
|
||||||
|
//
|
||||||
|
// Allow API clients to retrieve resource lists in chunks rather than
|
||||||
|
// all at once.
|
||||||
|
APIListChunking featuregate.Feature = "APIListChunking"
|
||||||
|
|
||||||
|
// owner: @apelisse
|
||||||
|
// alpha: v1.12
|
||||||
|
// beta: v1.13
|
||||||
|
//
|
||||||
|
// Allow requests to be processed but not stored, so that
|
||||||
|
// validation, merging, mutation can be tested without
|
||||||
|
// committing.
|
||||||
|
DryRun featuregate.Feature = "DryRun"
|
||||||
|
|
||||||
|
// owner: @caesarxuchao
|
||||||
|
// alpha: v1.15
|
||||||
|
//
|
||||||
|
// Allow apiservers to show a count of remaining items in the response
|
||||||
|
// to a chunking list request.
|
||||||
|
RemainingItemCount featuregate.Feature = "RemainingItemCount"
|
||||||
|
|
||||||
|
// owner: @apelisse, @lavalamp
|
||||||
|
// alpha: v1.14
|
||||||
|
// beta: v1.16
|
||||||
|
//
|
||||||
|
// Server-side apply. Merging happens on the server.
|
||||||
|
ServerSideApply featuregate.Feature = "ServerSideApply"
|
||||||
|
|
||||||
|
// owner: @caesarxuchao
|
||||||
|
// alpha: v1.14
|
||||||
|
// beta: v1.15
|
||||||
|
//
|
||||||
|
// Allow apiservers to expose the storage version hash in the discovery
|
||||||
|
// document.
|
||||||
|
StorageVersionHash featuregate.Feature = "StorageVersionHash"
|
||||||
|
|
||||||
|
// owner: @ksubrmnn
|
||||||
|
// alpha: v1.14
|
||||||
|
//
|
||||||
|
// Allows kube-proxy to run in Overlay mode for Windows
|
||||||
|
WinOverlay featuregate.Feature = "WinOverlay"
|
||||||
|
|
||||||
|
// owner: @ksubrmnn
|
||||||
|
// alpha: v1.14
|
||||||
|
//
|
||||||
|
// Allows kube-proxy to create DSR loadbalancers for Windows
|
||||||
|
WinDSR featuregate.Feature = "WinDSR"
|
||||||
|
|
||||||
|
// owner: @wojtek-t
|
||||||
|
// alpha: v1.15
|
||||||
|
// beta: v1.16
|
||||||
|
// GA: v1.17
|
||||||
|
//
|
||||||
|
// Enables support for watch bookmark events.
|
||||||
|
WatchBookmark featuregate.Feature = "WatchBookmark"
|
||||||
|
|
||||||
|
// owner: @MikeSpreitzer @yue9944882
|
||||||
|
// alpha: v1.15
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Enables managing request concurrency with prioritization and fairness at each server
|
||||||
|
APIPriorityAndFairness featuregate.Feature = "APIPriorityAndFairness"
|
||||||
|
|
||||||
|
// owner: @wojtek-t
|
||||||
|
// alpha: v1.16
|
||||||
|
//
|
||||||
|
// Deprecates and removes SelfLink from ObjectMeta and ListMeta.
|
||||||
|
RemoveSelfLink featuregate.Feature = "RemoveSelfLink"
|
||||||
|
|
||||||
|
// owner: @shaloulcy
|
||||||
|
// alpha: v1.18
|
||||||
|
//
|
||||||
|
// Allows label and field based indexes in apiserver watch cache to accelerate list operations.
|
||||||
|
SelectorIndex featuregate.Feature = "SelectorIndex"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates))
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
|
||||||
|
// To add a new feature, define a key for it above and add it here. The features will be
|
||||||
|
// available throughout Kubernetes binaries.
|
||||||
|
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
|
||||||
|
StreamingProxyRedirects: {Default: true, PreRelease: featuregate.Deprecated},
|
||||||
|
ValidateProxyRedirects: {Default: true, PreRelease: featuregate.Beta},
|
||||||
|
AdvancedAuditing: {Default: true, PreRelease: featuregate.GA},
|
||||||
|
DynamicAuditing: {Default: false, PreRelease: featuregate.Alpha},
|
||||||
|
APIResponseCompression: {Default: true, PreRelease: featuregate.Beta},
|
||||||
|
APIListChunking: {Default: true, PreRelease: featuregate.Beta},
|
||||||
|
DryRun: {Default: true, PreRelease: featuregate.Beta},
|
||||||
|
RemainingItemCount: {Default: true, PreRelease: featuregate.Beta},
|
||||||
|
ServerSideApply: {Default: true, PreRelease: featuregate.Beta},
|
||||||
|
StorageVersionHash: {Default: true, PreRelease: featuregate.Beta},
|
||||||
|
WinOverlay: {Default: false, PreRelease: featuregate.Alpha},
|
||||||
|
WinDSR: {Default: false, PreRelease: featuregate.Alpha},
|
||||||
|
WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||||
|
APIPriorityAndFairness: {Default: false, PreRelease: featuregate.Alpha},
|
||||||
|
RemoveSelfLink: {Default: false, PreRelease: featuregate.Alpha},
|
||||||
|
SelectorIndex: {Default: false, PreRelease: featuregate.Alpha},
|
||||||
|
}
|
||||||
33
vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go
generated
vendored
Normal file
33
vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package feature
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/component-base/featuregate"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate.
|
||||||
|
// Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this.
|
||||||
|
// Tests that need to modify feature gates for the duration of their test should use:
|
||||||
|
// defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)()
|
||||||
|
DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()
|
||||||
|
|
||||||
|
// DefaultFeatureGate is a shared global FeatureGate.
|
||||||
|
// Top-level commands/options setup that needs to modify this feature gate should use DefaultMutableFeatureGate.
|
||||||
|
DefaultFeatureGate featuregate.FeatureGate = DefaultMutableFeatureGate
|
||||||
|
)
|
||||||
202
vendor/k8s.io/component-base/LICENSE
generated
vendored
Normal file
202
vendor/k8s.io/component-base/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
364
vendor/k8s.io/component-base/featuregate/feature_gate.go
generated
vendored
Normal file
364
vendor/k8s.io/component-base/featuregate/feature_gate.go
generated
vendored
Normal file
@@ -0,0 +1,364 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package featuregate
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/spf13/pflag"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/util/naming"
|
||||||
|
"k8s.io/klog"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Feature string
|
||||||
|
|
||||||
|
const (
|
||||||
|
flagName = "feature-gates"
|
||||||
|
|
||||||
|
// allAlphaGate is a global toggle for alpha features. Per-feature key
|
||||||
|
// values override the default set by allAlphaGate. Examples:
|
||||||
|
// AllAlpha=false,NewFeature=true will result in newFeature=true
|
||||||
|
// AllAlpha=true,NewFeature=false will result in newFeature=false
|
||||||
|
allAlphaGate Feature = "AllAlpha"
|
||||||
|
|
||||||
|
// allBetaGate is a global toggle for beta features. Per-feature key
|
||||||
|
// values override the default set by allBetaGate. Examples:
|
||||||
|
// AllBeta=false,NewFeature=true will result in NewFeature=true
|
||||||
|
// AllBeta=true,NewFeature=false will result in NewFeature=false
|
||||||
|
allBetaGate Feature = "AllBeta"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// The generic features.
|
||||||
|
defaultFeatures = map[Feature]FeatureSpec{
|
||||||
|
allAlphaGate: {Default: false, PreRelease: Alpha},
|
||||||
|
allBetaGate: {Default: false, PreRelease: Beta},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Special handling for a few gates.
|
||||||
|
specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){
|
||||||
|
allAlphaGate: setUnsetAlphaGates,
|
||||||
|
allBetaGate: setUnsetBetaGates,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
type FeatureSpec struct {
|
||||||
|
// Default is the default enablement state for the feature
|
||||||
|
Default bool
|
||||||
|
// LockToDefault indicates that the feature is locked to its default and cannot be changed
|
||||||
|
LockToDefault bool
|
||||||
|
// PreRelease indicates the maturity level of the feature
|
||||||
|
PreRelease prerelease
|
||||||
|
}
|
||||||
|
|
||||||
|
type prerelease string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Values for PreRelease.
|
||||||
|
Alpha = prerelease("ALPHA")
|
||||||
|
Beta = prerelease("BETA")
|
||||||
|
GA = prerelease("")
|
||||||
|
|
||||||
|
// Deprecated
|
||||||
|
Deprecated = prerelease("DEPRECATED")
|
||||||
|
)
|
||||||
|
|
||||||
|
// FeatureGate indicates whether a given feature is enabled or not
|
||||||
|
type FeatureGate interface {
|
||||||
|
// Enabled returns true if the key is enabled.
|
||||||
|
Enabled(key Feature) bool
|
||||||
|
// KnownFeatures returns a slice of strings describing the FeatureGate's known features.
|
||||||
|
KnownFeatures() []string
|
||||||
|
// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be
|
||||||
|
// set on the copy without mutating the original. This is useful for validating
|
||||||
|
// config against potential feature gate changes before committing those changes.
|
||||||
|
DeepCopy() MutableFeatureGate
|
||||||
|
}
|
||||||
|
|
||||||
|
// MutableFeatureGate parses and stores flag gates for known features from
|
||||||
|
// a string like feature1=true,feature2=false,...
|
||||||
|
type MutableFeatureGate interface {
|
||||||
|
FeatureGate
|
||||||
|
|
||||||
|
// AddFlag adds a flag for setting global feature gates to the specified FlagSet.
|
||||||
|
AddFlag(fs *pflag.FlagSet)
|
||||||
|
// Set parses and stores flag gates for known features
|
||||||
|
// from a string like feature1=true,feature2=false,...
|
||||||
|
Set(value string) error
|
||||||
|
// SetFromMap stores flag gates for known features from a map[string]bool or returns an error
|
||||||
|
SetFromMap(m map[string]bool) error
|
||||||
|
// Add adds features to the featureGate.
|
||||||
|
Add(features map[Feature]FeatureSpec) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// featureGate implements FeatureGate as well as pflag.Value for flag parsing.
|
||||||
|
type featureGate struct {
|
||||||
|
featureGateName string
|
||||||
|
|
||||||
|
special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool)
|
||||||
|
|
||||||
|
// lock guards writes to known, enabled, and reads/writes of closed
|
||||||
|
lock sync.Mutex
|
||||||
|
// known holds a map[Feature]FeatureSpec
|
||||||
|
known *atomic.Value
|
||||||
|
// enabled holds a map[Feature]bool
|
||||||
|
enabled *atomic.Value
|
||||||
|
// closed is set to true when AddFlag is called, and prevents subsequent calls to Add
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) {
|
||||||
|
for k, v := range known {
|
||||||
|
if v.PreRelease == Alpha {
|
||||||
|
if _, found := enabled[k]; !found {
|
||||||
|
enabled[k] = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setUnsetBetaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) {
|
||||||
|
for k, v := range known {
|
||||||
|
if v.PreRelease == Beta {
|
||||||
|
if _, found := enabled[k]; !found {
|
||||||
|
enabled[k] = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set, String, and Type implement pflag.Value
|
||||||
|
var _ pflag.Value = &featureGate{}
|
||||||
|
|
||||||
|
// internalPackages are packages that ignored when creating a name for featureGates. These packages are in the common
|
||||||
|
// call chains, so they'd be unhelpful as names.
|
||||||
|
var internalPackages = []string{"k8s.io/component-base/featuregate/feature_gate.go"}
|
||||||
|
|
||||||
|
func NewFeatureGate() *featureGate {
|
||||||
|
known := map[Feature]FeatureSpec{}
|
||||||
|
for k, v := range defaultFeatures {
|
||||||
|
known[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
knownValue := &atomic.Value{}
|
||||||
|
knownValue.Store(known)
|
||||||
|
|
||||||
|
enabled := map[Feature]bool{}
|
||||||
|
enabledValue := &atomic.Value{}
|
||||||
|
enabledValue.Store(enabled)
|
||||||
|
|
||||||
|
f := &featureGate{
|
||||||
|
featureGateName: naming.GetNameFromCallsite(internalPackages...),
|
||||||
|
known: knownValue,
|
||||||
|
special: specialFeatures,
|
||||||
|
enabled: enabledValue,
|
||||||
|
}
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set parses a string of the form "key1=value1,key2=value2,..." into a
|
||||||
|
// map[string]bool of known keys or returns an error.
|
||||||
|
func (f *featureGate) Set(value string) error {
|
||||||
|
m := make(map[string]bool)
|
||||||
|
for _, s := range strings.Split(value, ",") {
|
||||||
|
if len(s) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
arr := strings.SplitN(s, "=", 2)
|
||||||
|
k := strings.TrimSpace(arr[0])
|
||||||
|
if len(arr) != 2 {
|
||||||
|
return fmt.Errorf("missing bool value for %s", k)
|
||||||
|
}
|
||||||
|
v := strings.TrimSpace(arr[1])
|
||||||
|
boolValue, err := strconv.ParseBool(v)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid value of %s=%s, err: %v", k, v, err)
|
||||||
|
}
|
||||||
|
m[k] = boolValue
|
||||||
|
}
|
||||||
|
return f.SetFromMap(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetFromMap stores flag gates for known features from a map[string]bool or returns an error
|
||||||
|
func (f *featureGate) SetFromMap(m map[string]bool) error {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
// Copy existing state
|
||||||
|
known := map[Feature]FeatureSpec{}
|
||||||
|
for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
|
||||||
|
known[k] = v
|
||||||
|
}
|
||||||
|
enabled := map[Feature]bool{}
|
||||||
|
for k, v := range f.enabled.Load().(map[Feature]bool) {
|
||||||
|
enabled[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range m {
|
||||||
|
k := Feature(k)
|
||||||
|
featureSpec, ok := known[k]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unrecognized feature gate: %s", k)
|
||||||
|
}
|
||||||
|
if featureSpec.LockToDefault && featureSpec.Default != v {
|
||||||
|
return fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default)
|
||||||
|
}
|
||||||
|
enabled[k] = v
|
||||||
|
// Handle "special" features like "all alpha gates"
|
||||||
|
if fn, found := f.special[k]; found {
|
||||||
|
fn(known, enabled, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
if featureSpec.PreRelease == Deprecated {
|
||||||
|
klog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v)
|
||||||
|
} else if featureSpec.PreRelease == GA {
|
||||||
|
klog.Warningf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Persist changes
|
||||||
|
f.known.Store(known)
|
||||||
|
f.enabled.Store(enabled)
|
||||||
|
|
||||||
|
klog.V(1).Infof("feature gates: %v", f.enabled)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...".
|
||||||
|
func (f *featureGate) String() string {
|
||||||
|
pairs := []string{}
|
||||||
|
for k, v := range f.enabled.Load().(map[Feature]bool) {
|
||||||
|
pairs = append(pairs, fmt.Sprintf("%s=%t", k, v))
|
||||||
|
}
|
||||||
|
sort.Strings(pairs)
|
||||||
|
return strings.Join(pairs, ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *featureGate) Type() string {
|
||||||
|
return "mapStringBool"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds features to the featureGate.
|
||||||
|
func (f *featureGate) Add(features map[Feature]FeatureSpec) error {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if f.closed {
|
||||||
|
return fmt.Errorf("cannot add a feature gate after adding it to the flag set")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy existing state
|
||||||
|
known := map[Feature]FeatureSpec{}
|
||||||
|
for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
|
||||||
|
known[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, spec := range features {
|
||||||
|
if existingSpec, found := known[name]; found {
|
||||||
|
if existingSpec == spec {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec)
|
||||||
|
}
|
||||||
|
|
||||||
|
known[name] = spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// Persist updated state
|
||||||
|
f.known.Store(known)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enabled returns true if the key is enabled. If the key is not known, this call will panic.
|
||||||
|
func (f *featureGate) Enabled(key Feature) bool {
|
||||||
|
if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
if v, ok := f.known.Load().(map[Feature]FeatureSpec)[key]; ok {
|
||||||
|
return v.Default
|
||||||
|
}
|
||||||
|
|
||||||
|
panic(fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName))
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddFlag adds a flag for setting global feature gates to the specified FlagSet.
|
||||||
|
func (f *featureGate) AddFlag(fs *pflag.FlagSet) {
|
||||||
|
f.lock.Lock()
|
||||||
|
// TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead?
|
||||||
|
// Not all components expose a feature gates flag using this AddFlag method, and
|
||||||
|
// in the future, all components will completely stop exposing a feature gates flag,
|
||||||
|
// in favor of componentconfig.
|
||||||
|
f.closed = true
|
||||||
|
f.lock.Unlock()
|
||||||
|
|
||||||
|
known := f.KnownFeatures()
|
||||||
|
fs.Var(f, flagName, ""+
|
||||||
|
"A set of key=value pairs that describe feature gates for alpha/experimental features. "+
|
||||||
|
"Options are:\n"+strings.Join(known, "\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KnownFeatures returns a slice of strings describing the FeatureGate's known features.
|
||||||
|
// Deprecated and GA features are hidden from the list.
|
||||||
|
func (f *featureGate) KnownFeatures() []string {
|
||||||
|
var known []string
|
||||||
|
for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
|
||||||
|
if v.PreRelease == GA || v.PreRelease == Deprecated {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v.PreRelease, v.Default))
|
||||||
|
}
|
||||||
|
sort.Strings(known)
|
||||||
|
return known
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be
|
||||||
|
// set on the copy without mutating the original. This is useful for validating
|
||||||
|
// config against potential feature gate changes before committing those changes.
|
||||||
|
func (f *featureGate) DeepCopy() MutableFeatureGate {
|
||||||
|
// Copy existing state.
|
||||||
|
known := map[Feature]FeatureSpec{}
|
||||||
|
for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
|
||||||
|
known[k] = v
|
||||||
|
}
|
||||||
|
enabled := map[Feature]bool{}
|
||||||
|
for k, v := range f.enabled.Load().(map[Feature]bool) {
|
||||||
|
enabled[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store copied state in new atomics.
|
||||||
|
knownValue := &atomic.Value{}
|
||||||
|
knownValue.Store(known)
|
||||||
|
enabledValue := &atomic.Value{}
|
||||||
|
enabledValue.Store(enabled)
|
||||||
|
|
||||||
|
// Construct a new featureGate around the copied state.
|
||||||
|
// Note that specialFeatures is treated as immutable by convention,
|
||||||
|
// and we maintain the value of f.closed across the copy.
|
||||||
|
return &featureGate{
|
||||||
|
special: specialFeatures,
|
||||||
|
known: knownValue,
|
||||||
|
enabled: enabledValue,
|
||||||
|
closed: f.closed,
|
||||||
|
}
|
||||||
|
}
|
||||||
202
vendor/k8s.io/kubernetes/LICENSE
generated
vendored
Normal file
202
vendor/k8s.io/kubernetes/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
46
vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD
generated
vendored
Normal file
46
vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
package(default_visibility = ["//visibility:public"])
|
||||||
|
|
||||||
|
load(
|
||||||
|
"@io_bazel_rules_go//go:def.bzl",
|
||||||
|
"go_library",
|
||||||
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"doc.go",
|
||||||
|
"register.go",
|
||||||
|
"types.go",
|
||||||
|
"zz_generated.deepcopy.go",
|
||||||
|
],
|
||||||
|
importpath = "k8s.io/kubernetes/pkg/apis/apps",
|
||||||
|
deps = [
|
||||||
|
"//pkg/apis/autoscaling:go_default_library",
|
||||||
|
"//pkg/apis/core:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [
|
||||||
|
":package-srcs",
|
||||||
|
"//pkg/apis/apps/fuzzer:all-srcs",
|
||||||
|
"//pkg/apis/apps/install:all-srcs",
|
||||||
|
"//pkg/apis/apps/v1:all-srcs",
|
||||||
|
"//pkg/apis/apps/v1beta1:all-srcs",
|
||||||
|
"//pkg/apis/apps/v1beta2:all-srcs",
|
||||||
|
"//pkg/apis/apps/validation:all-srcs",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
)
|
||||||
21
vendor/k8s.io/kubernetes/pkg/apis/apps/OWNERS
generated
vendored
Normal file
21
vendor/k8s.io/kubernetes/pkg/apis/apps/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
reviewers:
|
||||||
|
- thockin
|
||||||
|
- lavalamp
|
||||||
|
- smarterclayton
|
||||||
|
- deads2k
|
||||||
|
- caesarxuchao
|
||||||
|
- pmorie
|
||||||
|
- sttts
|
||||||
|
- saad-ali
|
||||||
|
- ncdc
|
||||||
|
- tallclair
|
||||||
|
- dims
|
||||||
|
- errordeveloper
|
||||||
|
- mml
|
||||||
|
- m1093782566
|
||||||
|
- mbohlool
|
||||||
|
- david-mcmahon
|
||||||
|
- kevin-wangzefeng
|
||||||
|
- jianhuiz
|
||||||
|
labels:
|
||||||
|
- sig/apps
|
||||||
19
vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go
generated
vendored
Normal file
19
vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen=package
|
||||||
|
|
||||||
|
package apps // import "k8s.io/kubernetes/pkg/apis/apps"
|
||||||
64
vendor/k8s.io/kubernetes/pkg/apis/apps/register.go
generated
vendored
Normal file
64
vendor/k8s.io/kubernetes/pkg/apis/apps/register.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package apps
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/kubernetes/pkg/apis/autoscaling"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||||
|
AddToScheme = SchemeBuilder.AddToScheme
|
||||||
|
)
|
||||||
|
|
||||||
|
// GroupName is the group name use in this package
|
||||||
|
const GroupName = "apps"
|
||||||
|
|
||||||
|
// SchemeGroupVersion is group version used to register these objects
|
||||||
|
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
||||||
|
|
||||||
|
// Kind takes an unqualified kind and returns a Group qualified GroupKind
|
||||||
|
func Kind(kind string) schema.GroupKind {
|
||||||
|
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||||
|
func Resource(resource string) schema.GroupResource {
|
||||||
|
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adds the list of known types to the given scheme.
|
||||||
|
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||||
|
// TODO this will get cleaned up with the scheme types are fixed
|
||||||
|
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||||
|
&DaemonSet{},
|
||||||
|
&DaemonSetList{},
|
||||||
|
&Deployment{},
|
||||||
|
&DeploymentList{},
|
||||||
|
&DeploymentRollback{},
|
||||||
|
&autoscaling.Scale{},
|
||||||
|
&StatefulSet{},
|
||||||
|
&StatefulSetList{},
|
||||||
|
&ControllerRevision{},
|
||||||
|
&ControllerRevisionList{},
|
||||||
|
&ReplicaSet{},
|
||||||
|
&ReplicaSetList{},
|
||||||
|
)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
801
vendor/k8s.io/kubernetes/pkg/apis/apps/types.go
generated
vendored
Normal file
801
vendor/k8s.io/kubernetes/pkg/apis/apps/types.go
generated
vendored
Normal file
@@ -0,0 +1,801 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package apps
|
||||||
|
|
||||||
|
import (
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
|
api "k8s.io/kubernetes/pkg/apis/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// +genclient
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// StatefulSet represents a set of pods with consistent identities.
|
||||||
|
// Identities are defined as:
|
||||||
|
// - Network: A single stable DNS and hostname.
|
||||||
|
// - Storage: As many VolumeClaims as requested.
|
||||||
|
// The StatefulSet guarantees that a given network identity will always
|
||||||
|
// map to the same storage identity.
|
||||||
|
type StatefulSet struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// +optional
|
||||||
|
metav1.ObjectMeta
|
||||||
|
|
||||||
|
// Spec defines the desired identities of pods in this set.
|
||||||
|
// +optional
|
||||||
|
Spec StatefulSetSpec
|
||||||
|
|
||||||
|
// Status is the current status of Pods in this StatefulSet. This data
|
||||||
|
// may be out of date by some window of time.
|
||||||
|
// +optional
|
||||||
|
Status StatefulSetStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// PodManagementPolicyType defines the policy for creating pods under a stateful set.
|
||||||
|
type PodManagementPolicyType string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// OrderedReadyPodManagement will create pods in strictly increasing order on
|
||||||
|
// scale up and strictly decreasing order on scale down, progressing only when
|
||||||
|
// the previous pod is ready or terminated. At most one pod will be changed
|
||||||
|
// at any time.
|
||||||
|
OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
|
||||||
|
// ParallelPodManagement will create and delete pods as soon as the stateful set
|
||||||
|
// replica count is changed, and will not wait for pods to be ready or complete
|
||||||
|
// termination.
|
||||||
|
ParallelPodManagement = "Parallel"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
|
||||||
|
// controller will use to perform updates. It includes any additional parameters
|
||||||
|
// necessary to perform the update for the indicated strategy.
|
||||||
|
type StatefulSetUpdateStrategy struct {
|
||||||
|
// Type indicates the type of the StatefulSetUpdateStrategy.
|
||||||
|
Type StatefulSetUpdateStrategyType
|
||||||
|
// RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
|
||||||
|
RollingUpdate *RollingUpdateStatefulSetStrategy
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
|
||||||
|
// all possible update strategies for the StatefulSet controller.
|
||||||
|
type StatefulSetUpdateStrategyType string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// RollingUpdateStatefulSetStrategyType indicates that update will be
|
||||||
|
// applied to all Pods in the StatefulSet with respect to the StatefulSet
|
||||||
|
// ordering constraints. When a scale operation is performed with this
|
||||||
|
// strategy, new Pods will be created from the specification version indicated
|
||||||
|
// by the StatefulSet's updateRevision.
|
||||||
|
RollingUpdateStatefulSetStrategyType = "RollingUpdate"
|
||||||
|
// OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
|
||||||
|
// tracking and ordered rolling restarts are disabled. Pods are recreated
|
||||||
|
// from the StatefulSetSpec when they are manually deleted. When a scale
|
||||||
|
// operation is performed with this strategy,specification version indicated
|
||||||
|
// by the StatefulSet's currentRevision.
|
||||||
|
OnDeleteStatefulSetStrategyType = "OnDelete"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
|
||||||
|
type RollingUpdateStatefulSetStrategy struct {
|
||||||
|
// Partition indicates the ordinal at which the StatefulSet should be
|
||||||
|
// partitioned.
|
||||||
|
Partition int32
|
||||||
|
}
|
||||||
|
|
||||||
|
// A StatefulSetSpec is the specification of a StatefulSet.
|
||||||
|
type StatefulSetSpec struct {
|
||||||
|
// Replicas is the desired number of replicas of the given Template.
|
||||||
|
// These are replicas in the sense that they are instantiations of the
|
||||||
|
// same Template, but individual replicas also have a consistent identity.
|
||||||
|
// If unspecified, defaults to 1.
|
||||||
|
// TODO: Consider a rename of this field.
|
||||||
|
// +optional
|
||||||
|
Replicas int32
|
||||||
|
|
||||||
|
// Selector is a label query over pods that should match the replica count.
|
||||||
|
// If empty, defaulted to labels on the pod template.
|
||||||
|
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
|
||||||
|
// +optional
|
||||||
|
Selector *metav1.LabelSelector
|
||||||
|
|
||||||
|
// Template is the object that describes the pod that will be created if
|
||||||
|
// insufficient replicas are detected. Each pod stamped out by the StatefulSet
|
||||||
|
// will fulfill this Template, but have a unique identity from the rest
|
||||||
|
// of the StatefulSet.
|
||||||
|
Template api.PodTemplateSpec
|
||||||
|
|
||||||
|
// VolumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||||
|
// The StatefulSet controller is responsible for mapping network identities to
|
||||||
|
// claims in a way that maintains the identity of a pod. Every claim in
|
||||||
|
// this list must have at least one matching (by name) volumeMount in one
|
||||||
|
// container in the template. A claim in this list takes precedence over
|
||||||
|
// any volumes in the template, with the same name.
|
||||||
|
// TODO: Define the behavior if a claim already exists with the same name.
|
||||||
|
// +optional
|
||||||
|
VolumeClaimTemplates []api.PersistentVolumeClaim
|
||||||
|
|
||||||
|
// ServiceName is the name of the service that governs this StatefulSet.
|
||||||
|
// This service must exist before the StatefulSet, and is responsible for
|
||||||
|
// the network identity of the set. Pods get DNS/hostnames that follow the
|
||||||
|
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
|
||||||
|
// where "pod-specific-string" is managed by the StatefulSet controller.
|
||||||
|
ServiceName string
|
||||||
|
|
||||||
|
// PodManagementPolicy controls how pods are created during initial scale up,
|
||||||
|
// when replacing pods on nodes, or when scaling down. The default policy is
|
||||||
|
// `OrderedReady`, where pods are created in increasing order (pod-0, then
|
||||||
|
// pod-1, etc) and the controller will wait until each pod is ready before
|
||||||
|
// continuing. When scaling down, the pods are removed in the opposite order.
|
||||||
|
// The alternative policy is `Parallel` which will create pods in parallel
|
||||||
|
// to match the desired scale without waiting, and on scale down will delete
|
||||||
|
// all pods at once.
|
||||||
|
// +optional
|
||||||
|
PodManagementPolicy PodManagementPolicyType
|
||||||
|
|
||||||
|
// updateStrategy indicates the StatefulSetUpdateStrategy that will be
|
||||||
|
// employed to update Pods in the StatefulSet when a revision is made to
|
||||||
|
// Template.
|
||||||
|
UpdateStrategy StatefulSetUpdateStrategy
|
||||||
|
|
||||||
|
// revisionHistoryLimit is the maximum number of revisions that will
|
||||||
|
// be maintained in the StatefulSet's revision history. The revision history
|
||||||
|
// consists of all revisions not represented by a currently applied
|
||||||
|
// StatefulSetSpec version. The default value is 10.
|
||||||
|
RevisionHistoryLimit *int32
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatefulSetStatus represents the current state of a StatefulSet.
|
||||||
|
type StatefulSetStatus struct {
|
||||||
|
// observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
|
||||||
|
// StatefulSet's generation, which is updated on mutation by the API Server.
|
||||||
|
// +optional
|
||||||
|
ObservedGeneration *int64
|
||||||
|
|
||||||
|
// replicas is the number of Pods created by the StatefulSet controller.
|
||||||
|
Replicas int32
|
||||||
|
|
||||||
|
// readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
|
||||||
|
ReadyReplicas int32
|
||||||
|
|
||||||
|
// currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
|
||||||
|
// indicated by currentRevision.
|
||||||
|
CurrentReplicas int32
|
||||||
|
|
||||||
|
// updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
|
||||||
|
// indicated by updateRevision.
|
||||||
|
UpdatedReplicas int32
|
||||||
|
|
||||||
|
// currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
|
||||||
|
// sequence [0,currentReplicas).
|
||||||
|
CurrentRevision string
|
||||||
|
|
||||||
|
// updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
|
||||||
|
// [replicas-updatedReplicas,replicas)
|
||||||
|
UpdateRevision string
|
||||||
|
|
||||||
|
// collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
|
||||||
|
// uses this field as a collision avoidance mechanism when it needs to create the name for the
|
||||||
|
// newest ControllerRevision.
|
||||||
|
// +optional
|
||||||
|
CollisionCount *int32
|
||||||
|
|
||||||
|
// Represents the latest available observations of a statefulset's current state.
|
||||||
|
Conditions []StatefulSetCondition
|
||||||
|
}
|
||||||
|
|
||||||
|
type StatefulSetConditionType string
|
||||||
|
|
||||||
|
// TODO: Add valid condition types for Statefulsets.
|
||||||
|
|
||||||
|
// StatefulSetCondition describes the state of a statefulset at a certain point.
|
||||||
|
type StatefulSetCondition struct {
|
||||||
|
// Type of statefulset condition.
|
||||||
|
Type StatefulSetConditionType
|
||||||
|
// Status of the condition, one of True, False, Unknown.
|
||||||
|
Status api.ConditionStatus
|
||||||
|
// The last time this condition was updated.
|
||||||
|
LastTransitionTime metav1.Time
|
||||||
|
// The reason for the condition's last transition.
|
||||||
|
Reason string
|
||||||
|
// A human readable message indicating details about the transition.
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// StatefulSetList is a collection of StatefulSets.
|
||||||
|
type StatefulSetList struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// +optional
|
||||||
|
metav1.ListMeta
|
||||||
|
Items []StatefulSet
|
||||||
|
}
|
||||||
|
|
||||||
|
// +genclient
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// ControllerRevision implements an immutable snapshot of state data. Clients
|
||||||
|
// are responsible for serializing and deserializing the objects that contain
|
||||||
|
// their internal state.
|
||||||
|
// Once a ControllerRevision has been successfully created, it can not be updated.
|
||||||
|
// The API Server will fail validation of all requests that attempt to mutate
|
||||||
|
// the Data field. ControllerRevisions may, however, be deleted.
|
||||||
|
type ControllerRevision struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// Standard object's metadata.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||||
|
// +optional
|
||||||
|
metav1.ObjectMeta
|
||||||
|
|
||||||
|
// Data is the Object representing the state.
|
||||||
|
Data runtime.Object
|
||||||
|
|
||||||
|
// Revision indicates the revision of the state represented by Data.
|
||||||
|
Revision int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
|
||||||
|
type ControllerRevisionList struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// +optional
|
||||||
|
metav1.ListMeta
|
||||||
|
|
||||||
|
// Items is the list of ControllerRevision objects.
|
||||||
|
Items []ControllerRevision
|
||||||
|
}
|
||||||
|
|
||||||
|
// +genclient
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
type Deployment struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// +optional
|
||||||
|
metav1.ObjectMeta
|
||||||
|
|
||||||
|
// Specification of the desired behavior of the Deployment.
|
||||||
|
// +optional
|
||||||
|
Spec DeploymentSpec
|
||||||
|
|
||||||
|
// Most recently observed status of the Deployment.
|
||||||
|
// +optional
|
||||||
|
Status DeploymentStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
type DeploymentSpec struct {
|
||||||
|
// Number of desired pods. This is a pointer to distinguish between explicit
|
||||||
|
// zero and not specified. Defaults to 1.
|
||||||
|
// +optional
|
||||||
|
Replicas int32
|
||||||
|
|
||||||
|
// Label selector for pods. Existing ReplicaSets whose pods are
|
||||||
|
// selected by this will be the ones affected by this deployment.
|
||||||
|
// +optional
|
||||||
|
Selector *metav1.LabelSelector
|
||||||
|
|
||||||
|
// Template describes the pods that will be created.
|
||||||
|
Template api.PodTemplateSpec
|
||||||
|
|
||||||
|
// The deployment strategy to use to replace existing pods with new ones.
|
||||||
|
// +optional
|
||||||
|
Strategy DeploymentStrategy
|
||||||
|
|
||||||
|
// Minimum number of seconds for which a newly created pod should be ready
|
||||||
|
// without any of its container crashing, for it to be considered available.
|
||||||
|
// Defaults to 0 (pod will be considered available as soon as it is ready)
|
||||||
|
// +optional
|
||||||
|
MinReadySeconds int32
|
||||||
|
|
||||||
|
// The number of old ReplicaSets to retain to allow rollback.
|
||||||
|
// This is a pointer to distinguish between explicit zero and not specified.
|
||||||
|
// This is set to the max value of int32 (i.e. 2147483647) by default, which means
|
||||||
|
// "retaining all old ReplicaSets".
|
||||||
|
// +optional
|
||||||
|
RevisionHistoryLimit *int32
|
||||||
|
|
||||||
|
// Indicates that the deployment is paused and will not be processed by the
|
||||||
|
// deployment controller.
|
||||||
|
// +optional
|
||||||
|
Paused bool
|
||||||
|
|
||||||
|
// DEPRECATED.
|
||||||
|
// The config this deployment is rolling back to. Will be cleared after rollback is done.
|
||||||
|
// +optional
|
||||||
|
RollbackTo *RollbackConfig
|
||||||
|
|
||||||
|
// The maximum time in seconds for a deployment to make progress before it
|
||||||
|
// is considered to be failed. The deployment controller will continue to
|
||||||
|
// process failed deployments and a condition with a ProgressDeadlineExceeded
|
||||||
|
// reason will be surfaced in the deployment status. Note that progress will
|
||||||
|
// not be estimated during the time a deployment is paused. This is set to
|
||||||
|
// the max value of int32 (i.e. 2147483647) by default, which means "no deadline".
|
||||||
|
// +optional
|
||||||
|
ProgressDeadlineSeconds *int32
|
||||||
|
}
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// DEPRECATED.
|
||||||
|
// DeploymentRollback stores the information required to rollback a deployment.
|
||||||
|
type DeploymentRollback struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// Required: This must match the Name of a deployment.
|
||||||
|
Name string
|
||||||
|
// The annotations to be updated to a deployment
|
||||||
|
// +optional
|
||||||
|
UpdatedAnnotations map[string]string
|
||||||
|
// The config of this deployment rollback.
|
||||||
|
RollbackTo RollbackConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// DEPRECATED.
|
||||||
|
type RollbackConfig struct {
|
||||||
|
// The revision to rollback to. If set to 0, rollback to the last revision.
|
||||||
|
// +optional
|
||||||
|
Revision int64
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
|
||||||
|
// to existing RCs (and label key that is added to its pods) to prevent the existing RCs
|
||||||
|
// to select new pods (and old pods being select by new RC).
|
||||||
|
DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
|
||||||
|
)
|
||||||
|
|
||||||
|
type DeploymentStrategy struct {
|
||||||
|
// Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
|
||||||
|
// +optional
|
||||||
|
Type DeploymentStrategyType
|
||||||
|
|
||||||
|
// Rolling update config params. Present only if DeploymentStrategyType =
|
||||||
|
// RollingUpdate.
|
||||||
|
//---
|
||||||
|
// TODO: Update this to follow our convention for oneOf, whatever we decide it
|
||||||
|
// to be.
|
||||||
|
// +optional
|
||||||
|
RollingUpdate *RollingUpdateDeployment
|
||||||
|
}
|
||||||
|
|
||||||
|
type DeploymentStrategyType string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Kill all existing pods before creating new ones.
|
||||||
|
RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
|
||||||
|
|
||||||
|
// Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one.
|
||||||
|
RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Spec to control the desired behavior of rolling update.
|
||||||
|
type RollingUpdateDeployment struct {
|
||||||
|
// The maximum number of pods that can be unavailable during the update.
|
||||||
|
// Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%).
|
||||||
|
// Absolute number is calculated from percentage by rounding down.
|
||||||
|
// This can not be 0 if MaxSurge is 0.
|
||||||
|
// By default, a fixed value of 1 is used.
|
||||||
|
// Example: when this is set to 30%, the old RC can be scaled down by 30%
|
||||||
|
// immediately when the rolling update starts. Once new pods are ready, old RC
|
||||||
|
// can be scaled down further, followed by scaling up the new RC, ensuring
|
||||||
|
// that at least 70% of original number of pods are available at all times
|
||||||
|
// during the update.
|
||||||
|
// +optional
|
||||||
|
MaxUnavailable intstr.IntOrString
|
||||||
|
|
||||||
|
// The maximum number of pods that can be scheduled above the original number of
|
||||||
|
// pods.
|
||||||
|
// Value can be an absolute number (ex: 5) or a percentage of total pods at
|
||||||
|
// the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0.
|
||||||
|
// Absolute number is calculated from percentage by rounding up.
|
||||||
|
// By default, a value of 1 is used.
|
||||||
|
// Example: when this is set to 30%, the new RC can be scaled up by 30%
|
||||||
|
// immediately when the rolling update starts. Once old pods have been killed,
|
||||||
|
// new RC can be scaled up further, ensuring that total number of pods running
|
||||||
|
// at any time during the update is atmost 130% of original pods.
|
||||||
|
// +optional
|
||||||
|
MaxSurge intstr.IntOrString
|
||||||
|
}
|
||||||
|
|
||||||
|
type DeploymentStatus struct {
|
||||||
|
// The generation observed by the deployment controller.
|
||||||
|
// +optional
|
||||||
|
ObservedGeneration int64
|
||||||
|
|
||||||
|
// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
|
||||||
|
// +optional
|
||||||
|
Replicas int32
|
||||||
|
|
||||||
|
// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
|
||||||
|
// +optional
|
||||||
|
UpdatedReplicas int32
|
||||||
|
|
||||||
|
// Total number of ready pods targeted by this deployment.
|
||||||
|
// +optional
|
||||||
|
ReadyReplicas int32
|
||||||
|
|
||||||
|
// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
|
||||||
|
// +optional
|
||||||
|
AvailableReplicas int32
|
||||||
|
|
||||||
|
// Total number of unavailable pods targeted by this deployment. This is the total number of
|
||||||
|
// pods that are still required for the deployment to have 100% available capacity. They may
|
||||||
|
// either be pods that are running but not yet available or pods that still have not been created.
|
||||||
|
// +optional
|
||||||
|
UnavailableReplicas int32
|
||||||
|
|
||||||
|
// Represents the latest available observations of a deployment's current state.
|
||||||
|
Conditions []DeploymentCondition
|
||||||
|
|
||||||
|
// Count of hash collisions for the Deployment. The Deployment controller uses this
|
||||||
|
// field as a collision avoidance mechanism when it needs to create the name for the
|
||||||
|
// newest ReplicaSet.
|
||||||
|
// +optional
|
||||||
|
CollisionCount *int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type DeploymentConditionType string
|
||||||
|
|
||||||
|
// These are valid conditions of a deployment.
|
||||||
|
const (
|
||||||
|
// Available means the deployment is available, ie. at least the minimum available
|
||||||
|
// replicas required are up and running for at least minReadySeconds.
|
||||||
|
DeploymentAvailable DeploymentConditionType = "Available"
|
||||||
|
// Progressing means the deployment is progressing. Progress for a deployment is
|
||||||
|
// considered when a new replica set is created or adopted, and when new pods scale
|
||||||
|
// up or old pods scale down. Progress is not estimated for paused deployments or
|
||||||
|
// when progressDeadlineSeconds is not specified.
|
||||||
|
DeploymentProgressing DeploymentConditionType = "Progressing"
|
||||||
|
// ReplicaFailure is added in a deployment when one of its pods fails to be created
|
||||||
|
// or deleted.
|
||||||
|
DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeploymentCondition describes the state of a deployment at a certain point.
|
||||||
|
type DeploymentCondition struct {
|
||||||
|
// Type of deployment condition.
|
||||||
|
Type DeploymentConditionType
|
||||||
|
// Status of the condition, one of True, False, Unknown.
|
||||||
|
Status api.ConditionStatus
|
||||||
|
// The last time this condition was updated.
|
||||||
|
LastUpdateTime metav1.Time
|
||||||
|
// Last time the condition transitioned from one status to another.
|
||||||
|
LastTransitionTime metav1.Time
|
||||||
|
// The reason for the condition's last transition.
|
||||||
|
Reason string
|
||||||
|
// A human readable message indicating details about the transition.
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
type DeploymentList struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// +optional
|
||||||
|
metav1.ListMeta
|
||||||
|
|
||||||
|
// Items is the list of deployments.
|
||||||
|
Items []Deployment
|
||||||
|
}
|
||||||
|
|
||||||
|
type DaemonSetUpdateStrategy struct {
|
||||||
|
// Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
|
||||||
|
// Default is OnDelete.
|
||||||
|
// +optional
|
||||||
|
Type DaemonSetUpdateStrategyType
|
||||||
|
|
||||||
|
// Rolling update config params. Present only if type = "RollingUpdate".
|
||||||
|
//---
|
||||||
|
// TODO: Update this to follow our convention for oneOf, whatever we decide it
|
||||||
|
// to be. Same as Deployment `strategy.rollingUpdate`.
|
||||||
|
// See https://github.com/kubernetes/kubernetes/issues/35345
|
||||||
|
// +optional
|
||||||
|
RollingUpdate *RollingUpdateDaemonSet
|
||||||
|
}
|
||||||
|
|
||||||
|
type DaemonSetUpdateStrategyType string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other.
|
||||||
|
RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
|
||||||
|
|
||||||
|
// Replace the old daemons only when it's killed
|
||||||
|
OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Spec to control the desired behavior of daemon set rolling update.
|
||||||
|
type RollingUpdateDaemonSet struct {
|
||||||
|
// The maximum number of DaemonSet pods that can be unavailable during the
|
||||||
|
// update. Value can be an absolute number (ex: 5) or a percentage of total
|
||||||
|
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
|
||||||
|
// number is calculated from percentage by rounding up.
|
||||||
|
// This cannot be 0.
|
||||||
|
// Default value is 1.
|
||||||
|
// Example: when this is set to 30%, at most 30% of the total number of nodes
|
||||||
|
// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
|
||||||
|
// can have their pods stopped for an update at any given
|
||||||
|
// time. The update starts by stopping at most 30% of those DaemonSet pods
|
||||||
|
// and then brings up new DaemonSet pods in their place. Once the new pods
|
||||||
|
// are available, it then proceeds onto other DaemonSet pods, thus ensuring
|
||||||
|
// that at least 70% of original number of DaemonSet pods are available at
|
||||||
|
// all times during the update.
|
||||||
|
// +optional
|
||||||
|
MaxUnavailable intstr.IntOrString
|
||||||
|
}
|
||||||
|
|
||||||
|
// DaemonSetSpec is the specification of a daemon set.
|
||||||
|
type DaemonSetSpec struct {
|
||||||
|
// A label query over pods that are managed by the daemon set.
|
||||||
|
// Must match in order to be controlled.
|
||||||
|
// If empty, defaulted to labels on Pod template.
|
||||||
|
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
|
||||||
|
// +optional
|
||||||
|
Selector *metav1.LabelSelector
|
||||||
|
|
||||||
|
// An object that describes the pod that will be created.
|
||||||
|
// The DaemonSet will create exactly one copy of this pod on every node
|
||||||
|
// that matches the template's node selector (or on every node if no node
|
||||||
|
// selector is specified).
|
||||||
|
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
|
||||||
|
Template api.PodTemplateSpec
|
||||||
|
|
||||||
|
// An update strategy to replace existing DaemonSet pods with new pods.
|
||||||
|
// +optional
|
||||||
|
UpdateStrategy DaemonSetUpdateStrategy
|
||||||
|
|
||||||
|
// The minimum number of seconds for which a newly created DaemonSet pod should
|
||||||
|
// be ready without any of its container crashing, for it to be considered
|
||||||
|
// available. Defaults to 0 (pod will be considered available as soon as it
|
||||||
|
// is ready).
|
||||||
|
// +optional
|
||||||
|
MinReadySeconds int32
|
||||||
|
|
||||||
|
// DEPRECATED.
|
||||||
|
// A sequence number representing a specific generation of the template.
|
||||||
|
// Populated by the system. It can be set only during the creation.
|
||||||
|
// +optional
|
||||||
|
TemplateGeneration int64
|
||||||
|
|
||||||
|
// The number of old history to retain to allow rollback.
|
||||||
|
// This is a pointer to distinguish between explicit zero and not specified.
|
||||||
|
// Defaults to 10.
|
||||||
|
// +optional
|
||||||
|
RevisionHistoryLimit *int32
|
||||||
|
}
|
||||||
|
|
||||||
|
// DaemonSetStatus represents the current status of a daemon set.
|
||||||
|
type DaemonSetStatus struct {
|
||||||
|
// The number of nodes that are running at least 1
|
||||||
|
// daemon pod and are supposed to run the daemon pod.
|
||||||
|
CurrentNumberScheduled int32
|
||||||
|
|
||||||
|
// The number of nodes that are running the daemon pod, but are
|
||||||
|
// not supposed to run the daemon pod.
|
||||||
|
NumberMisscheduled int32
|
||||||
|
|
||||||
|
// The total number of nodes that should be running the daemon
|
||||||
|
// pod (including nodes correctly running the daemon pod).
|
||||||
|
DesiredNumberScheduled int32
|
||||||
|
|
||||||
|
// The number of nodes that should be running the daemon pod and have one
|
||||||
|
// or more of the daemon pod running and ready.
|
||||||
|
NumberReady int32
|
||||||
|
|
||||||
|
// The most recent generation observed by the daemon set controller.
|
||||||
|
// +optional
|
||||||
|
ObservedGeneration int64
|
||||||
|
|
||||||
|
// The total number of nodes that are running updated daemon pod
|
||||||
|
// +optional
|
||||||
|
UpdatedNumberScheduled int32
|
||||||
|
|
||||||
|
// The number of nodes that should be running the
|
||||||
|
// daemon pod and have one or more of the daemon pod running and
|
||||||
|
// available (ready for at least spec.minReadySeconds)
|
||||||
|
// +optional
|
||||||
|
NumberAvailable int32
|
||||||
|
|
||||||
|
// The number of nodes that should be running the
|
||||||
|
// daemon pod and have none of the daemon pod running and available
|
||||||
|
// (ready for at least spec.minReadySeconds)
|
||||||
|
// +optional
|
||||||
|
NumberUnavailable int32
|
||||||
|
|
||||||
|
// Count of hash collisions for the DaemonSet. The DaemonSet controller
|
||||||
|
// uses this field as a collision avoidance mechanism when it needs to
|
||||||
|
// create the name for the newest ControllerRevision.
|
||||||
|
// +optional
|
||||||
|
CollisionCount *int32
|
||||||
|
|
||||||
|
// Represents the latest available observations of a DaemonSet's current state.
|
||||||
|
Conditions []DaemonSetCondition
|
||||||
|
}
|
||||||
|
|
||||||
|
type DaemonSetConditionType string
|
||||||
|
|
||||||
|
// TODO: Add valid condition types of a DaemonSet.
|
||||||
|
|
||||||
|
// DaemonSetCondition describes the state of a DaemonSet at a certain point.
|
||||||
|
type DaemonSetCondition struct {
|
||||||
|
// Type of DaemonSet condition.
|
||||||
|
Type DaemonSetConditionType
|
||||||
|
// Status of the condition, one of True, False, Unknown.
|
||||||
|
Status api.ConditionStatus
|
||||||
|
// Last time the condition transitioned from one status to another.
|
||||||
|
LastTransitionTime metav1.Time
|
||||||
|
// The reason for the condition's last transition.
|
||||||
|
Reason string
|
||||||
|
// A human readable message indicating details about the transition.
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
// +genclient
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// DaemonSet represents the configuration of a daemon set.
|
||||||
|
type DaemonSet struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// Standard object's metadata.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||||
|
// +optional
|
||||||
|
metav1.ObjectMeta
|
||||||
|
|
||||||
|
// The desired behavior of this daemon set.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
|
||||||
|
// +optional
|
||||||
|
Spec DaemonSetSpec
|
||||||
|
|
||||||
|
// The current status of this daemon set. This data may be
|
||||||
|
// out of date by some window of time.
|
||||||
|
// Populated by the system.
|
||||||
|
// Read-only.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
|
||||||
|
// +optional
|
||||||
|
Status DaemonSetStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead.
|
||||||
|
// DaemonSetTemplateGenerationKey is the key of the labels that is added
|
||||||
|
// to daemon set pods to distinguish between old and new pod templates
|
||||||
|
// during DaemonSet template update.
|
||||||
|
DaemonSetTemplateGenerationKey string = "pod-template-generation"
|
||||||
|
)
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// DaemonSetList is a collection of daemon sets.
|
||||||
|
type DaemonSetList struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// Standard list metadata.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||||
|
// +optional
|
||||||
|
metav1.ListMeta
|
||||||
|
|
||||||
|
// A list of daemon sets.
|
||||||
|
Items []DaemonSet
|
||||||
|
}
|
||||||
|
|
||||||
|
// +genclient
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
|
||||||
|
type ReplicaSet struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// +optional
|
||||||
|
metav1.ObjectMeta
|
||||||
|
|
||||||
|
// Spec defines the desired behavior of this ReplicaSet.
|
||||||
|
// +optional
|
||||||
|
Spec ReplicaSetSpec
|
||||||
|
|
||||||
|
// Status is the current status of this ReplicaSet. This data may be
|
||||||
|
// out of date by some window of time.
|
||||||
|
// +optional
|
||||||
|
Status ReplicaSetStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// ReplicaSetList is a collection of ReplicaSets.
|
||||||
|
type ReplicaSetList struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// +optional
|
||||||
|
metav1.ListMeta
|
||||||
|
|
||||||
|
Items []ReplicaSet
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplicaSetSpec is the specification of a ReplicaSet.
|
||||||
|
// As the internal representation of a ReplicaSet, it must have
|
||||||
|
// a Template set.
|
||||||
|
type ReplicaSetSpec struct {
|
||||||
|
// Replicas is the number of desired replicas.
|
||||||
|
Replicas int32
|
||||||
|
|
||||||
|
// Minimum number of seconds for which a newly created pod should be ready
|
||||||
|
// without any of its container crashing, for it to be considered available.
|
||||||
|
// Defaults to 0 (pod will be considered available as soon as it is ready)
|
||||||
|
// +optional
|
||||||
|
MinReadySeconds int32
|
||||||
|
|
||||||
|
// Selector is a label query over pods that should match the replica count.
|
||||||
|
// Must match in order to be controlled.
|
||||||
|
// If empty, defaulted to labels on pod template.
|
||||||
|
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
|
||||||
|
// +optional
|
||||||
|
Selector *metav1.LabelSelector
|
||||||
|
|
||||||
|
// Template is the object that describes the pod that will be created if
|
||||||
|
// insufficient replicas are detected.
|
||||||
|
// +optional
|
||||||
|
Template api.PodTemplateSpec
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplicaSetStatus represents the current status of a ReplicaSet.
|
||||||
|
type ReplicaSetStatus struct {
|
||||||
|
// Replicas is the number of actual replicas.
|
||||||
|
Replicas int32
|
||||||
|
|
||||||
|
// The number of pods that have labels matching the labels of the pod template of the replicaset.
|
||||||
|
// +optional
|
||||||
|
FullyLabeledReplicas int32
|
||||||
|
|
||||||
|
// The number of ready replicas for this replica set.
|
||||||
|
// +optional
|
||||||
|
ReadyReplicas int32
|
||||||
|
|
||||||
|
// The number of available replicas (ready for at least minReadySeconds) for this replica set.
|
||||||
|
// +optional
|
||||||
|
AvailableReplicas int32
|
||||||
|
|
||||||
|
// ObservedGeneration is the most recent generation observed by the controller.
|
||||||
|
// +optional
|
||||||
|
ObservedGeneration int64
|
||||||
|
|
||||||
|
// Represents the latest available observations of a replica set's current state.
|
||||||
|
// +optional
|
||||||
|
Conditions []ReplicaSetCondition
|
||||||
|
}
|
||||||
|
|
||||||
|
type ReplicaSetConditionType string
|
||||||
|
|
||||||
|
// These are valid conditions of a replica set.
|
||||||
|
const (
|
||||||
|
// ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
|
||||||
|
// due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted
|
||||||
|
// due to kubelet being down or finalizers are failing.
|
||||||
|
ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReplicaSetCondition describes the state of a replica set at a certain point.
|
||||||
|
type ReplicaSetCondition struct {
|
||||||
|
// Type of replica set condition.
|
||||||
|
Type ReplicaSetConditionType
|
||||||
|
// Status of the condition, one of True, False, Unknown.
|
||||||
|
Status api.ConditionStatus
|
||||||
|
// The last time the condition transitioned from one status to another.
|
||||||
|
// +optional
|
||||||
|
LastTransitionTime metav1.Time
|
||||||
|
// The reason for the condition's last transition.
|
||||||
|
// +optional
|
||||||
|
Reason string
|
||||||
|
// A human readable message indicating details about the transition.
|
||||||
|
// +optional
|
||||||
|
Message string
|
||||||
|
}
|
||||||
800
vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go
generated
vendored
Normal file
800
vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go
generated
vendored
Normal file
@@ -0,0 +1,800 @@
|
|||||||
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package apps
|
||||||
|
|
||||||
|
import (
|
||||||
|
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
|
core "k8s.io/kubernetes/pkg/apis/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
|
if in.Data != nil {
|
||||||
|
out.Data = in.Data.DeepCopyObject()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision.
|
||||||
|
func (in *ControllerRevision) DeepCopy() *ControllerRevision {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ControllerRevision)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *ControllerRevision) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
out.ListMeta = in.ListMeta
|
||||||
|
if in.Items != nil {
|
||||||
|
in, out := &in.Items, &out.Items
|
||||||
|
*out = make([]ControllerRevision, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList.
|
||||||
|
func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ControllerRevisionList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
|
in.Spec.DeepCopyInto(&out.Spec)
|
||||||
|
in.Status.DeepCopyInto(&out.Status)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
|
||||||
|
func (in *DaemonSet) DeepCopy() *DaemonSet {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DaemonSet)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *DaemonSet) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
|
||||||
|
*out = *in
|
||||||
|
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
|
||||||
|
func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DaemonSetCondition)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
out.ListMeta = in.ListMeta
|
||||||
|
if in.Items != nil {
|
||||||
|
in, out := &in.Items, &out.Items
|
||||||
|
*out = make([]DaemonSet, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
|
||||||
|
func (in *DaemonSetList) DeepCopy() *DaemonSetList {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DaemonSetList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
|
||||||
|
*out = *in
|
||||||
|
if in.Selector != nil {
|
||||||
|
in, out := &in.Selector, &out.Selector
|
||||||
|
*out = new(v1.LabelSelector)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
in.Template.DeepCopyInto(&out.Template)
|
||||||
|
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
|
||||||
|
if in.RevisionHistoryLimit != nil {
|
||||||
|
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
|
||||||
|
func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DaemonSetSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
|
||||||
|
*out = *in
|
||||||
|
if in.CollisionCount != nil {
|
||||||
|
in, out := &in.CollisionCount, &out.CollisionCount
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.Conditions != nil {
|
||||||
|
in, out := &in.Conditions, &out.Conditions
|
||||||
|
*out = make([]DaemonSetCondition, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
|
||||||
|
func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DaemonSetStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
|
||||||
|
*out = *in
|
||||||
|
if in.RollingUpdate != nil {
|
||||||
|
in, out := &in.RollingUpdate, &out.RollingUpdate
|
||||||
|
*out = new(RollingUpdateDaemonSet)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
|
||||||
|
func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DaemonSetUpdateStrategy)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *Deployment) DeepCopyInto(out *Deployment) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
|
in.Spec.DeepCopyInto(&out.Spec)
|
||||||
|
in.Status.DeepCopyInto(&out.Status)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
|
||||||
|
func (in *Deployment) DeepCopy() *Deployment {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(Deployment)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *Deployment) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
|
||||||
|
*out = *in
|
||||||
|
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
|
||||||
|
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
|
||||||
|
func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DeploymentCondition)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
out.ListMeta = in.ListMeta
|
||||||
|
if in.Items != nil {
|
||||||
|
in, out := &in.Items, &out.Items
|
||||||
|
*out = make([]Deployment, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
|
||||||
|
func (in *DeploymentList) DeepCopy() *DeploymentList {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DeploymentList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *DeploymentList) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
if in.UpdatedAnnotations != nil {
|
||||||
|
in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations
|
||||||
|
*out = make(map[string]string, len(*in))
|
||||||
|
for key, val := range *in {
|
||||||
|
(*out)[key] = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out.RollbackTo = in.RollbackTo
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback.
|
||||||
|
func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DeploymentRollback)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
|
||||||
|
*out = *in
|
||||||
|
if in.Selector != nil {
|
||||||
|
in, out := &in.Selector, &out.Selector
|
||||||
|
*out = new(v1.LabelSelector)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
in.Template.DeepCopyInto(&out.Template)
|
||||||
|
in.Strategy.DeepCopyInto(&out.Strategy)
|
||||||
|
if in.RevisionHistoryLimit != nil {
|
||||||
|
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.RollbackTo != nil {
|
||||||
|
in, out := &in.RollbackTo, &out.RollbackTo
|
||||||
|
*out = new(RollbackConfig)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.ProgressDeadlineSeconds != nil {
|
||||||
|
in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
|
||||||
|
func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DeploymentSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
|
||||||
|
*out = *in
|
||||||
|
if in.Conditions != nil {
|
||||||
|
in, out := &in.Conditions, &out.Conditions
|
||||||
|
*out = make([]DeploymentCondition, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.CollisionCount != nil {
|
||||||
|
in, out := &in.CollisionCount, &out.CollisionCount
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
|
||||||
|
func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DeploymentStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
|
||||||
|
*out = *in
|
||||||
|
if in.RollingUpdate != nil {
|
||||||
|
in, out := &in.RollingUpdate, &out.RollingUpdate
|
||||||
|
*out = new(RollingUpdateDeployment)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
|
||||||
|
func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(DeploymentStrategy)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
|
in.Spec.DeepCopyInto(&out.Spec)
|
||||||
|
in.Status.DeepCopyInto(&out.Status)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
|
||||||
|
func (in *ReplicaSet) DeepCopy() *ReplicaSet {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ReplicaSet)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *ReplicaSet) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
|
||||||
|
*out = *in
|
||||||
|
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
|
||||||
|
func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ReplicaSetCondition)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
out.ListMeta = in.ListMeta
|
||||||
|
if in.Items != nil {
|
||||||
|
in, out := &in.Items, &out.Items
|
||||||
|
*out = make([]ReplicaSet, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
|
||||||
|
func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ReplicaSetList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
|
||||||
|
*out = *in
|
||||||
|
if in.Selector != nil {
|
||||||
|
in, out := &in.Selector, &out.Selector
|
||||||
|
*out = new(v1.LabelSelector)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
in.Template.DeepCopyInto(&out.Template)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
|
||||||
|
func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ReplicaSetSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
|
||||||
|
*out = *in
|
||||||
|
if in.Conditions != nil {
|
||||||
|
in, out := &in.Conditions, &out.Conditions
|
||||||
|
*out = make([]ReplicaSetCondition, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
|
||||||
|
func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ReplicaSetStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) {
|
||||||
|
*out = *in
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig.
|
||||||
|
func (in *RollbackConfig) DeepCopy() *RollbackConfig {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RollbackConfig)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
|
||||||
|
*out = *in
|
||||||
|
out.MaxUnavailable = in.MaxUnavailable
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
|
||||||
|
func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RollingUpdateDaemonSet)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
|
||||||
|
*out = *in
|
||||||
|
out.MaxUnavailable = in.MaxUnavailable
|
||||||
|
out.MaxSurge = in.MaxSurge
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
|
||||||
|
func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RollingUpdateDeployment)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
|
||||||
|
*out = *in
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
|
||||||
|
func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RollingUpdateStatefulSetStrategy)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
|
in.Spec.DeepCopyInto(&out.Spec)
|
||||||
|
in.Status.DeepCopyInto(&out.Status)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
|
||||||
|
func (in *StatefulSet) DeepCopy() *StatefulSet {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(StatefulSet)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *StatefulSet) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) {
|
||||||
|
*out = *in
|
||||||
|
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition.
|
||||||
|
func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(StatefulSetCondition)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
out.ListMeta = in.ListMeta
|
||||||
|
if in.Items != nil {
|
||||||
|
in, out := &in.Items, &out.Items
|
||||||
|
*out = make([]StatefulSet, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
|
||||||
|
func (in *StatefulSetList) DeepCopy() *StatefulSetList {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(StatefulSetList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *StatefulSetList) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
|
||||||
|
*out = *in
|
||||||
|
if in.Selector != nil {
|
||||||
|
in, out := &in.Selector, &out.Selector
|
||||||
|
*out = new(v1.LabelSelector)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
in.Template.DeepCopyInto(&out.Template)
|
||||||
|
if in.VolumeClaimTemplates != nil {
|
||||||
|
in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
|
||||||
|
*out = make([]core.PersistentVolumeClaim, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
|
||||||
|
if in.RevisionHistoryLimit != nil {
|
||||||
|
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
|
||||||
|
func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(StatefulSetSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
|
||||||
|
*out = *in
|
||||||
|
if in.ObservedGeneration != nil {
|
||||||
|
in, out := &in.ObservedGeneration, &out.ObservedGeneration
|
||||||
|
*out = new(int64)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.CollisionCount != nil {
|
||||||
|
in, out := &in.CollisionCount, &out.CollisionCount
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.Conditions != nil {
|
||||||
|
in, out := &in.Conditions, &out.Conditions
|
||||||
|
*out = make([]StatefulSetCondition, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
|
||||||
|
func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(StatefulSetStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
|
||||||
|
*out = *in
|
||||||
|
if in.RollingUpdate != nil {
|
||||||
|
in, out := &in.RollingUpdate, &out.RollingUpdate
|
||||||
|
*out = new(RollingUpdateStatefulSetStrategy)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
|
||||||
|
func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(StatefulSetUpdateStrategy)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
46
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD
generated
vendored
Normal file
46
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
package(default_visibility = ["//visibility:public"])
|
||||||
|
|
||||||
|
load(
|
||||||
|
"@io_bazel_rules_go//go:def.bzl",
|
||||||
|
"go_library",
|
||||||
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"annotations.go",
|
||||||
|
"doc.go",
|
||||||
|
"register.go",
|
||||||
|
"types.go",
|
||||||
|
"zz_generated.deepcopy.go",
|
||||||
|
],
|
||||||
|
importpath = "k8s.io/kubernetes/pkg/apis/autoscaling",
|
||||||
|
deps = [
|
||||||
|
"//pkg/apis/core:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [
|
||||||
|
":package-srcs",
|
||||||
|
"//pkg/apis/autoscaling/fuzzer:all-srcs",
|
||||||
|
"//pkg/apis/autoscaling/install:all-srcs",
|
||||||
|
"//pkg/apis/autoscaling/v1:all-srcs",
|
||||||
|
"//pkg/apis/autoscaling/v2beta1:all-srcs",
|
||||||
|
"//pkg/apis/autoscaling/v2beta2:all-srcs",
|
||||||
|
"//pkg/apis/autoscaling/validation:all-srcs",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
)
|
||||||
19
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS
generated
vendored
Normal file
19
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
reviewers:
|
||||||
|
- thockin
|
||||||
|
- lavalamp
|
||||||
|
- smarterclayton
|
||||||
|
- wojtek-t
|
||||||
|
- deads2k
|
||||||
|
- caesarxuchao
|
||||||
|
- erictune
|
||||||
|
- sttts
|
||||||
|
- ncdc
|
||||||
|
- piosz
|
||||||
|
- dims
|
||||||
|
- errordeveloper
|
||||||
|
- madhusudancs
|
||||||
|
- mml
|
||||||
|
- mbohlool
|
||||||
|
- david-mcmahon
|
||||||
|
- jianhuiz
|
||||||
|
- directxman12
|
||||||
34
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go
generated
vendored
Normal file
34
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package autoscaling
|
||||||
|
|
||||||
|
// MetricSpecsAnnotation is the annotation which holds non-CPU-utilization HPA metric
|
||||||
|
// specs when converting the `Metrics` field from autoscaling/v2beta1
|
||||||
|
const MetricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics"
|
||||||
|
|
||||||
|
// MetricStatusesAnnotation is the annotation which holds non-CPU-utilization HPA metric
|
||||||
|
// statuses when converting the `CurrentMetrics` field from autoscaling/v2beta1
|
||||||
|
const MetricStatusesAnnotation = "autoscaling.alpha.kubernetes.io/current-metrics"
|
||||||
|
|
||||||
|
// HorizontalPodAutoscalerConditionsAnnotation is the annotation which holds the conditions
|
||||||
|
// of an HPA when converting the `Conditions` field from autoscaling/v2beta1
|
||||||
|
const HorizontalPodAutoscalerConditionsAnnotation = "autoscaling.alpha.kubernetes.io/conditions"
|
||||||
|
|
||||||
|
// DefaultCPUUtilization is the default value for CPU utilization, provided no other
|
||||||
|
// metrics are present. This is here because it's used by both the v2beta1 defaulting
|
||||||
|
// logic, and the pseudo-defaulting done in v1 conversion.
|
||||||
|
const DefaultCPUUtilization = 80
|
||||||
19
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go
generated
vendored
Normal file
19
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen=package
|
||||||
|
|
||||||
|
package autoscaling // import "k8s.io/kubernetes/pkg/apis/autoscaling"
|
||||||
53
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go
generated
vendored
Normal file
53
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package autoscaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GroupName is the group name use in this package
|
||||||
|
const GroupName = "autoscaling"
|
||||||
|
|
||||||
|
// SchemeGroupVersion is group version used to register these objects
|
||||||
|
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
||||||
|
|
||||||
|
// Kind takes an unqualified kind and returns a Group qualified GroupKind
|
||||||
|
func Kind(kind string) schema.GroupKind {
|
||||||
|
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||||
|
func Resource(resource string) schema.GroupResource {
|
||||||
|
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||||
|
AddToScheme = SchemeBuilder.AddToScheme
|
||||||
|
)
|
||||||
|
|
||||||
|
// Adds the list of known types to the given scheme.
|
||||||
|
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||||
|
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||||
|
&Scale{},
|
||||||
|
&HorizontalPodAutoscaler{},
|
||||||
|
&HorizontalPodAutoscalerList{},
|
||||||
|
)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
416
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
generated
vendored
Normal file
416
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
generated
vendored
Normal file
@@ -0,0 +1,416 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package autoscaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
api "k8s.io/kubernetes/pkg/apis/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// Scale represents a scaling request for a resource.
|
||||||
|
type Scale struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
|
||||||
|
// +optional
|
||||||
|
metav1.ObjectMeta
|
||||||
|
|
||||||
|
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
|
||||||
|
// +optional
|
||||||
|
Spec ScaleSpec
|
||||||
|
|
||||||
|
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
|
||||||
|
// +optional
|
||||||
|
Status ScaleStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScaleSpec describes the attributes of a scale subresource.
|
||||||
|
type ScaleSpec struct {
|
||||||
|
// desired number of instances for the scaled object.
|
||||||
|
// +optional
|
||||||
|
Replicas int32
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScaleStatus represents the current status of a scale subresource.
|
||||||
|
type ScaleStatus struct {
|
||||||
|
// actual number of observed instances of the scaled object.
|
||||||
|
Replicas int32
|
||||||
|
|
||||||
|
// label query over pods that should match the replicas count. This is same
|
||||||
|
// as the label selector but in the string format to avoid introspection
|
||||||
|
// by clients. The string will be in the same format as the query-param syntax.
|
||||||
|
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
|
||||||
|
// +optional
|
||||||
|
Selector string
|
||||||
|
}
|
||||||
|
|
||||||
|
// CrossVersionObjectReference contains enough information to let you identify the referred resource.
|
||||||
|
type CrossVersionObjectReference struct {
|
||||||
|
// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
|
||||||
|
Kind string
|
||||||
|
// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
|
||||||
|
Name string
|
||||||
|
// API version of the referent
|
||||||
|
// +optional
|
||||||
|
APIVersion string
|
||||||
|
}
|
||||||
|
|
||||||
|
// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
|
||||||
|
type HorizontalPodAutoscalerSpec struct {
|
||||||
|
// ScaleTargetRef points to the target resource to scale, and is used to the pods for which metrics
|
||||||
|
// should be collected, as well as to actually change the replica count.
|
||||||
|
ScaleTargetRef CrossVersionObjectReference
|
||||||
|
// MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
|
||||||
|
// It defaults to 1 pod.
|
||||||
|
// +optional
|
||||||
|
MinReplicas *int32
|
||||||
|
// MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
|
||||||
|
// It cannot be less that minReplicas.
|
||||||
|
MaxReplicas int32
|
||||||
|
// Metrics contains the specifications for which to use to calculate the
|
||||||
|
// desired replica count (the maximum replica count across all metrics will
|
||||||
|
// be used). The desired replica count is calculated multiplying the
|
||||||
|
// ratio between the target value and the current value by the current
|
||||||
|
// number of pods. Ergo, metrics used must decrease as the pod count is
|
||||||
|
// increased, and vice-versa. See the individual metric source types for
|
||||||
|
// more information about how each type of metric must respond.
|
||||||
|
// +optional
|
||||||
|
Metrics []MetricSpec
|
||||||
|
}
|
||||||
|
|
||||||
|
// MetricSourceType indicates the type of metric.
|
||||||
|
type MetricSourceType string
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ObjectMetricSourceType is a metric describing a kubernetes object
|
||||||
|
// (for example, hits-per-second on an Ingress object).
|
||||||
|
ObjectMetricSourceType MetricSourceType = "Object"
|
||||||
|
// PodsMetricSourceType is a metric describing each pod in the current scale
|
||||||
|
// target (for example, transactions-processed-per-second). The values
|
||||||
|
// will be averaged together before being compared to the target value.
|
||||||
|
PodsMetricSourceType MetricSourceType = "Pods"
|
||||||
|
// ResourceMetricSourceType is a resource metric known to Kubernetes, as
|
||||||
|
// specified in requests and limits, describing each pod in the current
|
||||||
|
// scale target (e.g. CPU or memory). Such metrics are built in to
|
||||||
|
// Kubernetes, and have special scaling options on top of those available
|
||||||
|
// to normal per-pod metrics (the "pods" source).
|
||||||
|
ResourceMetricSourceType MetricSourceType = "Resource"
|
||||||
|
// ExternalMetricSourceType is a global metric that is not associated
|
||||||
|
// with any Kubernetes object. It allows autoscaling based on information
|
||||||
|
// coming from components running outside of cluster
|
||||||
|
// (for example length of queue in cloud messaging service, or
|
||||||
|
// QPS from loadbalancer running outside of cluster).
|
||||||
|
ExternalMetricSourceType MetricSourceType = "External"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetricSpec specifies how to scale based on a single metric
|
||||||
|
// (only `type` and one other matching field should be set at once).
|
||||||
|
type MetricSpec struct {
|
||||||
|
// Type is the type of metric source. It should be one of "Object",
|
||||||
|
// "Pods" or "Resource", each mapping to a matching field in the object.
|
||||||
|
Type MetricSourceType
|
||||||
|
|
||||||
|
// Object refers to a metric describing a single kubernetes object
|
||||||
|
// (for example, hits-per-second on an Ingress object).
|
||||||
|
// +optional
|
||||||
|
Object *ObjectMetricSource
|
||||||
|
// Pods refers to a metric describing each pod in the current scale target
|
||||||
|
// (for example, transactions-processed-per-second). The values will be
|
||||||
|
// averaged together before being compared to the target value.
|
||||||
|
// +optional
|
||||||
|
Pods *PodsMetricSource
|
||||||
|
// Resource refers to a resource metric (such as those specified in
|
||||||
|
// requests and limits) known to Kubernetes describing each pod in the
|
||||||
|
// current scale target (e.g. CPU or memory). Such metrics are built in to
|
||||||
|
// Kubernetes, and have special scaling options on top of those available
|
||||||
|
// to normal per-pod metrics using the "pods" source.
|
||||||
|
// +optional
|
||||||
|
Resource *ResourceMetricSource
|
||||||
|
// External refers to a global metric that is not associated
|
||||||
|
// with any Kubernetes object. It allows autoscaling based on information
|
||||||
|
// coming from components running outside of cluster
|
||||||
|
// (for example length of queue in cloud messaging service, or
|
||||||
|
// QPS from loadbalancer running outside of cluster).
|
||||||
|
// +optional
|
||||||
|
External *ExternalMetricSource
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectMetricSource indicates how to scale on a metric describing a
|
||||||
|
// kubernetes object (for example, hits-per-second on an Ingress object).
|
||||||
|
type ObjectMetricSource struct {
|
||||||
|
DescribedObject CrossVersionObjectReference
|
||||||
|
Target MetricTarget
|
||||||
|
Metric MetricIdentifier
|
||||||
|
}
|
||||||
|
|
||||||
|
// PodsMetricSource indicates how to scale on a metric describing each pod in
|
||||||
|
// the current scale target (for example, transactions-processed-per-second).
|
||||||
|
// The values will be averaged together before being compared to the target
|
||||||
|
// value.
|
||||||
|
type PodsMetricSource struct {
|
||||||
|
// metric identifies the target metric by name and selector
|
||||||
|
Metric MetricIdentifier
|
||||||
|
// target specifies the target value for the given metric
|
||||||
|
Target MetricTarget
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResourceMetricSource indicates how to scale on a resource metric known to
|
||||||
|
// Kubernetes, as specified in requests and limits, describing each pod in the
|
||||||
|
// current scale target (e.g. CPU or memory). The values will be averaged
|
||||||
|
// together before being compared to the target. Such metrics are built in to
|
||||||
|
// Kubernetes, and have special scaling options on top of those available to
|
||||||
|
// normal per-pod metrics using the "pods" source. Only one "target" type
|
||||||
|
// should be set.
|
||||||
|
type ResourceMetricSource struct {
|
||||||
|
// Name is the name of the resource in question.
|
||||||
|
Name api.ResourceName
|
||||||
|
// Target specifies the target value for the given metric
|
||||||
|
Target MetricTarget
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExternalMetricSource indicates how to scale on a metric not associated with
|
||||||
|
// any Kubernetes object (for example length of queue in cloud
|
||||||
|
// messaging service, or QPS from loadbalancer running outside of cluster).
|
||||||
|
type ExternalMetricSource struct {
|
||||||
|
// Metric identifies the target metric by name and selector
|
||||||
|
Metric MetricIdentifier
|
||||||
|
// Target specifies the target value for the given metric
|
||||||
|
Target MetricTarget
|
||||||
|
}
|
||||||
|
|
||||||
|
// MetricIdentifier defines the name and optionally selector for a metric
|
||||||
|
type MetricIdentifier struct {
|
||||||
|
// Name is the name of the given metric
|
||||||
|
Name string
|
||||||
|
// Selector is the selector for the given metric
|
||||||
|
// it is the string-encoded form of a standard kubernetes label selector
|
||||||
|
// +optional
|
||||||
|
Selector *metav1.LabelSelector
|
||||||
|
}
|
||||||
|
|
||||||
|
// MetricTarget defines the target value, average value, or average utilization of a specific metric
|
||||||
|
type MetricTarget struct {
|
||||||
|
// Type represents whether the metric type is Utilization, Value, or AverageValue
|
||||||
|
Type MetricTargetType
|
||||||
|
// Value is the target value of the metric (as a quantity).
|
||||||
|
Value *resource.Quantity
|
||||||
|
// TargetAverageValue is the target value of the average of the
|
||||||
|
// metric across all relevant pods (as a quantity)
|
||||||
|
AverageValue *resource.Quantity
|
||||||
|
|
||||||
|
// AverageUtilization is the target value of the average of the
|
||||||
|
// resource metric across all relevant pods, represented as a percentage of
|
||||||
|
// the requested value of the resource for the pods.
|
||||||
|
// Currently only valid for Resource metric source type
|
||||||
|
AverageUtilization *int32
|
||||||
|
}
|
||||||
|
|
||||||
|
// MetricTargetType specifies the type of metric being targeted, and should be either
|
||||||
|
// "Value", "AverageValue", or "Utilization"
|
||||||
|
type MetricTargetType string
|
||||||
|
|
||||||
|
var (
|
||||||
|
UtilizationMetricType MetricTargetType = "Utilization"
|
||||||
|
ValueMetricType MetricTargetType = "Value"
|
||||||
|
AverageValueMetricType MetricTargetType = "AverageValue"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
|
||||||
|
type HorizontalPodAutoscalerStatus struct {
|
||||||
|
// ObservedGeneration is the most recent generation observed by this autoscaler.
|
||||||
|
// +optional
|
||||||
|
ObservedGeneration *int64
|
||||||
|
|
||||||
|
// LastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
|
||||||
|
// used by the autoscaler to control how often the number of pods is changed.
|
||||||
|
// +optional
|
||||||
|
LastScaleTime *metav1.Time
|
||||||
|
|
||||||
|
// CurrentReplicas is current number of replicas of pods managed by this autoscaler,
|
||||||
|
// as last seen by the autoscaler.
|
||||||
|
CurrentReplicas int32
|
||||||
|
|
||||||
|
// DesiredReplicas is the desired number of replicas of pods managed by this autoscaler,
|
||||||
|
// as last calculated by the autoscaler.
|
||||||
|
DesiredReplicas int32
|
||||||
|
|
||||||
|
// CurrentMetrics is the last read state of the metrics used by this autoscaler.
|
||||||
|
// +optional
|
||||||
|
CurrentMetrics []MetricStatus
|
||||||
|
|
||||||
|
// Conditions is the set of conditions required for this autoscaler to scale its target,
|
||||||
|
// and indicates whether or not those conditions are met.
|
||||||
|
Conditions []HorizontalPodAutoscalerCondition
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConditionStatus indicates the status of a condition (true, false, or unknown).
|
||||||
|
type ConditionStatus string
|
||||||
|
|
||||||
|
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
|
||||||
|
// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
|
||||||
|
// can't decide if a resource is in the condition or not. In the future, we could add other
|
||||||
|
// intermediate conditions, e.g. ConditionDegraded.
|
||||||
|
const (
|
||||||
|
ConditionTrue ConditionStatus = "True"
|
||||||
|
ConditionFalse ConditionStatus = "False"
|
||||||
|
ConditionUnknown ConditionStatus = "Unknown"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HorizontalPodAutoscalerConditionType are the valid conditions of
|
||||||
|
// a HorizontalPodAutoscaler.
|
||||||
|
type HorizontalPodAutoscalerConditionType string
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ScalingActive indicates that the HPA controller is able to scale if necessary:
|
||||||
|
// it's correctly configured, can fetch the desired metrics, and isn't disabled.
|
||||||
|
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
|
||||||
|
// AbleToScale indicates a lack of transient issues which prevent scaling from occurring,
|
||||||
|
// such as being in a backoff window, or being unable to access/update the target scale.
|
||||||
|
AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale"
|
||||||
|
// ScalingLimited indicates that the calculated scale based on metrics would be above or
|
||||||
|
// below the range for the HPA, and has thus been capped.
|
||||||
|
ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HorizontalPodAutoscalerCondition describes the state of
|
||||||
|
// a HorizontalPodAutoscaler at a certain point.
|
||||||
|
type HorizontalPodAutoscalerCondition struct {
|
||||||
|
// Type describes the current condition
|
||||||
|
Type HorizontalPodAutoscalerConditionType
|
||||||
|
// Status is the status of the condition (True, False, Unknown)
|
||||||
|
Status ConditionStatus
|
||||||
|
// LastTransitionTime is the last time the condition transitioned from
|
||||||
|
// one status to another
|
||||||
|
// +optional
|
||||||
|
LastTransitionTime metav1.Time
|
||||||
|
// Reason is the reason for the condition's last transition.
|
||||||
|
// +optional
|
||||||
|
Reason string
|
||||||
|
// Message is a human-readable explanation containing details about
|
||||||
|
// the transition
|
||||||
|
// +optional
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
// MetricStatus describes the last-read state of a single metric.
|
||||||
|
type MetricStatus struct {
|
||||||
|
// Type is the type of metric source. It will be one of "Object",
|
||||||
|
// "Pods" or "Resource", each corresponds to a matching field in the object.
|
||||||
|
Type MetricSourceType
|
||||||
|
|
||||||
|
// Object refers to a metric describing a single kubernetes object
|
||||||
|
// (for example, hits-per-second on an Ingress object).
|
||||||
|
// +optional
|
||||||
|
Object *ObjectMetricStatus
|
||||||
|
// Pods refers to a metric describing each pod in the current scale target
|
||||||
|
// (for example, transactions-processed-per-second). The values will be
|
||||||
|
// averaged together before being compared to the target value.
|
||||||
|
// +optional
|
||||||
|
Pods *PodsMetricStatus
|
||||||
|
// Resource refers to a resource metric (such as those specified in
|
||||||
|
// requests and limits) known to Kubernetes describing each pod in the
|
||||||
|
// current scale target (e.g. CPU or memory). Such metrics are built in to
|
||||||
|
// Kubernetes, and have special scaling options on top of those available
|
||||||
|
// to normal per-pod metrics using the "pods" source.
|
||||||
|
// +optional
|
||||||
|
Resource *ResourceMetricStatus
|
||||||
|
// External refers to a global metric that is not associated
|
||||||
|
// with any Kubernetes object. It allows autoscaling based on information
|
||||||
|
// coming from components running outside of cluster
|
||||||
|
// (for example length of queue in cloud messaging service, or
|
||||||
|
// QPS from loadbalancer running outside of cluster).
|
||||||
|
// +optional
|
||||||
|
External *ExternalMetricStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectMetricStatus indicates the current value of a metric describing a
|
||||||
|
// kubernetes object (for example, hits-per-second on an Ingress object).
|
||||||
|
type ObjectMetricStatus struct {
|
||||||
|
Metric MetricIdentifier
|
||||||
|
Current MetricValueStatus
|
||||||
|
|
||||||
|
DescribedObject CrossVersionObjectReference
|
||||||
|
}
|
||||||
|
|
||||||
|
// PodsMetricStatus indicates the current value of a metric describing each pod in
|
||||||
|
// the current scale target (for example, transactions-processed-per-second).
|
||||||
|
type PodsMetricStatus struct {
|
||||||
|
Metric MetricIdentifier
|
||||||
|
Current MetricValueStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResourceMetricStatus indicates the current value of a resource metric known to
|
||||||
|
// Kubernetes, as specified in requests and limits, describing each pod in the
|
||||||
|
// current scale target (e.g. CPU or memory). Such metrics are built in to
|
||||||
|
// Kubernetes, and have special scaling options on top of those available to
|
||||||
|
// normal per-pod metrics using the "pods" source.
|
||||||
|
type ResourceMetricStatus struct {
|
||||||
|
// Name is the name of the resource in question.
|
||||||
|
Name api.ResourceName
|
||||||
|
Current MetricValueStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExternalMetricStatus indicates the current value of a global metric
|
||||||
|
// not associated with any Kubernetes object.
|
||||||
|
type ExternalMetricStatus struct {
|
||||||
|
Metric MetricIdentifier
|
||||||
|
Current MetricValueStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
type MetricValueStatus struct {
|
||||||
|
Value *resource.Quantity
|
||||||
|
AverageValue *resource.Quantity
|
||||||
|
AverageUtilization *int32
|
||||||
|
}
|
||||||
|
|
||||||
|
// +genclient
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// HorizontalPodAutoscaler is the configuration for a horizontal pod
|
||||||
|
// autoscaler, which automatically manages the replica count of any resource
|
||||||
|
// implementing the scale subresource based on the metrics specified.
|
||||||
|
type HorizontalPodAutoscaler struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// Metadata is the standard object metadata.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||||
|
// +optional
|
||||||
|
metav1.ObjectMeta
|
||||||
|
|
||||||
|
// Spec is the specification for the behaviour of the autoscaler.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
|
||||||
|
// +optional
|
||||||
|
Spec HorizontalPodAutoscalerSpec
|
||||||
|
|
||||||
|
// Status is the current information about the autoscaler.
|
||||||
|
// +optional
|
||||||
|
Status HorizontalPodAutoscalerStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
|
||||||
|
type HorizontalPodAutoscalerList struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
// Metadata is the standard list metadata.
|
||||||
|
// +optional
|
||||||
|
metav1.ListMeta
|
||||||
|
|
||||||
|
// Items is the list of horizontal pod autoscaler objects.
|
||||||
|
Items []HorizontalPodAutoscaler
|
||||||
|
}
|
||||||
547
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go
generated
vendored
Normal file
547
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go
generated
vendored
Normal file
@@ -0,0 +1,547 @@
|
|||||||
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package autoscaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
|
||||||
|
*out = *in
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference.
|
||||||
|
func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(CrossVersionObjectReference)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
|
||||||
|
*out = *in
|
||||||
|
in.Metric.DeepCopyInto(&out.Metric)
|
||||||
|
in.Target.DeepCopyInto(&out.Target)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource.
|
||||||
|
func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ExternalMetricSource)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
|
||||||
|
*out = *in
|
||||||
|
in.Metric.DeepCopyInto(&out.Metric)
|
||||||
|
in.Current.DeepCopyInto(&out.Current)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus.
|
||||||
|
func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ExternalMetricStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
|
in.Spec.DeepCopyInto(&out.Spec)
|
||||||
|
in.Status.DeepCopyInto(&out.Status)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler.
|
||||||
|
func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(HorizontalPodAutoscaler)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) {
|
||||||
|
*out = *in
|
||||||
|
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition.
|
||||||
|
func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(HorizontalPodAutoscalerCondition)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
out.ListMeta = in.ListMeta
|
||||||
|
if in.Items != nil {
|
||||||
|
in, out := &in.Items, &out.Items
|
||||||
|
*out = make([]HorizontalPodAutoscaler, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList.
|
||||||
|
func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(HorizontalPodAutoscalerList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) {
|
||||||
|
*out = *in
|
||||||
|
out.ScaleTargetRef = in.ScaleTargetRef
|
||||||
|
if in.MinReplicas != nil {
|
||||||
|
in, out := &in.MinReplicas, &out.MinReplicas
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.Metrics != nil {
|
||||||
|
in, out := &in.Metrics, &out.Metrics
|
||||||
|
*out = make([]MetricSpec, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec.
|
||||||
|
func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(HorizontalPodAutoscalerSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) {
|
||||||
|
*out = *in
|
||||||
|
if in.ObservedGeneration != nil {
|
||||||
|
in, out := &in.ObservedGeneration, &out.ObservedGeneration
|
||||||
|
*out = new(int64)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.LastScaleTime != nil {
|
||||||
|
in, out := &in.LastScaleTime, &out.LastScaleTime
|
||||||
|
*out = (*in).DeepCopy()
|
||||||
|
}
|
||||||
|
if in.CurrentMetrics != nil {
|
||||||
|
in, out := &in.CurrentMetrics, &out.CurrentMetrics
|
||||||
|
*out = make([]MetricStatus, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.Conditions != nil {
|
||||||
|
in, out := &in.Conditions, &out.Conditions
|
||||||
|
*out = make([]HorizontalPodAutoscalerCondition, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus.
|
||||||
|
func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(HorizontalPodAutoscalerStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *MetricIdentifier) DeepCopyInto(out *MetricIdentifier) {
|
||||||
|
*out = *in
|
||||||
|
if in.Selector != nil {
|
||||||
|
in, out := &in.Selector, &out.Selector
|
||||||
|
*out = new(v1.LabelSelector)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricIdentifier.
|
||||||
|
func (in *MetricIdentifier) DeepCopy() *MetricIdentifier {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(MetricIdentifier)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
|
||||||
|
*out = *in
|
||||||
|
if in.Object != nil {
|
||||||
|
in, out := &in.Object, &out.Object
|
||||||
|
*out = new(ObjectMetricSource)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.Pods != nil {
|
||||||
|
in, out := &in.Pods, &out.Pods
|
||||||
|
*out = new(PodsMetricSource)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.Resource != nil {
|
||||||
|
in, out := &in.Resource, &out.Resource
|
||||||
|
*out = new(ResourceMetricSource)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.External != nil {
|
||||||
|
in, out := &in.External, &out.External
|
||||||
|
*out = new(ExternalMetricSource)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec.
|
||||||
|
func (in *MetricSpec) DeepCopy() *MetricSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(MetricSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
|
||||||
|
*out = *in
|
||||||
|
if in.Object != nil {
|
||||||
|
in, out := &in.Object, &out.Object
|
||||||
|
*out = new(ObjectMetricStatus)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.Pods != nil {
|
||||||
|
in, out := &in.Pods, &out.Pods
|
||||||
|
*out = new(PodsMetricStatus)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.Resource != nil {
|
||||||
|
in, out := &in.Resource, &out.Resource
|
||||||
|
*out = new(ResourceMetricStatus)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.External != nil {
|
||||||
|
in, out := &in.External, &out.External
|
||||||
|
*out = new(ExternalMetricStatus)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus.
|
||||||
|
func (in *MetricStatus) DeepCopy() *MetricStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(MetricStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *MetricTarget) DeepCopyInto(out *MetricTarget) {
|
||||||
|
*out = *in
|
||||||
|
if in.Value != nil {
|
||||||
|
in, out := &in.Value, &out.Value
|
||||||
|
x := (*in).DeepCopy()
|
||||||
|
*out = &x
|
||||||
|
}
|
||||||
|
if in.AverageValue != nil {
|
||||||
|
in, out := &in.AverageValue, &out.AverageValue
|
||||||
|
x := (*in).DeepCopy()
|
||||||
|
*out = &x
|
||||||
|
}
|
||||||
|
if in.AverageUtilization != nil {
|
||||||
|
in, out := &in.AverageUtilization, &out.AverageUtilization
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTarget.
|
||||||
|
func (in *MetricTarget) DeepCopy() *MetricTarget {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(MetricTarget)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *MetricValueStatus) DeepCopyInto(out *MetricValueStatus) {
|
||||||
|
*out = *in
|
||||||
|
if in.Value != nil {
|
||||||
|
in, out := &in.Value, &out.Value
|
||||||
|
x := (*in).DeepCopy()
|
||||||
|
*out = &x
|
||||||
|
}
|
||||||
|
if in.AverageValue != nil {
|
||||||
|
in, out := &in.AverageValue, &out.AverageValue
|
||||||
|
x := (*in).DeepCopy()
|
||||||
|
*out = &x
|
||||||
|
}
|
||||||
|
if in.AverageUtilization != nil {
|
||||||
|
in, out := &in.AverageUtilization, &out.AverageUtilization
|
||||||
|
*out = new(int32)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricValueStatus.
|
||||||
|
func (in *MetricValueStatus) DeepCopy() *MetricValueStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(MetricValueStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) {
|
||||||
|
*out = *in
|
||||||
|
out.DescribedObject = in.DescribedObject
|
||||||
|
in.Target.DeepCopyInto(&out.Target)
|
||||||
|
in.Metric.DeepCopyInto(&out.Metric)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource.
|
||||||
|
func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ObjectMetricSource)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) {
|
||||||
|
*out = *in
|
||||||
|
in.Metric.DeepCopyInto(&out.Metric)
|
||||||
|
in.Current.DeepCopyInto(&out.Current)
|
||||||
|
out.DescribedObject = in.DescribedObject
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus.
|
||||||
|
func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ObjectMetricStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) {
|
||||||
|
*out = *in
|
||||||
|
in.Metric.DeepCopyInto(&out.Metric)
|
||||||
|
in.Target.DeepCopyInto(&out.Target)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource.
|
||||||
|
func (in *PodsMetricSource) DeepCopy() *PodsMetricSource {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(PodsMetricSource)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) {
|
||||||
|
*out = *in
|
||||||
|
in.Metric.DeepCopyInto(&out.Metric)
|
||||||
|
in.Current.DeepCopyInto(&out.Current)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus.
|
||||||
|
func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(PodsMetricStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
|
||||||
|
*out = *in
|
||||||
|
in.Target.DeepCopyInto(&out.Target)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource.
|
||||||
|
func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ResourceMetricSource)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) {
|
||||||
|
*out = *in
|
||||||
|
in.Current.DeepCopyInto(&out.Current)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus.
|
||||||
|
func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ResourceMetricStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *Scale) DeepCopyInto(out *Scale) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
|
out.Spec = in.Spec
|
||||||
|
out.Status = in.Status
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
|
||||||
|
func (in *Scale) DeepCopy() *Scale {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(Scale)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *Scale) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
|
||||||
|
*out = *in
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
|
||||||
|
func (in *ScaleSpec) DeepCopy() *ScaleSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ScaleSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
|
||||||
|
*out = *in
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
|
||||||
|
func (in *ScaleStatus) DeepCopy() *ScaleStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ScaleStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
60
vendor/k8s.io/kubernetes/pkg/apis/core/BUILD
generated
vendored
Normal file
60
vendor/k8s.io/kubernetes/pkg/apis/core/BUILD
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"annotation_key_constants.go",
|
||||||
|
"doc.go",
|
||||||
|
"field_constants.go",
|
||||||
|
"json.go",
|
||||||
|
"objectreference.go",
|
||||||
|
"register.go",
|
||||||
|
"resource.go",
|
||||||
|
"taint.go",
|
||||||
|
"toleration.go",
|
||||||
|
"types.go",
|
||||||
|
"zz_generated.deepcopy.go",
|
||||||
|
],
|
||||||
|
importpath = "k8s.io/kubernetes/pkg/apis/core",
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
deps = [
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
go_test(
|
||||||
|
name = "go_default_test",
|
||||||
|
srcs = [
|
||||||
|
"taint_test.go",
|
||||||
|
"toleration_test.go",
|
||||||
|
],
|
||||||
|
embed = [":go_default_library"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [
|
||||||
|
":package-srcs",
|
||||||
|
"//pkg/apis/core/fuzzer:all-srcs",
|
||||||
|
"//pkg/apis/core/helper:all-srcs",
|
||||||
|
"//pkg/apis/core/install:all-srcs",
|
||||||
|
"//pkg/apis/core/pods:all-srcs",
|
||||||
|
"//pkg/apis/core/v1:all-srcs",
|
||||||
|
"//pkg/apis/core/validation:all-srcs",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
45
vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS
generated
vendored
Normal file
45
vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
approvers:
|
||||||
|
- erictune
|
||||||
|
- lavalamp
|
||||||
|
- smarterclayton
|
||||||
|
- thockin
|
||||||
|
- liggitt
|
||||||
|
# - bgrant0607 # manual escalations only
|
||||||
|
reviewers:
|
||||||
|
- thockin
|
||||||
|
- lavalamp
|
||||||
|
- smarterclayton
|
||||||
|
- wojtek-t
|
||||||
|
- deads2k
|
||||||
|
- yujuhong
|
||||||
|
- brendandburns
|
||||||
|
- derekwaynecarr
|
||||||
|
- caesarxuchao
|
||||||
|
- vishh
|
||||||
|
- mikedanese
|
||||||
|
- liggitt
|
||||||
|
- nikhiljindal
|
||||||
|
- gmarek
|
||||||
|
- erictune
|
||||||
|
- davidopp
|
||||||
|
- pmorie
|
||||||
|
- sttts
|
||||||
|
- dchen1107
|
||||||
|
- saad-ali
|
||||||
|
- zmerlynn
|
||||||
|
- luxas
|
||||||
|
- janetkuo
|
||||||
|
- justinsb
|
||||||
|
- pwittrock
|
||||||
|
- roberthbailey
|
||||||
|
- ncdc
|
||||||
|
- tallclair
|
||||||
|
- yifan-gu
|
||||||
|
- eparis
|
||||||
|
- mwielgus
|
||||||
|
- soltysh
|
||||||
|
- piosz
|
||||||
|
- jsafrane
|
||||||
|
- jbeda
|
||||||
|
labels:
|
||||||
|
- sig/apps
|
||||||
104
vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go
generated
vendored
Normal file
104
vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// This file should be consistent with pkg/api/v1/annotation_key_constants.go.
|
||||||
|
|
||||||
|
package core
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy
|
||||||
|
// webhook backend fails.
|
||||||
|
ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open"
|
||||||
|
|
||||||
|
// PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation
|
||||||
|
PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude"
|
||||||
|
|
||||||
|
// MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods
|
||||||
|
MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"
|
||||||
|
|
||||||
|
// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
|
||||||
|
// in the Annotations of a Pod.
|
||||||
|
TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
|
||||||
|
|
||||||
|
// TaintsAnnotationKey represents the key of taints data (json serialized)
|
||||||
|
// in the Annotations of a Node.
|
||||||
|
TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
|
||||||
|
|
||||||
|
// SeccompPodAnnotationKey represents the key of a seccomp profile applied
|
||||||
|
// to all containers of a pod.
|
||||||
|
SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
|
||||||
|
|
||||||
|
// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
|
||||||
|
// to one container of a pod.
|
||||||
|
SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
|
||||||
|
|
||||||
|
// SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime.
|
||||||
|
SeccompProfileRuntimeDefault string = "runtime/default"
|
||||||
|
|
||||||
|
// DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker.
|
||||||
|
// This is now deprecated and should be replaced by SeccompProfileRuntimeDefault.
|
||||||
|
DeprecatedSeccompProfileDockerDefault string = "docker/default"
|
||||||
|
|
||||||
|
// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
|
||||||
|
// in the Annotations of a Node.
|
||||||
|
PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
|
||||||
|
|
||||||
|
// ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache
|
||||||
|
// an object (e.g. secret, config map) before fetching it again from apiserver.
|
||||||
|
// This annotation can be attached to node.
|
||||||
|
ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
|
||||||
|
|
||||||
|
// BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by
|
||||||
|
// the kubelet prior to running
|
||||||
|
BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint"
|
||||||
|
|
||||||
|
// annotation key prefix used to identify non-convertible json paths.
|
||||||
|
NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
|
||||||
|
|
||||||
|
kubectlPrefix = "kubectl.kubernetes.io/"
|
||||||
|
|
||||||
|
// LastAppliedConfigAnnotation is the annotation used to store the previous
|
||||||
|
// configuration of a resource for use in a three way diff by UpdateApplyAnnotation.
|
||||||
|
LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration"
|
||||||
|
|
||||||
|
// AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers
|
||||||
|
//
|
||||||
|
// It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to
|
||||||
|
// allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow
|
||||||
|
// access only from the CIDRs currently allocated to MIT & the USPS.
|
||||||
|
//
|
||||||
|
// Not all cloud providers support this annotation, though AWS & GCE do.
|
||||||
|
AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"
|
||||||
|
|
||||||
|
// EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that
|
||||||
|
// represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z')
|
||||||
|
// of the last change, of some Pod or Service object, that triggered the endpoints object change.
|
||||||
|
// In other words, if a Pod / Service changed at time T0, that change was observed by endpoints
|
||||||
|
// controller at T1, and the Endpoints object was changed at T2, the
|
||||||
|
// EndpointsLastChangeTriggerTime would be set to T0.
|
||||||
|
//
|
||||||
|
// The "endpoints change trigger" here means any Pod or Service change that resulted in the
|
||||||
|
// Endpoints object change.
|
||||||
|
//
|
||||||
|
// Given the definition of the "endpoints change trigger", please note that this annotation will
|
||||||
|
// be set ONLY for endpoints object changes triggered by either Pod or Service change. If the
|
||||||
|
// Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's
|
||||||
|
// already set).
|
||||||
|
//
|
||||||
|
// This annotation will be used to compute the in-cluster network programming latency SLI, see
|
||||||
|
// https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md
|
||||||
|
EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time"
|
||||||
|
)
|
||||||
24
vendor/k8s.io/kubernetes/pkg/apis/core/doc.go
generated
vendored
Normal file
24
vendor/k8s.io/kubernetes/pkg/apis/core/doc.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen=package
|
||||||
|
|
||||||
|
// Package api contains the latest (or "internal") version of the
|
||||||
|
// Kubernetes API objects. This is the API objects as represented in memory.
|
||||||
|
// The contract presented to clients is located in the versioned packages,
|
||||||
|
// which are sub-directories. The first one is "v1". Those packages
|
||||||
|
// describe how a particular version is serialized to storage/network.
|
||||||
|
package core // import "k8s.io/kubernetes/pkg/apis/core"
|
||||||
38
vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go
generated
vendored
Normal file
38
vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package core
|
||||||
|
|
||||||
|
// Field path constants that are specific to the internal API
|
||||||
|
// representation.
|
||||||
|
const (
|
||||||
|
NodeUnschedulableField = "spec.unschedulable"
|
||||||
|
ObjectNameField = "metadata.name"
|
||||||
|
PodHostField = "spec.nodeName"
|
||||||
|
PodStatusField = "status.phase"
|
||||||
|
SecretTypeField = "type"
|
||||||
|
|
||||||
|
EventReasonField = "action"
|
||||||
|
EventSourceField = "reportingComponent"
|
||||||
|
EventTypeField = "type"
|
||||||
|
EventInvolvedKindField = "involvedObject.kind"
|
||||||
|
EventInvolvedNamespaceField = "involvedObject.namespace"
|
||||||
|
EventInvolvedNameField = "involvedObject.name"
|
||||||
|
EventInvolvedUIDField = "involvedObject.uid"
|
||||||
|
EventInvolvedAPIVersionField = "involvedObject.apiVersion"
|
||||||
|
EventInvolvedResourceVersionField = "involvedObject.resourceVersion"
|
||||||
|
EventInvolvedFieldPathField = "involvedObject.fieldPath"
|
||||||
|
)
|
||||||
28
vendor/k8s.io/kubernetes/pkg/apis/core/json.go
generated
vendored
Normal file
28
vendor/k8s.io/kubernetes/pkg/apis/core/json.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package core
|
||||||
|
|
||||||
|
import "encoding/json"
|
||||||
|
|
||||||
|
// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations
|
||||||
|
// to prevent anyone from marshaling these internal structs.
|
||||||
|
|
||||||
|
var _ = json.Marshaler(&AvoidPods{})
|
||||||
|
var _ = json.Unmarshaler(&AvoidPods{})
|
||||||
|
|
||||||
|
func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") }
|
||||||
|
func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") }
|
||||||
34
vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go
generated
vendored
Normal file
34
vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
//TODO: consider making these methods functions, because we don't want helper
|
||||||
|
//functions in the k8s.io/api repo.
|
||||||
|
|
||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
|
||||||
|
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
|
||||||
|
return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
|
||||||
98
vendor/k8s.io/kubernetes/pkg/apis/core/register.go
generated
vendored
Normal file
98
vendor/k8s.io/kubernetes/pkg/apis/core/register.go
generated
vendored
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GroupName is the group name use in this package
|
||||||
|
const GroupName = ""
|
||||||
|
|
||||||
|
// SchemeGroupVersion is group version used to register these objects
|
||||||
|
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
||||||
|
|
||||||
|
// Kind takes an unqualified kind and returns a Group qualified GroupKind
|
||||||
|
func Kind(kind string) schema.GroupKind {
|
||||||
|
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||||
|
func Resource(resource string) schema.GroupResource {
|
||||||
|
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||||
|
AddToScheme = SchemeBuilder.AddToScheme
|
||||||
|
)
|
||||||
|
|
||||||
|
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||||
|
if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||||
|
&Pod{},
|
||||||
|
&PodList{},
|
||||||
|
&PodStatusResult{},
|
||||||
|
&PodTemplate{},
|
||||||
|
&PodTemplateList{},
|
||||||
|
&ReplicationControllerList{},
|
||||||
|
&ReplicationController{},
|
||||||
|
&ServiceList{},
|
||||||
|
&Service{},
|
||||||
|
&ServiceProxyOptions{},
|
||||||
|
&NodeList{},
|
||||||
|
&Node{},
|
||||||
|
&NodeProxyOptions{},
|
||||||
|
&Endpoints{},
|
||||||
|
&EndpointsList{},
|
||||||
|
&Binding{},
|
||||||
|
&Event{},
|
||||||
|
&EventList{},
|
||||||
|
&List{},
|
||||||
|
&LimitRange{},
|
||||||
|
&LimitRangeList{},
|
||||||
|
&ResourceQuota{},
|
||||||
|
&ResourceQuotaList{},
|
||||||
|
&Namespace{},
|
||||||
|
&NamespaceList{},
|
||||||
|
&ServiceAccount{},
|
||||||
|
&ServiceAccountList{},
|
||||||
|
&Secret{},
|
||||||
|
&SecretList{},
|
||||||
|
&PersistentVolume{},
|
||||||
|
&PersistentVolumeList{},
|
||||||
|
&PersistentVolumeClaim{},
|
||||||
|
&PersistentVolumeClaimList{},
|
||||||
|
&PodAttachOptions{},
|
||||||
|
&PodLogOptions{},
|
||||||
|
&PodExecOptions{},
|
||||||
|
&PodPortForwardOptions{},
|
||||||
|
&PodProxyOptions{},
|
||||||
|
&ComponentStatus{},
|
||||||
|
&ComponentStatusList{},
|
||||||
|
&SerializedReference{},
|
||||||
|
&RangeAllocation{},
|
||||||
|
&ConfigMap{},
|
||||||
|
&ConfigMapList{},
|
||||||
|
)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
55
vendor/k8s.io/kubernetes/pkg/apis/core/resource.go
generated
vendored
Normal file
55
vendor/k8s.io/kubernetes/pkg/apis/core/resource.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (self ResourceName) String() string {
|
||||||
|
return string(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the CPU limit if specified.
|
||||||
|
func (self *ResourceList) Cpu() *resource.Quantity {
|
||||||
|
if val, ok := (*self)[ResourceCPU]; ok {
|
||||||
|
return &val
|
||||||
|
}
|
||||||
|
return &resource.Quantity{Format: resource.DecimalSI}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the Memory limit if specified.
|
||||||
|
func (self *ResourceList) Memory() *resource.Quantity {
|
||||||
|
if val, ok := (*self)[ResourceMemory]; ok {
|
||||||
|
return &val
|
||||||
|
}
|
||||||
|
return &resource.Quantity{Format: resource.BinarySI}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ResourceList) Pods() *resource.Quantity {
|
||||||
|
if val, ok := (*self)[ResourcePods]; ok {
|
||||||
|
return &val
|
||||||
|
}
|
||||||
|
return &resource.Quantity{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ResourceList) StorageEphemeral() *resource.Quantity {
|
||||||
|
if val, ok := (*self)[ResourceEphemeralStorage]; ok {
|
||||||
|
return &val
|
||||||
|
}
|
||||||
|
return &resource.Quantity{}
|
||||||
|
}
|
||||||
36
vendor/k8s.io/kubernetes/pkg/apis/core/taint.go
generated
vendored
Normal file
36
vendor/k8s.io/kubernetes/pkg/apis/core/taint.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
//TODO: consider making these methods functions, because we don't want helper
|
||||||
|
//functions in the k8s.io/api repo.
|
||||||
|
|
||||||
|
package core
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect,
|
||||||
|
// if the two taints have same key:effect, regard as they match.
|
||||||
|
func (t *Taint) MatchTaint(taintToMatch Taint) bool {
|
||||||
|
return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
|
||||||
|
}
|
||||||
|
|
||||||
|
// taint.ToString() converts taint struct to string in format key=value:effect or key:effect.
|
||||||
|
func (t *Taint) ToString() string {
|
||||||
|
if len(t.Value) == 0 {
|
||||||
|
return fmt.Sprintf("%v:%v", t.Key, t.Effect)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user