mirror of
https://github.com/1Password/onepassword-operator.git
synced 2025-10-24 08:20:45 +00:00
Compare commits
7 Commits
v1.6.0
...
feature/mi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
209bc7cd17 | ||
|
|
493b311564 | ||
|
|
e39cff881d | ||
|
|
1a085562e4 | ||
|
|
21111fec90 | ||
|
|
69cc7cedb0 | ||
|
|
b30c6130f7 |
8
.github/workflows/build.yml
vendored
8
.github/workflows/build.yml
vendored
@@ -7,15 +7,15 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go 1.x
|
- name: Set up Go 1.x
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: ^1.19
|
go-version: ^1.15
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: go build -v ./...
|
run: go build -v ./...
|
||||||
|
|
||||||
- name: Test
|
- name: Test
|
||||||
run: make test
|
run: go test -v ./... -cover
|
||||||
|
|||||||
9
.github/workflows/release-pr.yml
vendored
9
.github/workflows/release-pr.yml
vendored
@@ -14,10 +14,9 @@ jobs:
|
|||||||
outputs:
|
outputs:
|
||||||
result: ${{ steps.is_release_branch_without_pr.outputs.result }}
|
result: ${{ steps.is_release_branch_without_pr.outputs.result }}
|
||||||
steps:
|
steps:
|
||||||
-
|
- id: is_release_branch_without_pr
|
||||||
id: is_release_branch_without_pr
|
|
||||||
name: Find matching PR
|
name: Find matching PR
|
||||||
uses: actions/github-script@v6
|
uses: actions/github-script@v3
|
||||||
with:
|
with:
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
script: |
|
script: |
|
||||||
@@ -28,7 +27,7 @@ jobs:
|
|||||||
|
|
||||||
if(!releaseBranchName) { return false }
|
if(!releaseBranchName) { return false }
|
||||||
|
|
||||||
const {data: prs} = await github.rest.pulls.list({
|
const {data: prs} = await github.pulls.list({
|
||||||
...context.repo,
|
...context.repo,
|
||||||
state: 'open',
|
state: 'open',
|
||||||
head: `1Password:${releaseBranchName}`,
|
head: `1Password:${releaseBranchName}`,
|
||||||
@@ -43,7 +42,7 @@ jobs:
|
|||||||
name: Create Release Pull Request
|
name: Create Release Pull Request
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
- name: Parse release version
|
- name: Parse release version
|
||||||
id: get_version
|
id: get_version
|
||||||
|
|||||||
36
.github/workflows/release.yml
vendored
36
.github/workflows/release.yml
vendored
@@ -11,14 +11,15 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
-
|
||||||
uses: actions/checkout@v3
|
name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
-
|
||||||
- name: Docker meta
|
name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v4
|
uses: crazy-max/ghaction-docker-meta@v2
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
1password/onepassword-operator
|
1password/onepassword-operator
|
||||||
@@ -27,25 +28,24 @@ jobs:
|
|||||||
tags: |
|
tags: |
|
||||||
type=semver,pattern={{version}}
|
type=semver,pattern={{version}}
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
|
|
||||||
- name: Get the version from tag
|
- name: Get the version from tag
|
||||||
id: get_version
|
id: get_version
|
||||||
run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/v}
|
run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/v}
|
||||||
|
-
|
||||||
- name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v1
|
||||||
|
-
|
||||||
- name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v1
|
||||||
|
-
|
||||||
- name: Docker Login
|
name: Docker Login
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v1
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
-
|
||||||
- name: Build and push
|
name: Build and push
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v2
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
file: Dockerfile
|
file: Dockerfile
|
||||||
|
|||||||
28
CHANGELOG.md
28
CHANGELOG.md
@@ -12,34 +12,6 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
[//]: # (START/v1.6.0)
|
|
||||||
# v1.6.0
|
|
||||||
|
|
||||||
This version of the operator highlights the migration of the operator
|
|
||||||
to use the latest version of the `operator-sdk` (`1.25.0` at the time of this release).
|
|
||||||
|
|
||||||
For the users, this shouldn't affect the functionality of the operator.
|
|
||||||
|
|
||||||
This migration enables us to use the new project structure, as well as updated packages that enables
|
|
||||||
the team (as well as the contributors) to develop the operator more effective.
|
|
||||||
|
|
||||||
## Features
|
|
||||||
* Migrate the operator to use the latest `operator-sdk` {#124}
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
[//]: # (START/v1.5.0)
|
|
||||||
# v1.5.0
|
|
||||||
|
|
||||||
## Features
|
|
||||||
* `OnePasswordItem` now contains a `status` which contains the status of creating the kubernetes secret for a OnePasswordItem. {#52}
|
|
||||||
|
|
||||||
## Fixes
|
|
||||||
* The operator no longer logs an error about changing the secret type if the secret type is not actually being changed.
|
|
||||||
* Annotations on a deployment are no longer removed when the operator triggers a restart. {#112}
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
[//]: # "START/v1.4.1"
|
[//]: # "START/v1.4.1"
|
||||||
|
|
||||||
# v1.4.1
|
# v1.4.1
|
||||||
|
|||||||
19
Dockerfile
19
Dockerfile
@@ -1,7 +1,5 @@
|
|||||||
# Build the manager binary
|
# Build the manager binary
|
||||||
FROM golang:1.19 as builder
|
FROM golang:1.17 as builder
|
||||||
ARG TARGETOS
|
|
||||||
ARG TARGETARCH
|
|
||||||
|
|
||||||
WORKDIR /workspace
|
WORKDIR /workspace
|
||||||
# Copy the Go Modules manifests
|
# Copy the Go Modules manifests
|
||||||
@@ -15,21 +13,9 @@ RUN go mod download
|
|||||||
COPY main.go main.go
|
COPY main.go main.go
|
||||||
COPY api/ api/
|
COPY api/ api/
|
||||||
COPY controllers/ controllers/
|
COPY controllers/ controllers/
|
||||||
COPY pkg/ pkg/
|
|
||||||
COPY version/ version/
|
|
||||||
COPY vendor/ vendor/
|
|
||||||
|
|
||||||
# Build
|
# Build
|
||||||
# the GOARCH has not a default value to allow the binary be built according to the host where the command
|
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go
|
||||||
# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO
|
|
||||||
# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore,
|
|
||||||
# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform.
|
|
||||||
RUN CGO_ENABLED=0 \
|
|
||||||
GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} \
|
|
||||||
go build \
|
|
||||||
-ldflags "-X \"github.com/1Password/onepassword-operator/version.Version=$operator_version\"" \
|
|
||||||
-mod vendor \
|
|
||||||
-a -o manager main.go
|
|
||||||
|
|
||||||
# Use distroless as minimal base image to package the manager binary
|
# Use distroless as minimal base image to package the manager binary
|
||||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||||
@@ -37,6 +23,5 @@ FROM gcr.io/distroless/static:nonroot
|
|||||||
WORKDIR /
|
WORKDIR /
|
||||||
COPY --from=builder /workspace/manager .
|
COPY --from=builder /workspace/manager .
|
||||||
USER 65532:65532
|
USER 65532:65532
|
||||||
COPY config/connect/ config/connect/
|
|
||||||
|
|
||||||
ENTRYPOINT ["/manager"]
|
ENTRYPOINT ["/manager"]
|
||||||
|
|||||||
130
Makefile
130
Makefile
@@ -1,11 +1,9 @@
|
|||||||
export MAIN_BRANCH ?= main
|
|
||||||
|
|
||||||
# VERSION defines the project version for the bundle.
|
# VERSION defines the project version for the bundle.
|
||||||
# Update this value when you upgrade the version of your project.
|
# Update this value when you upgrade the version of your project.
|
||||||
# To re-generate a bundle for another specific version without changing the standard setup, you can:
|
# To re-generate a bundle for another specific version without changing the standard setup, you can:
|
||||||
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
|
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
|
||||||
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
|
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
|
||||||
VERSION ?= 1.5.0
|
VERSION ?= 0.0.1
|
||||||
|
|
||||||
# CHANNELS define the bundle channels used in the bundle.
|
# CHANNELS define the bundle channels used in the bundle.
|
||||||
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
|
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
|
||||||
@@ -30,8 +28,8 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
|
|||||||
# This variable is used to construct full image tags for bundle and catalog images.
|
# This variable is used to construct full image tags for bundle and catalog images.
|
||||||
#
|
#
|
||||||
# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
|
# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
|
||||||
# onepassword.com/onepassword-operator-bundle:$VERSION and onepassword.com/onepassword-operator-catalog:$VERSION.
|
# onepassword.com/onepassword-operator-new-bundle:$VERSION and onepassword.com/onepassword-operator-new-catalog:$VERSION.
|
||||||
IMAGE_TAG_BASE ?= onepassword.com/onepassword-operator
|
IMAGE_TAG_BASE ?= onepassword.com/onepassword-operator-new
|
||||||
|
|
||||||
# BUNDLE_IMG defines the image:tag used for the bundle.
|
# BUNDLE_IMG defines the image:tag used for the bundle.
|
||||||
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
|
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
|
||||||
@@ -49,9 +47,9 @@ ifeq ($(USE_IMAGE_DIGESTS), true)
|
|||||||
endif
|
endif
|
||||||
|
|
||||||
# Image URL to use all building/pushing image targets
|
# Image URL to use all building/pushing image targets
|
||||||
IMG ?= 1password/onepassword-operator:latest
|
IMG ?= controller:latest
|
||||||
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
|
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
|
||||||
ENVTEST_K8S_VERSION = 1.24.2
|
ENVTEST_K8S_VERSION = 1.23
|
||||||
|
|
||||||
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
|
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
|
||||||
ifeq (,$(shell go env GOBIN))
|
ifeq (,$(shell go env GOBIN))
|
||||||
@@ -61,6 +59,7 @@ GOBIN=$(shell go env GOBIN)
|
|||||||
endif
|
endif
|
||||||
|
|
||||||
# Setting SHELL to bash allows bash commands to be executed by recipes.
|
# Setting SHELL to bash allows bash commands to be executed by recipes.
|
||||||
|
# This is a requirement for 'setup-envtest.sh' in the test target.
|
||||||
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
|
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
|
||||||
SHELL = /usr/bin/env bash -o pipefail
|
SHELL = /usr/bin/env bash -o pipefail
|
||||||
.SHELLFLAGS = -ec
|
.SHELLFLAGS = -ec
|
||||||
@@ -105,7 +104,7 @@ vet: ## Run go vet against code.
|
|||||||
|
|
||||||
.PHONY: test
|
.PHONY: test
|
||||||
test: manifests generate fmt vet envtest ## Run tests.
|
test: manifests generate fmt vet envtest ## Run tests.
|
||||||
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out
|
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
|
||||||
|
|
||||||
##@ Build
|
##@ Build
|
||||||
|
|
||||||
@@ -117,9 +116,6 @@ build: generate fmt vet ## Build manager binary.
|
|||||||
run: manifests generate fmt vet ## Run a controller from your host.
|
run: manifests generate fmt vet ## Run a controller from your host.
|
||||||
go run ./main.go
|
go run ./main.go
|
||||||
|
|
||||||
# If you wish built the manager image targeting other platforms you can use the --platform flag.
|
|
||||||
# (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it.
|
|
||||||
# More info: https://docs.docker.com/develop/develop-images/build_enhancements/
|
|
||||||
.PHONY: docker-build
|
.PHONY: docker-build
|
||||||
docker-build: test ## Build docker image with the manager.
|
docker-build: test ## Build docker image with the manager.
|
||||||
docker build -t ${IMG} .
|
docker build -t ${IMG} .
|
||||||
@@ -128,23 +124,6 @@ docker-build: test ## Build docker image with the manager.
|
|||||||
docker-push: ## Push docker image with the manager.
|
docker-push: ## Push docker image with the manager.
|
||||||
docker push ${IMG}
|
docker push ${IMG}
|
||||||
|
|
||||||
# PLATFORMS defines the target platforms for the manager image be build to provide support to multiple
|
|
||||||
# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to:
|
|
||||||
# - able to use docker buildx . More info: https://docs.docker.com/build/buildx/
|
|
||||||
# - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/
|
|
||||||
# - be able to push the image for your registry (i.e. if you do not inform a valid value via IMG=<myregistry/image:<tag>> than the export will fail)
|
|
||||||
# To properly provided solutions that supports more than one platform you should use this option.
|
|
||||||
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
|
|
||||||
.PHONY: docker-buildx
|
|
||||||
docker-buildx: test ## Build and push docker image for the manager for cross-platform support
|
|
||||||
# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
|
|
||||||
sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
|
|
||||||
- docker buildx create --name project-v3-builder
|
|
||||||
docker buildx use project-v3-builder
|
|
||||||
- docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross
|
|
||||||
- docker buildx rm project-v3-builder
|
|
||||||
rm Dockerfile.cross
|
|
||||||
|
|
||||||
##@ Deployment
|
##@ Deployment
|
||||||
|
|
||||||
ifndef ignore-not-found
|
ifndef ignore-not-found
|
||||||
@@ -159,12 +138,8 @@ install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~
|
|||||||
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
|
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
|
||||||
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
|
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
|
||||||
|
|
||||||
.PHONY: set-namespace
|
|
||||||
set-namespace:
|
|
||||||
cd config/default && $(KUSTOMIZE) edit set namespace $(shell kubectl config view --minify -o jsonpath={..namespace})
|
|
||||||
|
|
||||||
.PHONY: deploy
|
.PHONY: deploy
|
||||||
deploy: manifests kustomize set-namespace ## Deploy controller to the K8s cluster specified in ~/.kube/config.
|
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
|
||||||
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
|
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
|
||||||
$(KUSTOMIZE) build config/default | kubectl apply -f -
|
$(KUSTOMIZE) build config/default | kubectl apply -f -
|
||||||
|
|
||||||
@@ -172,37 +147,34 @@ deploy: manifests kustomize set-namespace ## Deploy controller to the K8s cluste
|
|||||||
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
|
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
|
||||||
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
|
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
|
||||||
|
|
||||||
##@ Build Dependencies
|
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
|
||||||
|
|
||||||
## Location to install dependencies to
|
|
||||||
LOCALBIN ?= $(shell pwd)/bin
|
|
||||||
$(LOCALBIN):
|
|
||||||
mkdir -p $(LOCALBIN)
|
|
||||||
|
|
||||||
## Tool Binaries
|
|
||||||
KUSTOMIZE ?= $(LOCALBIN)/kustomize
|
|
||||||
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
|
|
||||||
ENVTEST ?= $(LOCALBIN)/setup-envtest
|
|
||||||
|
|
||||||
## Tool Versions
|
|
||||||
KUSTOMIZE_VERSION ?= v4.5.7
|
|
||||||
CONTROLLER_TOOLS_VERSION ?= v0.10.0
|
|
||||||
|
|
||||||
KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
|
|
||||||
.PHONY: kustomize
|
|
||||||
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
|
|
||||||
$(KUSTOMIZE): $(LOCALBIN)
|
|
||||||
test -s $(LOCALBIN)/kustomize || { curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); }
|
|
||||||
|
|
||||||
.PHONY: controller-gen
|
.PHONY: controller-gen
|
||||||
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
|
controller-gen: ## Download controller-gen locally if necessary.
|
||||||
$(CONTROLLER_GEN): $(LOCALBIN)
|
$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0)
|
||||||
test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
|
|
||||||
|
|
||||||
|
KUSTOMIZE = $(shell pwd)/bin/kustomize
|
||||||
|
.PHONY: kustomize
|
||||||
|
kustomize: ## Download kustomize locally if necessary.
|
||||||
|
$(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7)
|
||||||
|
|
||||||
|
ENVTEST = $(shell pwd)/bin/setup-envtest
|
||||||
.PHONY: envtest
|
.PHONY: envtest
|
||||||
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
|
envtest: ## Download envtest-setup locally if necessary.
|
||||||
$(ENVTEST): $(LOCALBIN)
|
$(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest)
|
||||||
test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
|
|
||||||
|
# go-get-tool will 'go get' any package $2 and install it to $1.
|
||||||
|
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
|
||||||
|
define go-get-tool
|
||||||
|
@[ -f $(1) ] || { \
|
||||||
|
set -e ;\
|
||||||
|
TMP_DIR=$$(mktemp -d) ;\
|
||||||
|
cd $$TMP_DIR ;\
|
||||||
|
go mod init tmp ;\
|
||||||
|
echo "Downloading $(2)" ;\
|
||||||
|
GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\
|
||||||
|
rm -rf $$TMP_DIR ;\
|
||||||
|
}
|
||||||
|
endef
|
||||||
|
|
||||||
.PHONY: bundle
|
.PHONY: bundle
|
||||||
bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
|
bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
|
||||||
@@ -228,7 +200,7 @@ ifeq (,$(shell which opm 2>/dev/null))
|
|||||||
set -e ;\
|
set -e ;\
|
||||||
mkdir -p $(dir $(OPM)) ;\
|
mkdir -p $(dir $(OPM)) ;\
|
||||||
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
|
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
|
||||||
curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$${OS}-$${ARCH}-opm ;\
|
curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.19.1/$${OS}-$${ARCH}-opm ;\
|
||||||
chmod +x $(OPM) ;\
|
chmod +x $(OPM) ;\
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@@ -259,37 +231,3 @@ catalog-build: opm ## Build a catalog image.
|
|||||||
.PHONY: catalog-push
|
.PHONY: catalog-push
|
||||||
catalog-push: ## Push a catalog image.
|
catalog-push: ## Push a catalog image.
|
||||||
$(MAKE) docker-push IMG=$(CATALOG_IMG)
|
$(MAKE) docker-push IMG=$(CATALOG_IMG)
|
||||||
|
|
||||||
## Release functions =====================
|
|
||||||
GIT_BRANCH := $(shell git symbolic-ref --short HEAD)
|
|
||||||
WORKTREE_CLEAN := $(shell git status --porcelain 1>/dev/null 2>&1; echo $$?)
|
|
||||||
SCRIPTS_DIR := $(CURDIR)/scripts
|
|
||||||
|
|
||||||
versionFile = $(CURDIR)/.VERSION
|
|
||||||
curVersion := $(shell cat $(versionFile) | sed 's/^v//')
|
|
||||||
|
|
||||||
release/prepare: .check_git_clean ## Updates changelog and creates release branch (call with 'release/prepare version=<new_version_number>')
|
|
||||||
|
|
||||||
@test $(version) || (echo "[ERROR] version argument not set."; exit 1)
|
|
||||||
@git fetch --quiet origin $(MAIN_BRANCH)
|
|
||||||
|
|
||||||
@echo $(version) | tr -d '\n' | tee $(versionFile) &>/dev/null
|
|
||||||
|
|
||||||
@NEW_VERSION=$(version) $(SCRIPTS_DIR)/prepare-release.sh
|
|
||||||
|
|
||||||
release/tag: .check_git_clean ## Creates git tag
|
|
||||||
@git pull --ff-only
|
|
||||||
@echo "Applying tag 'v$(curVersion)' to HEAD..."
|
|
||||||
@git tag --sign "v$(curVersion)" -m "Release v$(curVersion)"
|
|
||||||
@echo "[OK] Success!"
|
|
||||||
@echo "Remember to call 'git push --tags' to persist the tag."
|
|
||||||
|
|
||||||
## Helper functions =====================
|
|
||||||
|
|
||||||
.check_git_clean:
|
|
||||||
ifneq ($(GIT_BRANCH), $(MAIN_BRANCH))
|
|
||||||
@echo "[ERROR] Please checkout default branch '$(MAIN_BRANCH)' and re-run this command."; exit 1;
|
|
||||||
endif
|
|
||||||
ifneq ($(WORKTREE_CLEAN), 0)
|
|
||||||
@echo "[ERROR] Uncommitted changes found in worktree. Address them and try again."; exit 1;
|
|
||||||
endif
|
|
||||||
|
|||||||
5
PROJECT
5
PROJECT
@@ -1,10 +1,10 @@
|
|||||||
domain: onepassword.com
|
domain: onepassword.com
|
||||||
layout:
|
layout:
|
||||||
- go.kubebuilder.io/v4-alpha
|
- go.kubebuilder.io/v3
|
||||||
plugins:
|
plugins:
|
||||||
manifests.sdk.operatorframework.io/v2: {}
|
manifests.sdk.operatorframework.io/v2: {}
|
||||||
scorecard.sdk.operatorframework.io/v2: {}
|
scorecard.sdk.operatorframework.io/v2: {}
|
||||||
projectName: onepassword-operator
|
projectName: onepassword-operator-new
|
||||||
repo: github.com/1Password/onepassword-operator
|
repo: github.com/1Password/onepassword-operator
|
||||||
resources:
|
resources:
|
||||||
- api:
|
- api:
|
||||||
@@ -12,6 +12,7 @@ resources:
|
|||||||
namespaced: true
|
namespaced: true
|
||||||
controller: true
|
controller: true
|
||||||
domain: onepassword.com
|
domain: onepassword.com
|
||||||
|
group: onepassword
|
||||||
kind: OnePasswordItem
|
kind: OnePasswordItem
|
||||||
path: github.com/1Password/onepassword-operator/api/v1
|
path: github.com/1Password/onepassword-operator/api/v1
|
||||||
version: v1
|
version: v1
|
||||||
|
|||||||
164
README.md
164
README.md
@@ -1,3 +1,5 @@
|
|||||||
|
// TODO: Update README.md
|
||||||
|
|
||||||
# 1Password Connect Kubernetes Operator
|
# 1Password Connect Kubernetes Operator
|
||||||
|
|
||||||
The 1Password Connect Kubernetes Operator provides the ability to integrate Kubernetes with 1Password. This Operator manages `OnePasswordItem` Custom Resource Definitions (CRDs) that define the location of an Item stored in 1Password. The `OnePasswordItem` CRD, when created, will be used to compose a Kubernetes Secret containing the contents of the specified item.
|
The 1Password Connect Kubernetes Operator provides the ability to integrate Kubernetes with 1Password. This Operator manages `OnePasswordItem` Custom Resource Definitions (CRDs) that define the location of an Item stored in 1Password. The `OnePasswordItem` CRD, when created, will be used to compose a Kubernetes Secret containing the contents of the specified item.
|
||||||
@@ -6,39 +8,26 @@ The 1Password Connect Kubernetes Operator also allows for Kubernetes Secrets to
|
|||||||
|
|
||||||
The 1Password Connect Kubernetes Operator will continually check for updates from 1Password for any Kubernetes Secret that it has generated. If a Kubernetes Secret is updated, any Deployment using that secret can be automatically restarted.
|
The 1Password Connect Kubernetes Operator will continually check for updates from 1Password for any Kubernetes Secret that it has generated. If a Kubernetes Secret is updated, any Deployment using that secret can be automatically restarted.
|
||||||
|
|
||||||
- [Prerequisites](#prerequisites)
|
## Setup
|
||||||
- [Quickstart for Deploying 1Password Connect to Kubernetes](#quickstart-for-deploying-1password-connect-to-kubernetes)
|
|
||||||
- [Kubernetes Operator Deployment](#kubernetes-operator-deployment)
|
|
||||||
- [Usage](#usage)
|
|
||||||
- [Configuring Automatic Rolling Restarts of Deployments](#configuring-automatic-rolling-restarts-of-deployments)
|
|
||||||
- [Development](#development)
|
|
||||||
- [Security](#security)
|
|
||||||
|
|
||||||
## Prerequisites
|
Prerequisites:
|
||||||
|
|
||||||
- [1Password Command Line Tool Installed](https://1password.com/downloads/command-line/)
|
- [1Password Command Line Tool Installed](https://1password.com/downloads/command-line/)
|
||||||
- [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
|
- [kubectl installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
|
||||||
- [`docker` installed](https://docs.docker.com/get-docker/)
|
- [docker installed](https://docs.docker.com/get-docker/)
|
||||||
- [Generated a 1password-credentials.json file and issued a 1Password Connect API Token for the K8s Operator integration](https://developer.1password.com/docs/connect/get-started/#step-1-set-up-a-secrets-automation-workflow)
|
- [Generated a 1password-credentials.json file and issued a 1Password Connect API Token for the K8s Operator integration](https://support.1password.com/secrets-automation/)
|
||||||
- [A `1password-credentials.json` file generated and a 1Password Connect API Token issues for the K8s Operator integration](https://developer.1password.com/docs/connect/get-started/#step-1-set-up-a-secrets-automation-workflow)
|
- [1Password Connect deployed to Kubernetes](https://support.1password.com/connect-deploy-kubernetes/#step-2-deploy-a-1password-connect-server). **NOTE**: If customization of the 1Password Connect deployment is not required you can skip this prerequisite.
|
||||||
## Quickstart for Deploying 1Password Connect to Kubernetes
|
|
||||||
|
|
||||||
If 1Password Connect is already running, you can skip this step.
|
### Quickstart for Deploying 1Password Connect to Kubernetes
|
||||||
|
|
||||||
There are options to deploy 1Password Connect:
|
|
||||||
|
|
||||||
- [Deploy with Helm](#deploy-with-helm)
|
|
||||||
- [Deploy using the Connect Operator](#deploy-using-the-connect-operator)
|
|
||||||
|
|
||||||
#### Deploy with Helm
|
#### Deploy with Helm
|
||||||
|
|
||||||
The 1Password Connect Helm Chart helps to simplify the deployment of 1Password Connect and the 1Password Connect Kubernetes Operator to Kubernetes.
|
The 1Password Connect Helm Chart helps to simplify the deployment of 1Password Connect and the 1Password Connect Kubernetes Operator to Kubernetes.
|
||||||
|
|
||||||
[The 1Password Connect Helm Chart can be found here.](https://github.com/1Password/connect-helm-charts)
|
[The 1Password Connect Helm Chart can be found here.](https://github.com/1Password/connect-helm-charts)
|
||||||
|
|
||||||
#### Deploy using the Connect Operator
|
#### Deploy using the Connect Operator
|
||||||
|
If 1Password Connect is already running, you can skip this step. This guide will provide a quickstart option for deploying a default configuration of 1Password Connect via starting the deploying the 1Password Connect Operator, however it is recommended that you instead deploy your own manifest file if customization of the 1Password Connect deployment is desired.
|
||||||
This guide will provide a quickstart option for deploying a default configuration of 1Password Connect via starting the deploying the 1Password Connect Operator, however it is recommended that you instead deploy your own manifest file if customization of the 1Password Connect deployment is desired.
|
|
||||||
|
|
||||||
Encode the 1password-credentials.json file you generated in the prerequisite steps and save it to a file named op-session:
|
Encode the 1password-credentials.json file you generated in the prerequisite steps and save it to a file named op-session:
|
||||||
|
|
||||||
@@ -48,20 +37,16 @@ cat 1password-credentials.json | base64 | \
|
|||||||
```
|
```
|
||||||
|
|
||||||
Create a Kubernetes secret from the op-session file:
|
Create a Kubernetes secret from the op-session file:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
kubectl create secret generic op-credentials --from-file=op-session
|
kubectl create secret generic op-credentials --from-file=op-session
|
||||||
```
|
```
|
||||||
|
|
||||||
Add the following environment variable to the onepassword-connect-operator container in `/config/manager/manager.yaml`:
|
Add the following environment variable to the onepassword-connect-operator container in `deploy/operator.yaml`:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
- name: MANAGE_CONNECT
|
- name: MANAGE_CONNECT
|
||||||
value: "true"
|
value: "true"
|
||||||
```
|
```
|
||||||
|
Adding this environment variable will have the operator automatically deploy a default configuration of 1Password Connect to the `default` namespace.
|
||||||
Adding this environment variable will have the operator automatically deploy a default configuration of 1Password Connect to the current namespace.
|
|
||||||
|
|
||||||
### Kubernetes Operator Deployment
|
### Kubernetes Operator Deployment
|
||||||
|
|
||||||
**Create Kubernetes Secret for OP_CONNECT_TOKEN**
|
**Create Kubernetes Secret for OP_CONNECT_TOKEN**
|
||||||
@@ -73,33 +58,43 @@ kubectl create secret generic onepassword-token --from-literal=token=<OP_CONNECT
|
|||||||
```
|
```
|
||||||
|
|
||||||
If you do not have a token for the operator, you can generate a token and save it to kubernetes with the following command:
|
If you do not have a token for the operator, you can generate a token and save it to kubernetes with the following command:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
kubectl create secret generic onepassword-token --from-literal=token=$(op create connect token <server> op-k8s-operator --vault <vault>)
|
kubectl create secret generic onepassword-token --from-literal=token=$(op create connect token <server> op-k8s-operator --vault <vault>)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
[More information on generating a token can be found here](https://support.1password.com/secrets-automation/#appendix-issue-additional-access-tokens)
|
||||||
|
|
||||||
|
**Set Permissions For Operator**
|
||||||
|
|
||||||
|
We must create a service account, role, and role binding and Kubernetes. Examples can be found in the `/deploy` folder.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl apply -f deploy/permissions.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
**Create Custom One Password Secret Resource**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl apply -f deploy/crds/onepassword.com_onepassworditems_crd.yaml
|
||||||
|
```
|
||||||
|
|
||||||
**Deploying the Operator**
|
**Deploying the Operator**
|
||||||
|
|
||||||
An sample Deployment yaml can be found at `/config/manager/manager.yaml`.
|
An sample Deployment yaml can be found at `/deploy/operator.yaml`.
|
||||||
|
|
||||||
|
|
||||||
To further configure the 1Password Kubernetes Operator the Following Environment variables can be set in the operator yaml:
|
To further configure the 1Password Kubernetes Operator the Following Environment variables can be set in the operator yaml:
|
||||||
|
|
||||||
- **OP_CONNECT_HOST** (required): Specifies the host name within Kubernetes in which to access the 1Password Connect.
|
- **OP_CONNECT_HOST** (required): Specifies the host name within Kubernetes in which to access the 1Password Connect.
|
||||||
- **WATCH_NAMESPACE:** (default: watch all namespaces): Comma separated list of what Namespaces to watch for changes.
|
- **WATCH_NAMESPACE:** (default: watch all namespaces): Comma separated list of what Namespaces to watch for changes.
|
||||||
- **POLLING_INTERVAL** (default: 600): The number of seconds the 1Password Kubernetes Operator will wait before checking for updates from 1Password Connect.
|
- **POLLING_INTERVAL** (default: 600): The number of seconds the 1Password Kubernetes Operator will wait before checking for updates from 1Password Connect.
|
||||||
- **MANAGE_CONNECT** (default: false): If set to true, on deployment of the operator, a default configuration of the OnePassword Connect Service will be deployed to the current namespace.
|
- **MANAGE_CONNECT** (default: false): If set to true, on deployment of the operator, a default configuration of the OnePassword Connect Service will be deployed to the `default` namespace.
|
||||||
- **AUTO_RESTART** (default: false): If set to true, the operator will restart any deployment using a secret from 1Password Connect. This can be overwritten by namespace, deployment, or individual secret. More details on AUTO_RESTART can be found in the ["Configuring Automatic Rolling Restarts of Deployments"](#configuring-automatic-rolling-restarts-of-deployments) section.
|
- **AUTO_RESTART** (default: false): If set to true, the operator will restart any deployment using a secret from 1Password Connect. This can be overwritten by namespace, deployment, or individual secret. More details on AUTO_RESTART can be found in the ["Configuring Automatic Rolling Restarts of Deployments"](#configuring-automatic-rolling-restarts-of-deployments) section.
|
||||||
|
|
||||||
To deploy the operator, simply run the following command:
|
Apply the deployment file:
|
||||||
|
|
||||||
```shell
|
```yaml
|
||||||
make deploy
|
kubectl apply -f deploy/operator.yaml
|
||||||
```
|
|
||||||
|
|
||||||
**Undeploy Operator**
|
|
||||||
|
|
||||||
```
|
|
||||||
make undeploy
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
@@ -153,29 +148,26 @@ Note: Deleting the Deployment that you've created will automatically delete the
|
|||||||
If a 1Password Item that is linked to a Kubernetes Secret is updated within the POLLING_INTERVAL the associated Kubernetes Secret will be updated. However, if you do not want a specific secret to be updated you can add the tag `operator.1password.io:ignore-secret` to the item stored in 1Password. While this tag is in place, any updates made to an item will not trigger an update to the associated secret in Kubernetes.
|
If a 1Password Item that is linked to a Kubernetes Secret is updated within the POLLING_INTERVAL the associated Kubernetes Secret will be updated. However, if you do not want a specific secret to be updated you can add the tag `operator.1password.io:ignore-secret` to the item stored in 1Password. While this tag is in place, any updates made to an item will not trigger an update to the associated secret in Kubernetes.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
**NOTE**
|
**NOTE**
|
||||||
|
|
||||||
If multiple 1Password vaults/items have the same `title` when using a title in the access path, the desired action will be performed on the oldest vault/item.
|
If multiple 1Password vaults/items have the same `title` when using a title in the access path, the desired action will be performed on the oldest vault/item.
|
||||||
|
|
||||||
Titles and field names that include white space and other characters that are not a valid [DNS subdomain name](https://kubernetes.io/docs/concepts/configuration/secret/) will create Kubernetes secrets that have titles and fields in the following format:
|
Titles and field names that include white space and other characters that are not a valid [DNS subdomain name](https://kubernetes.io/docs/concepts/configuration/secret/) will create Kubernetes secrets that have titles and fields in the following format:
|
||||||
|
- Invalid characters before the first alphanumeric character and after the last alphanumeric character will be removed
|
||||||
- Invalid characters before the first alphanumeric character and after the last alphanumeric character will be removed
|
- All whitespaces between words will be replaced by `-`
|
||||||
- All whitespaces between words will be replaced by `-`
|
- All the letters will be lower-cased.
|
||||||
- All the letters will be lower-cased.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Configuring Automatic Rolling Restarts of Deployments
|
### Configuring Automatic Rolling Restarts of Deployments
|
||||||
|
|
||||||
If a 1Password Item that is linked to a Kubernetes Secret is updated, any deployments configured to `auto-restart` AND are using that secret will be given a rolling restart the next time 1Password Connect is polled for updates.
|
If a 1Password Item that is linked to a Kubernetes Secret is updated, any deployments configured to `auto-restart` AND are using that secret will be given a rolling restart the next time 1Password Connect is polled for updates.
|
||||||
|
|
||||||
There are many levels of granularity on which to configure auto restarts on deployments: at the operator level, per-namespace, or per-deployment.
|
There are many levels of granularity on which to configure auto restarts on deployments: at the operator level, per-namespace, or per-deployment.
|
||||||
|
|
||||||
**On the operator**: This method allows for managing auto restarts on all deployments within the namespaces watched by operator. Auto restarts can be enabled by setting the environemnt variable `AUTO_RESTART` to true. If the value is not set, the operator will default this value to false.
|
**On the operator**: This method allows for managing auto restarts on all deployments within the namespaces watched by operator. Auto restarts can be enabled by setting the environemnt variable `AUTO_RESTART` to true. If the value is not set, the operator will default this value to false.
|
||||||
|
|
||||||
**Per Namespace**: This method allows for managing auto restarts on all deployments within a namespace. Auto restarts can by managed by setting the annotation `operator.1password.io/auto-restart` to either `true` or `false` on the desired namespace. An example of this is shown below:
|
**Per Namespace**: This method allows for managing auto restarts on all deployments within a namespace. Auto restarts can by managed by setting the annotation `operator.1password.io/auto-restart` to either `true` or `false` on the desired namespace. An example of this is shown below:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# enabled auto restarts for all deployments within a namespace unless overwritten within a deployment
|
# enabled auto restarts for all deployments within a namespace unless overwritten within a deployment
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
@@ -185,12 +177,10 @@ metadata:
|
|||||||
annotations:
|
annotations:
|
||||||
operator.1password.io/auto-restart: "true"
|
operator.1password.io/auto-restart: "true"
|
||||||
```
|
```
|
||||||
|
|
||||||
If the value is not set, the auto restart settings on the operator will be used. This value can be overwritten by deployment.
|
If the value is not set, the auto restart settings on the operator will be used. This value can be overwritten by deployment.
|
||||||
|
|
||||||
**Per Deployment**
|
**Per Deployment**
|
||||||
This method allows for managing auto restarts on a given deployment. Auto restarts can by managed by setting the annotation `operator.1password.io/auto-restart` to either `true` or `false` on the desired deployment. An example of this is shown below:
|
This method allows for managing auto restarts on a given deployment. Auto restarts can by managed by setting the annotation `operator.1password.io/auto-restart` to either `true` or `false` on the desired deployment. An example of this is shown below:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# enabled auto restarts for the deployment
|
# enabled auto restarts for the deployment
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
@@ -200,12 +190,10 @@ metadata:
|
|||||||
annotations:
|
annotations:
|
||||||
operator.1password.io/auto-restart: "true"
|
operator.1password.io/auto-restart: "true"
|
||||||
```
|
```
|
||||||
|
|
||||||
If the value is not set, the auto restart settings on the namespace will be used.
|
If the value is not set, the auto restart settings on the namespace will be used.
|
||||||
|
|
||||||
**Per OnePasswordItem Custom Resource**
|
**Per OnePasswordItem Custom Resource**
|
||||||
This method allows for managing auto restarts on a given OnePasswordItem custom resource. Auto restarts can by managed by setting the annotation `operator.1password.io/auto_restart` to either `true` or `false` on the desired OnePasswordItem. An example of this is shown below:
|
This method allows for managing auto restarts on a given OnePasswordItem custom resource. Auto restarts can by managed by setting the annotation `operator.1password.io/auto_restart` to either `true` or `false` on the desired OnePasswordItem. An example of this is shown below:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# enabled auto restarts for the OnePasswordItem
|
# enabled auto restarts for the OnePasswordItem
|
||||||
apiVersion: onepassword.com/v1
|
apiVersion: onepassword.com/v1
|
||||||
@@ -215,78 +203,34 @@ metadata:
|
|||||||
annotations:
|
annotations:
|
||||||
operator.1password.io/auto-restart: "true"
|
operator.1password.io/auto-restart: "true"
|
||||||
```
|
```
|
||||||
|
|
||||||
If the value is not set, the auto restart settings on the deployment will be used.
|
If the value is not set, the auto restart settings on the deployment will be used.
|
||||||
|
|
||||||
<!--
|
|
||||||
## Getting Started
|
|
||||||
You’ll need a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster.
|
|
||||||
**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows).
|
|
||||||
|
|
||||||
### Running on the cluster
|
|
||||||
1. Install Instances of Custom Resources:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
kubectl apply -f config/samples/
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Deploy the controller to the cluster with the image specified by `IMG`:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
make deploy IMG=<some-registry>/onepassword-operator:tag
|
|
||||||
```
|
|
||||||
|
|
||||||
### Uninstall CRDs
|
|
||||||
To delete the CRDs from the cluster:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
make uninstall
|
|
||||||
```
|
|
||||||
|
|
||||||
### Undeploy controller
|
|
||||||
UnDeploy the controller to the cluster:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
make undeploy
|
|
||||||
```
|
|
||||||
-->
|
|
||||||
|
|
||||||
## Development
|
## Development
|
||||||
|
|
||||||
### How it works
|
### Creating a Docker image
|
||||||
|
|
||||||
This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
|
To create a local version of the Docker image for testing, use the following `Makefile` target:
|
||||||
|
```shell
|
||||||
It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/)
|
make build/local
|
||||||
which provides a reconcile function responsible for synchronizing resources untile the desired state is reached on the cluster
|
|
||||||
|
|
||||||
### Test It Out
|
|
||||||
|
|
||||||
1. Install the CRDs into the cluster:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
make install
|
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running):
|
### Building the Operator binary
|
||||||
|
```shell
|
||||||
```sh
|
make build/binary
|
||||||
make run
|
|
||||||
```
|
```
|
||||||
|
|
||||||
**NOTE:** You can also run this in one step by running: `make install run`
|
The binary will be placed inside a `dist` folder within this repository.
|
||||||
|
|
||||||
### Modifying the API definitions
|
### Running Tests
|
||||||
|
|
||||||
If you are editing the API definitions, generate the manifests such as CRs or CRDs using:
|
```shell
|
||||||
|
make test
|
||||||
```sh
|
|
||||||
make manifests
|
|
||||||
```
|
```
|
||||||
|
|
||||||
**NOTE:** Run `make --help` for more information on all potential `make` targets
|
With coverage:
|
||||||
|
```shell
|
||||||
More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html)
|
make test/coverage
|
||||||
|
```
|
||||||
|
|
||||||
## Security
|
## Security
|
||||||
|
|
||||||
|
|||||||
@@ -1,30 +1,22 @@
|
|||||||
/*
|
/*
|
||||||
MIT License
|
Copyright 2022.
|
||||||
|
|
||||||
Copyright (c) 2020-2022 1Password
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
Unless required by applicable law or agreed to in writing, software
|
||||||
copies or substantial portions of the Software.
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
See the License for the specific language governing permissions and
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
limitations under the License.
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Package v1 contains API Schema definitions for the v1 API group
|
// Package v1 contains API Schema definitions for the onepassword v1 API group
|
||||||
// +kubebuilder:object:generate=true
|
//+kubebuilder:object:generate=true
|
||||||
// +groupName=onepassword.com
|
//+groupName=onepassword.onepassword.com
|
||||||
package v1
|
package v1
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -34,7 +26,7 @@ import (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
// GroupVersion is group version used to register these objects
|
// GroupVersion is group version used to register these objects
|
||||||
GroupVersion = schema.GroupVersion{Group: "onepassword.com", Version: "v1"}
|
GroupVersion = schema.GroupVersion{Group: "onepassword.onepassword.com", Version: "v1"}
|
||||||
|
|
||||||
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
|
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
|
||||||
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
|
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
|
||||||
|
|||||||
@@ -1,25 +1,17 @@
|
|||||||
/*
|
/*
|
||||||
MIT License
|
Copyright 2022.
|
||||||
|
|
||||||
Copyright (c) 2020-2022 1Password
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
Unless required by applicable law or agreed to in writing, software
|
||||||
copies or substantial portions of the Software.
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
See the License for the specific language governing permissions and
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
limitations under the License.
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package v1
|
package v1
|
||||||
@@ -36,35 +28,14 @@ type OnePasswordItemSpec struct {
|
|||||||
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
|
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
|
||||||
// Important: Run "make" to regenerate code after modifying this file
|
// Important: Run "make" to regenerate code after modifying this file
|
||||||
|
|
||||||
|
// Foo is an example field of OnePasswordItem. Edit onepassworditem_types.go to remove/update
|
||||||
ItemPath string `json:"itemPath,omitempty"`
|
ItemPath string `json:"itemPath,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type OnePasswordItemConditionType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// OnePasswordItemReady means the Kubernetes secret is ready for use.
|
|
||||||
OnePasswordItemReady OnePasswordItemConditionType = "Ready"
|
|
||||||
)
|
|
||||||
|
|
||||||
type OnePasswordItemCondition struct {
|
|
||||||
// Type of job condition, Completed.
|
|
||||||
Type OnePasswordItemConditionType `json:"type"`
|
|
||||||
// Status of the condition, one of True, False, Unknown.
|
|
||||||
Status metav1.ConditionStatus `json:"status"`
|
|
||||||
// Last time the condition transit from one status to another.
|
|
||||||
// +optional
|
|
||||||
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
|
|
||||||
// Human-readable message indicating details about last transition.
|
|
||||||
// +optional
|
|
||||||
Message string `json:"message,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnePasswordItemStatus defines the observed state of OnePasswordItem
|
// OnePasswordItemStatus defines the observed state of OnePasswordItem
|
||||||
type OnePasswordItemStatus struct {
|
type OnePasswordItemStatus struct {
|
||||||
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
|
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
|
||||||
// Important: Run "make" to regenerate code after modifying this file
|
// Important: Run "make" to regenerate code after modifying this file
|
||||||
|
|
||||||
Conditions []OnePasswordItemCondition `json:"conditions"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//+kubebuilder:object:root=true
|
//+kubebuilder:object:root=true
|
||||||
@@ -74,9 +45,8 @@ type OnePasswordItemStatus struct {
|
|||||||
type OnePasswordItem struct {
|
type OnePasswordItem struct {
|
||||||
metav1.TypeMeta `json:",inline"`
|
metav1.TypeMeta `json:",inline"`
|
||||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||||
|
Type string `json:"type,omitempty"`
|
||||||
|
|
||||||
// Kubernetes secret type. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types
|
|
||||||
Type string `json:"type,omitempty"`
|
|
||||||
Spec OnePasswordItemSpec `json:"spec,omitempty"`
|
Spec OnePasswordItemSpec `json:"spec,omitempty"`
|
||||||
Status OnePasswordItemStatus `json:"status,omitempty"`
|
Status OnePasswordItemStatus `json:"status,omitempty"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,27 +2,19 @@
|
|||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
MIT License
|
Copyright 2022.
|
||||||
|
|
||||||
Copyright (c) 2020-2022 1Password
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
Unless required by applicable law or agreed to in writing, software
|
||||||
copies or substantial portions of the Software.
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
See the License for the specific language governing permissions and
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
limitations under the License.
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Code generated by controller-gen. DO NOT EDIT.
|
// Code generated by controller-gen. DO NOT EDIT.
|
||||||
@@ -39,7 +31,7 @@ func (in *OnePasswordItem) DeepCopyInto(out *OnePasswordItem) {
|
|||||||
out.TypeMeta = in.TypeMeta
|
out.TypeMeta = in.TypeMeta
|
||||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
out.Spec = in.Spec
|
out.Spec = in.Spec
|
||||||
in.Status.DeepCopyInto(&out.Status)
|
out.Status = in.Status
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItem.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItem.
|
||||||
@@ -60,22 +52,6 @@ func (in *OnePasswordItem) DeepCopyObject() runtime.Object {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *OnePasswordItemCondition) DeepCopyInto(out *OnePasswordItemCondition) {
|
|
||||||
*out = *in
|
|
||||||
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItemCondition.
|
|
||||||
func (in *OnePasswordItemCondition) DeepCopy() *OnePasswordItemCondition {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(OnePasswordItemCondition)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *OnePasswordItemList) DeepCopyInto(out *OnePasswordItemList) {
|
func (in *OnePasswordItemList) DeepCopyInto(out *OnePasswordItemList) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@@ -126,13 +102,6 @@ func (in *OnePasswordItemSpec) DeepCopy() *OnePasswordItemSpec {
|
|||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *OnePasswordItemStatus) DeepCopyInto(out *OnePasswordItemStatus) {
|
func (in *OnePasswordItemStatus) DeepCopyInto(out *OnePasswordItemStatus) {
|
||||||
*out = *in
|
*out = *in
|
||||||
if in.Conditions != nil {
|
|
||||||
in, out := &in.Conditions, &out.Conditions
|
|
||||||
*out = make([]OnePasswordItemCondition, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItemStatus.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItemStatus.
|
||||||
|
|||||||
@@ -1,10 +1,6 @@
|
|||||||
---
|
|
||||||
apiVersion: apiextensions.k8s.io/v1
|
apiVersion: apiextensions.k8s.io/v1
|
||||||
kind: CustomResourceDefinition
|
kind: CustomResourceDefinition
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
|
||||||
controller-gen.kubebuilder.io/version: v0.9.2
|
|
||||||
creationTimestamp: null
|
|
||||||
name: onepassworditems.onepassword.com
|
name: onepassworditems.onepassword.com
|
||||||
spec:
|
spec:
|
||||||
group: onepassword.com
|
group: onepassword.com
|
||||||
@@ -16,6 +12,8 @@ spec:
|
|||||||
scope: Namespaced
|
scope: Namespaced
|
||||||
versions:
|
versions:
|
||||||
- name: v1
|
- name: v1
|
||||||
|
served: true
|
||||||
|
storage: true
|
||||||
schema:
|
schema:
|
||||||
openAPIV3Schema:
|
openAPIV3Schema:
|
||||||
description: OnePasswordItem is the Schema for the onepassworditems API
|
description: OnePasswordItem is the Schema for the onepassworditems API
|
||||||
@@ -40,38 +38,8 @@ spec:
|
|||||||
type: object
|
type: object
|
||||||
status:
|
status:
|
||||||
description: OnePasswordItemStatus defines the observed state of OnePasswordItem
|
description: OnePasswordItemStatus defines the observed state of OnePasswordItem
|
||||||
properties:
|
|
||||||
conditions:
|
|
||||||
items:
|
|
||||||
properties:
|
|
||||||
lastTransitionTime:
|
|
||||||
description: Last time the condition transit from one status
|
|
||||||
to another.
|
|
||||||
format: date-time
|
|
||||||
type: string
|
|
||||||
message:
|
|
||||||
description: Human-readable message indicating details about
|
|
||||||
last transition.
|
|
||||||
type: string
|
|
||||||
status:
|
|
||||||
description: Status of the condition, one of True, False, Unknown.
|
|
||||||
type: string
|
|
||||||
type:
|
|
||||||
description: Type of job condition, Completed.
|
|
||||||
type: string
|
|
||||||
required:
|
|
||||||
- status
|
|
||||||
- type
|
|
||||||
type: object
|
|
||||||
type: array
|
|
||||||
required:
|
|
||||||
- conditions
|
|
||||||
type: object
|
type: object
|
||||||
type:
|
type:
|
||||||
description: 'Kubernetes secret type. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types'
|
description: 'Kubernetes secret type. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types'
|
||||||
type: string
|
type: string
|
||||||
type: object
|
type: object
|
||||||
served: true
|
|
||||||
storage: true
|
|
||||||
subresources:
|
|
||||||
status: {}
|
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
apiVersion: onepassword.com/v1
|
||||||
|
kind: OnePasswordItem
|
||||||
|
metadata:
|
||||||
|
name: example
|
||||||
|
spec:
|
||||||
|
itemPath: "vaults/<vault_id>/items/<item_id>"
|
||||||
@@ -9,7 +9,7 @@ spec:
|
|||||||
ports:
|
ports:
|
||||||
- port: 8080
|
- port: 8080
|
||||||
name: connect-api
|
name: connect-api
|
||||||
nodePort: 30080
|
nodePort: 31080
|
||||||
- port: 8081
|
- port: 8081
|
||||||
name: connect-sync
|
name: connect-sync
|
||||||
nodePort: 30081
|
nodePort: 31081
|
||||||
@@ -2,7 +2,7 @@
|
|||||||
# since it depends on service name and namespace that are out of this kustomize package.
|
# since it depends on service name and namespace that are out of this kustomize package.
|
||||||
# It should be run by config/default
|
# It should be run by config/default
|
||||||
resources:
|
resources:
|
||||||
- bases/onepassword.com_onepassworditems.yaml
|
- bases/onepassword.onepassword.com_onepassworditems.yaml
|
||||||
#+kubebuilder:scaffold:crdkustomizeresource
|
#+kubebuilder:scaffold:crdkustomizeresource
|
||||||
|
|
||||||
patchesStrategicMerge:
|
patchesStrategicMerge:
|
||||||
|
|||||||
39
config/crd/operator.yaml
Normal file
39
config/crd/operator.yaml
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
spec:
|
||||||
|
replicas: 1
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
spec:
|
||||||
|
serviceAccountName: onepassword-connect-operator
|
||||||
|
containers:
|
||||||
|
- name: onepassword-connect-operator
|
||||||
|
image: 1password/onepassword-operator
|
||||||
|
command: ["/manager"]
|
||||||
|
env:
|
||||||
|
- name: WATCH_NAMESPACE
|
||||||
|
value: "default"
|
||||||
|
- name: POD_NAME
|
||||||
|
valueFrom:
|
||||||
|
fieldRef:
|
||||||
|
fieldPath: metadata.name
|
||||||
|
- name: OPERATOR_NAME
|
||||||
|
value: "onepassword-connect-operator"
|
||||||
|
- name: OP_CONNECT_HOST
|
||||||
|
value: "http://onepassword-connect:8080"
|
||||||
|
- name: POLLING_INTERVAL
|
||||||
|
value: "10"
|
||||||
|
- name: OP_CONNECT_TOKEN
|
||||||
|
valueFrom:
|
||||||
|
secretKeyRef:
|
||||||
|
name: onepassword-token
|
||||||
|
key: token
|
||||||
|
- name: AUTO_RESTART
|
||||||
|
value: "false"
|
||||||
39
config/crd/operator_multi_namespace_example.yaml
Normal file
39
config/crd/operator_multi_namespace_example.yaml
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
spec:
|
||||||
|
replicas: 1
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
spec:
|
||||||
|
serviceAccountName: onepassword-connect-operator
|
||||||
|
containers:
|
||||||
|
- name: onepassword-connect-operator
|
||||||
|
image: 1password/onepassword-operator
|
||||||
|
command: ["/manager"]
|
||||||
|
env:
|
||||||
|
- name: WATCH_NAMESPACE
|
||||||
|
value: "default,development"
|
||||||
|
- name: POD_NAME
|
||||||
|
valueFrom:
|
||||||
|
fieldRef:
|
||||||
|
fieldPath: metadata.name
|
||||||
|
- name: OPERATOR_NAME
|
||||||
|
value: "onepassword-connect-operator"
|
||||||
|
- name: OP_CONNECT_HOST
|
||||||
|
value: "http://onepassword-connect:8080"
|
||||||
|
- name: POLLING_INTERVAL
|
||||||
|
value: "10"
|
||||||
|
- name: OP_CONNECT_TOKEN
|
||||||
|
valueFrom:
|
||||||
|
secretKeyRef:
|
||||||
|
name: onepassword-token
|
||||||
|
key: token
|
||||||
|
- name: AUTO_RESTART
|
||||||
|
value: "false"
|
||||||
@@ -3,5 +3,5 @@ apiVersion: apiextensions.k8s.io/v1
|
|||||||
kind: CustomResourceDefinition
|
kind: CustomResourceDefinition
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
annotations:
|
||||||
cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME
|
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
|
||||||
name: onepassworditems.onepassword.com
|
name: onepassworditems.onepassword.onepassword.com
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
apiVersion: apiextensions.k8s.io/v1
|
apiVersion: apiextensions.k8s.io/v1
|
||||||
kind: CustomResourceDefinition
|
kind: CustomResourceDefinition
|
||||||
metadata:
|
metadata:
|
||||||
name: onepassworditems.onepassword.com
|
name: onepassworditems.onepassword.onepassword.com
|
||||||
spec:
|
spec:
|
||||||
conversion:
|
conversion:
|
||||||
strategy: Webhook
|
strategy: Webhook
|
||||||
|
|||||||
@@ -1,22 +1,40 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: ServiceAccount
|
||||||
|
metadata:
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
---
|
||||||
|
kind: ClusterRoleBinding
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
metadata:
|
||||||
|
name: onepassword-connect-operator-default
|
||||||
|
namespace: default
|
||||||
|
subjects:
|
||||||
|
- kind: ServiceAccount
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
namespace: default
|
||||||
|
roleRef:
|
||||||
|
kind: ClusterRole
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
---
|
---
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
kind: ClusterRole
|
kind: ClusterRole
|
||||||
metadata:
|
metadata:
|
||||||
creationTimestamp: null
|
creationTimestamp: null
|
||||||
name: manager-role
|
name: onepassword-connect-operator
|
||||||
rules:
|
rules:
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- ""
|
- ""
|
||||||
resources:
|
resources:
|
||||||
- configmaps
|
|
||||||
- endpoints
|
|
||||||
- events
|
|
||||||
- namespaces
|
|
||||||
- persistentvolumeclaims
|
|
||||||
- pods
|
- pods
|
||||||
- secrets
|
|
||||||
- services
|
- services
|
||||||
- services/finalizers
|
- services/finalizers
|
||||||
|
- endpoints
|
||||||
|
- persistentvolumeclaims
|
||||||
|
- events
|
||||||
|
- configmaps
|
||||||
|
- secrets
|
||||||
|
- namespaces
|
||||||
verbs:
|
verbs:
|
||||||
- create
|
- create
|
||||||
- delete
|
- delete
|
||||||
@@ -25,17 +43,11 @@ rules:
|
|||||||
- patch
|
- patch
|
||||||
- update
|
- update
|
||||||
- watch
|
- watch
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- pods
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- apps
|
- apps
|
||||||
resources:
|
resources:
|
||||||
- daemonsets
|
|
||||||
- deployments
|
- deployments
|
||||||
|
- daemonsets
|
||||||
- replicasets
|
- replicasets
|
||||||
- statefulsets
|
- statefulsets
|
||||||
verbs:
|
verbs:
|
||||||
@@ -47,30 +59,12 @@ rules:
|
|||||||
- update
|
- update
|
||||||
- watch
|
- watch
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- apps
|
- monitoring.coreos.com
|
||||||
resources:
|
resources:
|
||||||
- deployments
|
- servicemonitors
|
||||||
verbs:
|
verbs:
|
||||||
|
- get
|
||||||
- create
|
- create
|
||||||
- delete
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- watch
|
|
||||||
- apiGroups:
|
|
||||||
- apps
|
|
||||||
resources:
|
|
||||||
- deployments
|
|
||||||
- replicasets
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- apiGroups:
|
|
||||||
- apps
|
|
||||||
resources:
|
|
||||||
- deployments/finalizers
|
|
||||||
verbs:
|
|
||||||
- update
|
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- apps
|
- apps
|
||||||
resourceNames:
|
resourceNames:
|
||||||
@@ -80,19 +74,17 @@ rules:
|
|||||||
verbs:
|
verbs:
|
||||||
- update
|
- update
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- apps
|
- ""
|
||||||
resources:
|
resources:
|
||||||
- deployments/status
|
- pods
|
||||||
verbs:
|
verbs:
|
||||||
- get
|
- get
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- monitoring.coreos.com
|
- apps
|
||||||
resources:
|
resources:
|
||||||
- servicemonitors
|
- replicasets
|
||||||
|
- deployments
|
||||||
verbs:
|
verbs:
|
||||||
- create
|
|
||||||
- get
|
- get
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- onepassword.com
|
- onepassword.com
|
||||||
@@ -106,29 +98,3 @@ rules:
|
|||||||
- patch
|
- patch
|
||||||
- update
|
- update
|
||||||
- watch
|
- watch
|
||||||
- apiGroups:
|
|
||||||
- onepassword.com
|
|
||||||
resources:
|
|
||||||
- onepassworditems
|
|
||||||
verbs:
|
|
||||||
- create
|
|
||||||
- delete
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- watch
|
|
||||||
- apiGroups:
|
|
||||||
- onepassword.com
|
|
||||||
resources:
|
|
||||||
- onepassworditems/finalizers
|
|
||||||
verbs:
|
|
||||||
- update
|
|
||||||
- apiGroups:
|
|
||||||
- onepassword.com
|
|
||||||
resources:
|
|
||||||
- onepassworditems/status
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
114
config/crd/permissions_multi_namespace_example.yaml
Normal file
114
config/crd/permissions_multi_namespace_example.yaml
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: ServiceAccount
|
||||||
|
metadata:
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
---
|
||||||
|
kind: ClusterRoleBinding
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
metadata:
|
||||||
|
name: onepassword-connect-operator-default
|
||||||
|
namespace: default
|
||||||
|
subjects:
|
||||||
|
- kind: ServiceAccount
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
namespace: default
|
||||||
|
roleRef:
|
||||||
|
kind: ClusterRole
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
---
|
||||||
|
kind: ClusterRoleBinding
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
metadata:
|
||||||
|
name: onepassword-connect-operator-development
|
||||||
|
namespace: development
|
||||||
|
subjects:
|
||||||
|
- kind: ServiceAccount
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
namespace: default
|
||||||
|
roleRef:
|
||||||
|
kind: ClusterRole
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
---
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRole
|
||||||
|
metadata:
|
||||||
|
creationTimestamp: null
|
||||||
|
name: onepassword-connect-operator
|
||||||
|
rules:
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- pods
|
||||||
|
- services
|
||||||
|
- services/finalizers
|
||||||
|
- endpoints
|
||||||
|
- persistentvolumeclaims
|
||||||
|
- events
|
||||||
|
- configmaps
|
||||||
|
- secrets
|
||||||
|
- namespaces
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- apps
|
||||||
|
resources:
|
||||||
|
- deployments
|
||||||
|
- daemonsets
|
||||||
|
- replicasets
|
||||||
|
- statefulsets
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- monitoring.coreos.com
|
||||||
|
resources:
|
||||||
|
- servicemonitors
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- create
|
||||||
|
- apiGroups:
|
||||||
|
- apps
|
||||||
|
resourceNames:
|
||||||
|
- onepassword-connect-operator
|
||||||
|
resources:
|
||||||
|
- deployments/finalizers
|
||||||
|
verbs:
|
||||||
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- pods
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- apps
|
||||||
|
resources:
|
||||||
|
- replicasets
|
||||||
|
- deployments
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- onepassword.com
|
||||||
|
resources:
|
||||||
|
- '*'
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- watch
|
||||||
@@ -1,17 +1,18 @@
|
|||||||
|
# Adds namespace to all resources.
|
||||||
|
namespace: onepassword-operator-new-system
|
||||||
|
|
||||||
# Value of this field is prepended to the
|
# Value of this field is prepended to the
|
||||||
# names of all resources, e.g. a deployment named
|
# names of all resources, e.g. a deployment named
|
||||||
# "wordpress" becomes "alices-wordpress".
|
# "wordpress" becomes "alices-wordpress".
|
||||||
# Note that it should also match with the prefix (text before '-') of the namespace
|
# Note that it should also match with the prefix (text before '-') of the namespace
|
||||||
# field above.
|
# field above.
|
||||||
# namePrefix: onepassword-connect-
|
namePrefix: onepassword-operator-new-
|
||||||
|
|
||||||
# Labels to add to all resources and selectors.
|
# Labels to add to all resources and selectors.
|
||||||
#labels:
|
#commonLabels:
|
||||||
#- includeSelectors: true
|
# someName: someValue
|
||||||
# pairs:
|
|
||||||
# someName: someValue
|
|
||||||
|
|
||||||
resources:
|
bases:
|
||||||
- ../crd
|
- ../crd
|
||||||
- ../rbac
|
- ../rbac
|
||||||
- ../manager
|
- ../manager
|
||||||
@@ -42,102 +43,32 @@ patchesStrategicMerge:
|
|||||||
# 'CERTMANAGER' needs to be enabled to use ca injection
|
# 'CERTMANAGER' needs to be enabled to use ca injection
|
||||||
#- webhookcainjection_patch.yaml
|
#- webhookcainjection_patch.yaml
|
||||||
|
|
||||||
|
# the following config is for teaching kustomize how to do var substitution
|
||||||
|
vars:
|
||||||
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
|
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
|
||||||
# Uncomment the following replacements to add the cert-manager CA injection annotations
|
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
|
||||||
#replacements:
|
# objref:
|
||||||
# - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs
|
# kind: Certificate
|
||||||
# kind: Certificate
|
# group: cert-manager.io
|
||||||
# group: cert-manager.io
|
# version: v1
|
||||||
# version: v1
|
# name: serving-cert # this name should match the one in certificate.yaml
|
||||||
# name: serving-cert # this name should match the one in certificate.yaml
|
# fieldref:
|
||||||
# fieldPath: .metadata.namespace # namespace of the certificate CR
|
# fieldpath: metadata.namespace
|
||||||
# targets:
|
#- name: CERTIFICATE_NAME
|
||||||
# - select:
|
# objref:
|
||||||
# kind: ValidatingWebhookConfiguration
|
# kind: Certificate
|
||||||
# fieldPaths:
|
# group: cert-manager.io
|
||||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
# version: v1
|
||||||
# options:
|
# name: serving-cert # this name should match the one in certificate.yaml
|
||||||
# delimiter: '/'
|
#- name: SERVICE_NAMESPACE # namespace of the service
|
||||||
# index: 0
|
# objref:
|
||||||
# create: true
|
# kind: Service
|
||||||
# - select:
|
# version: v1
|
||||||
# kind: MutatingWebhookConfiguration
|
# name: webhook-service
|
||||||
# fieldPaths:
|
# fieldref:
|
||||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
# fieldpath: metadata.namespace
|
||||||
# options:
|
#- name: SERVICE_NAME
|
||||||
# delimiter: '/'
|
# objref:
|
||||||
# index: 0
|
# kind: Service
|
||||||
# create: true
|
# version: v1
|
||||||
# - select:
|
# name: webhook-service
|
||||||
# kind: CustomResourceDefinition
|
|
||||||
# fieldPaths:
|
|
||||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
|
||||||
# options:
|
|
||||||
# delimiter: '/'
|
|
||||||
# index: 0
|
|
||||||
# create: true
|
|
||||||
# - source:
|
|
||||||
# kind: Certificate
|
|
||||||
# group: cert-manager.io
|
|
||||||
# version: v1
|
|
||||||
# name: serving-cert # this name should match the one in certificate.yaml
|
|
||||||
# fieldPath: .metadata.name
|
|
||||||
# targets:
|
|
||||||
# - select:
|
|
||||||
# kind: ValidatingWebhookConfiguration
|
|
||||||
# fieldPaths:
|
|
||||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
|
||||||
# options:
|
|
||||||
# delimiter: '/'
|
|
||||||
# index: 1
|
|
||||||
# create: true
|
|
||||||
# - select:
|
|
||||||
# kind: MutatingWebhookConfiguration
|
|
||||||
# fieldPaths:
|
|
||||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
|
||||||
# options:
|
|
||||||
# delimiter: '/'
|
|
||||||
# index: 1
|
|
||||||
# create: true
|
|
||||||
# - select:
|
|
||||||
# kind: CustomResourceDefinition
|
|
||||||
# fieldPaths:
|
|
||||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
|
||||||
# options:
|
|
||||||
# delimiter: '/'
|
|
||||||
# index: 1
|
|
||||||
# create: true
|
|
||||||
# - source: # Add cert-manager annotation to the webhook Service
|
|
||||||
# kind: Service
|
|
||||||
# version: v1
|
|
||||||
# name: webhook-service
|
|
||||||
# fieldPath: .metadata.name # namespace of the service
|
|
||||||
# targets:
|
|
||||||
# - select:
|
|
||||||
# kind: Certificate
|
|
||||||
# group: cert-manager.io
|
|
||||||
# version: v1
|
|
||||||
# fieldPaths:
|
|
||||||
# - .spec.dnsNames.0
|
|
||||||
# - .spec.dnsNames.1
|
|
||||||
# options:
|
|
||||||
# delimiter: '.'
|
|
||||||
# index: 0
|
|
||||||
# create: true
|
|
||||||
# - source:
|
|
||||||
# kind: Service
|
|
||||||
# version: v1
|
|
||||||
# name: webhook-service
|
|
||||||
# fieldPath: .metadata.namespace # namespace of the service
|
|
||||||
# targets:
|
|
||||||
# - select:
|
|
||||||
# kind: Certificate
|
|
||||||
# group: cert-manager.io
|
|
||||||
# version: v1
|
|
||||||
# fieldPaths:
|
|
||||||
# - .spec.dnsNames.0
|
|
||||||
# - .spec.dnsNames.1
|
|
||||||
# options:
|
|
||||||
# delimiter: '.'
|
|
||||||
# index: 1
|
|
||||||
# create: true
|
|
||||||
|
|||||||
@@ -3,19 +3,14 @@
|
|||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
metadata:
|
metadata:
|
||||||
name: onepassword-connect-operator
|
name: controller-manager
|
||||||
namespace: system
|
namespace: system
|
||||||
spec:
|
spec:
|
||||||
template:
|
template:
|
||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- name: kube-rbac-proxy
|
- name: kube-rbac-proxy
|
||||||
securityContext:
|
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
capabilities:
|
|
||||||
drop:
|
|
||||||
- "ALL"
|
|
||||||
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
|
|
||||||
args:
|
args:
|
||||||
- "--secure-listen-address=0.0.0.0:8443"
|
- "--secure-listen-address=0.0.0.0:8443"
|
||||||
- "--upstream=http://127.0.0.1:8080/"
|
- "--upstream=http://127.0.0.1:8080/"
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
metadata:
|
metadata:
|
||||||
name: onepassword-connect-operator
|
name: controller-manager
|
||||||
namespace: system
|
namespace: system
|
||||||
spec:
|
spec:
|
||||||
template:
|
template:
|
||||||
|
|||||||
@@ -9,13 +9,3 @@ webhook:
|
|||||||
leaderElection:
|
leaderElection:
|
||||||
leaderElect: true
|
leaderElect: true
|
||||||
resourceName: c26807fd.onepassword.com
|
resourceName: c26807fd.onepassword.com
|
||||||
# leaderElectionReleaseOnCancel defines if the leader should step down volume
|
|
||||||
# when the Manager ends. This requires the binary to immediately end when the
|
|
||||||
# Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
|
|
||||||
# speeds up voluntary leader transitions as the new leader don't have to wait
|
|
||||||
# LeaseDuration time first.
|
|
||||||
# In the default scaffold provided, the program ends immediately after
|
|
||||||
# the manager stops, so would be fine to enable this option. However,
|
|
||||||
# if you are doing or is intended to do any operation such as perform cleanups
|
|
||||||
# after the manager stops then its usage might be unsafe.
|
|
||||||
# leaderElectionReleaseOnCancel: true
|
|
||||||
|
|||||||
@@ -1,63 +1,40 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Namespace
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
control-plane: controller-manager
|
||||||
|
name: system
|
||||||
|
---
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
metadata:
|
metadata:
|
||||||
name: onepassword-connect-operator
|
name: controller-manager
|
||||||
namespace: system
|
namespace: system
|
||||||
labels:
|
labels:
|
||||||
name: onepassword-connect-operator
|
control-plane: controller-manager
|
||||||
spec:
|
spec:
|
||||||
selector:
|
selector:
|
||||||
matchLabels:
|
matchLabels:
|
||||||
name: onepassword-connect-operator
|
control-plane: controller-manager
|
||||||
replicas: 1
|
replicas: 1
|
||||||
template:
|
template:
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
annotations:
|
||||||
kubectl.kubernetes.io/default-container: manager
|
kubectl.kubernetes.io/default-container: manager
|
||||||
labels:
|
labels:
|
||||||
name: onepassword-connect-operator
|
control-plane: controller-manager
|
||||||
spec:
|
spec:
|
||||||
securityContext:
|
securityContext:
|
||||||
runAsNonRoot: true
|
runAsNonRoot: true
|
||||||
# TODO(user): For common cases that do not require escalating privileges
|
|
||||||
# it is recommended to ensure that all your Pods/Containers are restrictive.
|
|
||||||
# More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
|
|
||||||
# Please uncomment the following code if your project does NOT have to work on old Kubernetes
|
|
||||||
# versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
|
|
||||||
# seccompProfile:
|
|
||||||
# type: RuntimeDefault
|
|
||||||
containers:
|
containers:
|
||||||
- command:
|
- command:
|
||||||
- /manager
|
- /manager
|
||||||
args:
|
args:
|
||||||
- --leader-elect
|
- --leader-elect
|
||||||
image: 1password/onepassword-operator:latest
|
image: controller:latest
|
||||||
name: manager
|
name: manager
|
||||||
env:
|
|
||||||
- name: WATCH_NAMESPACE
|
|
||||||
value: "default"
|
|
||||||
- name: POD_NAME
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.name
|
|
||||||
- name: OPERATOR_NAME
|
|
||||||
value: "onepassword-connect-operator"
|
|
||||||
- name: OP_CONNECT_HOST
|
|
||||||
value: "http://onepassword-connect:8080"
|
|
||||||
- name: POLLING_INTERVAL
|
|
||||||
value: "10"
|
|
||||||
- name: OP_CONNECT_TOKEN
|
|
||||||
valueFrom:
|
|
||||||
secretKeyRef:
|
|
||||||
name: onepassword-token
|
|
||||||
key: token
|
|
||||||
- name: AUTO_RESTART
|
|
||||||
value: "false"
|
|
||||||
securityContext:
|
securityContext:
|
||||||
allowPrivilegeEscalation: false
|
allowPrivilegeEscalation: false
|
||||||
capabilities:
|
|
||||||
drop:
|
|
||||||
- "ALL"
|
|
||||||
livenessProbe:
|
livenessProbe:
|
||||||
httpGet:
|
httpGet:
|
||||||
path: /healthz
|
path: /healthz
|
||||||
@@ -79,5 +56,5 @@ spec:
|
|||||||
requests:
|
requests:
|
||||||
cpu: 10m
|
cpu: 10m
|
||||||
memory: 64Mi
|
memory: 64Mi
|
||||||
serviceAccountName: onepassword-connect-operator
|
serviceAccountName: controller-manager
|
||||||
terminationGracePeriodSeconds: 10
|
terminationGracePeriodSeconds: 10
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
# These resources constitute the fully configured set of manifests
|
# These resources constitute the fully configured set of manifests
|
||||||
# used to generate the 'manifests/' directory in a bundle.
|
# used to generate the 'manifests/' directory in a bundle.
|
||||||
resources:
|
resources:
|
||||||
- bases/onepassword-operator.clusterserviceversion.yaml
|
- bases/onepassword-operator-new.clusterserviceversion.yaml
|
||||||
- ../default
|
- ../default
|
||||||
- ../samples
|
- ../samples
|
||||||
- ../scorecard
|
- ../scorecard
|
||||||
@@ -20,8 +20,7 @@ resources:
|
|||||||
# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
|
# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
|
||||||
# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
|
# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
|
||||||
# - op: remove
|
# - op: remove
|
||||||
|
# path: /spec/template/spec/containers/1/volumeMounts/0
|
||||||
# path: /spec/template/spec/containers/0/volumeMounts/0
|
|
||||||
# # Remove the "cert" volume, since OLM will create and mount a set of certs.
|
# # Remove the "cert" volume, since OLM will create and mount a set of certs.
|
||||||
# # Update the indices in this path if adding or removing volumes in the manager's Deployment.
|
# # Update the indices in this path if adding or removing volumes in the manager's Deployment.
|
||||||
# - op: remove
|
# - op: remove
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ apiVersion: monitoring.coreos.com/v1
|
|||||||
kind: ServiceMonitor
|
kind: ServiceMonitor
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
name: onepassword-connect-operator
|
control-plane: controller-manager
|
||||||
name: onepassword-connect-operator-metrics-monitor
|
name: controller-manager-metrics-monitor
|
||||||
namespace: system
|
namespace: system
|
||||||
spec:
|
spec:
|
||||||
endpoints:
|
endpoints:
|
||||||
@@ -17,4 +17,4 @@ spec:
|
|||||||
insecureSkipVerify: true
|
insecureSkipVerify: true
|
||||||
selector:
|
selector:
|
||||||
matchLabels:
|
matchLabels:
|
||||||
name: onepassword-connect-operator
|
control-plane: controller-manager
|
||||||
|
|||||||
@@ -8,5 +8,5 @@ roleRef:
|
|||||||
name: proxy-role
|
name: proxy-role
|
||||||
subjects:
|
subjects:
|
||||||
- kind: ServiceAccount
|
- kind: ServiceAccount
|
||||||
name: onepassword-connect-operator
|
name: controller-manager
|
||||||
namespace: system
|
namespace: system
|
||||||
|
|||||||
@@ -2,8 +2,8 @@ apiVersion: v1
|
|||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
name: onepassword-connect-operator
|
control-plane: controller-manager
|
||||||
name: onepassword-connect-operator-metrics-service
|
name: controller-manager-metrics-service
|
||||||
namespace: system
|
namespace: system
|
||||||
spec:
|
spec:
|
||||||
ports:
|
ports:
|
||||||
@@ -12,4 +12,4 @@ spec:
|
|||||||
protocol: TCP
|
protocol: TCP
|
||||||
targetPort: https
|
targetPort: https
|
||||||
selector:
|
selector:
|
||||||
name: onepassword-connect-operator
|
control-plane: controller-manager
|
||||||
|
|||||||
@@ -8,5 +8,5 @@ roleRef:
|
|||||||
name: leader-election-role
|
name: leader-election-role
|
||||||
subjects:
|
subjects:
|
||||||
- kind: ServiceAccount
|
- kind: ServiceAccount
|
||||||
name: onepassword-connect-operator
|
name: controller-manager
|
||||||
namespace: system
|
namespace: system
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ metadata:
|
|||||||
name: onepassworditem-editor-role
|
name: onepassworditem-editor-role
|
||||||
rules:
|
rules:
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- onepassword.com
|
- onepassword.onepassword.com
|
||||||
resources:
|
resources:
|
||||||
- onepassworditems
|
- onepassworditems
|
||||||
verbs:
|
verbs:
|
||||||
@@ -17,7 +17,7 @@ rules:
|
|||||||
- update
|
- update
|
||||||
- watch
|
- watch
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- onepassword.com
|
- onepassword.onepassword.com
|
||||||
resources:
|
resources:
|
||||||
- onepassworditems/status
|
- onepassworditems/status
|
||||||
verbs:
|
verbs:
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ metadata:
|
|||||||
name: onepassworditem-viewer-role
|
name: onepassworditem-viewer-role
|
||||||
rules:
|
rules:
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- onepassword.com
|
- onepassword.onepassword.com
|
||||||
resources:
|
resources:
|
||||||
- onepassworditems
|
- onepassworditems
|
||||||
verbs:
|
verbs:
|
||||||
@@ -13,7 +13,7 @@ rules:
|
|||||||
- list
|
- list
|
||||||
- watch
|
- watch
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- onepassword.com
|
- onepassword.onepassword.com
|
||||||
resources:
|
resources:
|
||||||
- onepassworditems/status
|
- onepassworditems/status
|
||||||
verbs:
|
verbs:
|
||||||
|
|||||||
@@ -8,5 +8,5 @@ roleRef:
|
|||||||
name: manager-role
|
name: manager-role
|
||||||
subjects:
|
subjects:
|
||||||
- kind: ServiceAccount
|
- kind: ServiceAccount
|
||||||
name: onepassword-connect-operator
|
name: controller-manager
|
||||||
namespace: system
|
namespace: system
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: ServiceAccount
|
kind: ServiceAccount
|
||||||
metadata:
|
metadata:
|
||||||
name: onepassword-connect-operator
|
name: controller-manager
|
||||||
namespace: system
|
namespace: system
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
apiVersion: onepassword.com/v1
|
apiVersion: onepassword.onepassword.com/v1
|
||||||
kind: OnePasswordItem
|
kind: OnePasswordItem
|
||||||
metadata:
|
metadata:
|
||||||
name: onepassworditem-sample
|
name: onepassworditem-sample
|
||||||
spec:
|
spec:
|
||||||
itemPath: "vaults/<vault_id>/items/<item_id>"
|
# TODO(user): Add fields here
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
entrypoint:
|
entrypoint:
|
||||||
- scorecard-test
|
- scorecard-test
|
||||||
- basic-check-spec
|
- basic-check-spec
|
||||||
image: quay.io/operator-framework/scorecard-test:v1.23.0
|
image: quay.io/operator-framework/scorecard-test:v1.19.0
|
||||||
labels:
|
labels:
|
||||||
suite: basic
|
suite: basic
|
||||||
test: basic-check-spec-test
|
test: basic-check-spec-test
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
entrypoint:
|
entrypoint:
|
||||||
- scorecard-test
|
- scorecard-test
|
||||||
- olm-bundle-validation
|
- olm-bundle-validation
|
||||||
image: quay.io/operator-framework/scorecard-test:v1.23.0
|
image: quay.io/operator-framework/scorecard-test:v1.19.0
|
||||||
labels:
|
labels:
|
||||||
suite: olm
|
suite: olm
|
||||||
test: olm-bundle-validation-test
|
test: olm-bundle-validation-test
|
||||||
@@ -14,7 +14,7 @@
|
|||||||
entrypoint:
|
entrypoint:
|
||||||
- scorecard-test
|
- scorecard-test
|
||||||
- olm-crds-have-validation
|
- olm-crds-have-validation
|
||||||
image: quay.io/operator-framework/scorecard-test:v1.23.0
|
image: quay.io/operator-framework/scorecard-test:v1.19.0
|
||||||
labels:
|
labels:
|
||||||
suite: olm
|
suite: olm
|
||||||
test: olm-crds-have-validation-test
|
test: olm-crds-have-validation-test
|
||||||
@@ -24,7 +24,7 @@
|
|||||||
entrypoint:
|
entrypoint:
|
||||||
- scorecard-test
|
- scorecard-test
|
||||||
- olm-crds-have-resources
|
- olm-crds-have-resources
|
||||||
image: quay.io/operator-framework/scorecard-test:v1.23.0
|
image: quay.io/operator-framework/scorecard-test:v1.19.0
|
||||||
labels:
|
labels:
|
||||||
suite: olm
|
suite: olm
|
||||||
test: olm-crds-have-resources-test
|
test: olm-crds-have-resources-test
|
||||||
@@ -34,7 +34,7 @@
|
|||||||
entrypoint:
|
entrypoint:
|
||||||
- scorecard-test
|
- scorecard-test
|
||||||
- olm-spec-descriptors
|
- olm-spec-descriptors
|
||||||
image: quay.io/operator-framework/scorecard-test:v1.23.0
|
image: quay.io/operator-framework/scorecard-test:v1.19.0
|
||||||
labels:
|
labels:
|
||||||
suite: olm
|
suite: olm
|
||||||
test: olm-spec-descriptors-test
|
test: olm-spec-descriptors-test
|
||||||
@@ -44,7 +44,7 @@
|
|||||||
entrypoint:
|
entrypoint:
|
||||||
- scorecard-test
|
- scorecard-test
|
||||||
- olm-status-descriptors
|
- olm-status-descriptors
|
||||||
image: quay.io/operator-framework/scorecard-test:v1.23.0
|
image: quay.io/operator-framework/scorecard-test:v1.19.0
|
||||||
labels:
|
labels:
|
||||||
suite: olm
|
suite: olm
|
||||||
test: olm-status-descriptors-test
|
test: olm-status-descriptors-test
|
||||||
|
|||||||
@@ -1,41 +1,16 @@
|
|||||||
/*
|
|
||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2020-2022 1Password
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package controllers
|
package controllers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"regexp"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
|
||||||
|
|
||||||
"github.com/1Password/connect-sdk-go/connect"
|
|
||||||
|
|
||||||
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
|
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
|
||||||
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
||||||
"github.com/1Password/onepassword-operator/pkg/utils"
|
"github.com/1Password/onepassword-operator/pkg/utils"
|
||||||
|
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/1Password/connect-sdk-go/connect"
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
"k8s.io/apimachinery/pkg/api/errors"
|
||||||
@@ -44,49 +19,103 @@ import (
|
|||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
|
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||||
)
|
)
|
||||||
|
|
||||||
var logDeployment = logf.Log.WithName("controller_deployment")
|
var deploymentLog = logf.Log.WithName("controller_deployment")
|
||||||
|
var finalizer = "onepassword.com/finalizer.secret"
|
||||||
|
|
||||||
// DeploymentReconciler reconciles a Deployment object
|
const annotationRegExpString = "^operator.1password.io\\/[a-zA-Z\\.]+"
|
||||||
type DeploymentReconciler struct {
|
|
||||||
client.Client
|
func Add(mgr manager.Manager, opConnectClient connect.Client) error {
|
||||||
Scheme *runtime.Scheme
|
return add(mgr, newReconciler(mgr, opConnectClient))
|
||||||
OpConnectClient connect.Client
|
|
||||||
OpAnnotationRegExp *regexp.Regexp
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
|
func newReconciler(mgr manager.Manager, opConnectClient connect.Client) *ReconcileDeployment {
|
||||||
//+kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get;update;patch
|
r, _ := regexp.Compile(annotationRegExpString)
|
||||||
//+kubebuilder:rbac:groups=apps,resources=deployments/finalizers,verbs=update
|
return &ReconcileDeployment{
|
||||||
|
opAnnotationRegExp: r,
|
||||||
|
kubeClient: mgr.GetClient(),
|
||||||
|
scheme: mgr.GetScheme(),
|
||||||
|
opConnectClient: opConnectClient,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Reconcile is part of the main kubernetes reconciliation loop which aims to
|
func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
||||||
// move the current state of the cluster closer to the desired state.
|
c, err := controller.New("deployment-controller", mgr, controller.Options{Reconciler: r})
|
||||||
// TODO(user): Modify the Reconcile function to compare the state specified by
|
if err != nil {
|
||||||
// the OnePasswordItem object against the actual cluster state, and then
|
return err
|
||||||
// perform operations to make the cluster state reflect the state specified by
|
}
|
||||||
// the user.
|
|
||||||
//
|
// Watch for changes to primary resource Deployment
|
||||||
// For more details, check Reconcile and its Result here:
|
err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{})
|
||||||
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile
|
if err != nil {
|
||||||
func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
return err
|
||||||
reqLogger := logDeployment.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name)
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ reconcile.Reconciler = &ReconcileDeployment{}
|
||||||
|
|
||||||
|
type ReconcileDeployment struct {
|
||||||
|
opAnnotationRegExp *regexp.Regexp
|
||||||
|
kubeClient client.Client
|
||||||
|
scheme *runtime.Scheme
|
||||||
|
opConnectClient connect.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReconcileDeployment) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
|
|
||||||
|
c, err := controller.New("deployment-controller", mgr, controller.Options{Reconciler: r})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch for changes to primary resource Deployment
|
||||||
|
err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
// TODO figure out what to do with this code.
|
||||||
|
// return ctrl.NewControllerManagedBy(mgr).
|
||||||
|
// For(&appsv1.Deployment{}).
|
||||||
|
// Complete(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReconcileDeployment) test() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reconcile reads that state of the cluster for a Deployment object and makes changes based on the state read
|
||||||
|
// and what is in the Deployment.Spec
|
||||||
|
// Note:
|
||||||
|
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
|
||||||
|
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
|
||||||
|
func (r *ReconcileDeployment) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
|
||||||
|
reqLogger := deploymentLog.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
|
||||||
reqLogger.Info("Reconciling Deployment")
|
reqLogger.Info("Reconciling Deployment")
|
||||||
|
|
||||||
deployment := &appsv1.Deployment{}
|
deployment := &appsv1.Deployment{}
|
||||||
err := r.Get(context.Background(), req.NamespacedName, deployment)
|
err := r.kubeClient.Get(ctx, request.NamespacedName, deployment)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.IsNotFound(err) {
|
if errors.IsNotFound(err) {
|
||||||
return reconcile.Result{}, nil
|
return reconcile.Result{}, nil
|
||||||
}
|
}
|
||||||
return ctrl.Result{}, err
|
return reconcile.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
annotations, annotationsFound := op.GetAnnotationsForDeployment(deployment, r.OpAnnotationRegExp)
|
annotations, annotationsFound := op.GetAnnotationsForDeployment(deployment, r.opAnnotationRegExp)
|
||||||
if !annotationsFound {
|
if !annotationsFound {
|
||||||
reqLogger.Info("No 1Password Annotations found")
|
reqLogger.Info("No 1Password Annotations found")
|
||||||
return ctrl.Result{}, nil
|
return reconcile.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//If the deployment is not being deleted
|
//If the deployment is not being deleted
|
||||||
@@ -95,15 +124,15 @@ func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
|||||||
// This is so we can handle cleanup of associated secrets properly
|
// This is so we can handle cleanup of associated secrets properly
|
||||||
if !utils.ContainsString(deployment.ObjectMeta.Finalizers, finalizer) {
|
if !utils.ContainsString(deployment.ObjectMeta.Finalizers, finalizer) {
|
||||||
deployment.ObjectMeta.Finalizers = append(deployment.ObjectMeta.Finalizers, finalizer)
|
deployment.ObjectMeta.Finalizers = append(deployment.ObjectMeta.Finalizers, finalizer)
|
||||||
if err := r.Update(context.Background(), deployment); err != nil {
|
if err := r.kubeClient.Update(context.Background(), deployment); err != nil {
|
||||||
return reconcile.Result{}, err
|
return reconcile.Result{}, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Handles creation or updating secrets for deployment if needed
|
// Handles creation or updating secrets for deployment if needed
|
||||||
if err := r.handleApplyingDeployment(deployment, deployment.Namespace, annotations, req); err != nil {
|
if err := r.HandleApplyingDeployment(deployment, deployment.Namespace, annotations, request); err != nil {
|
||||||
return ctrl.Result{}, err
|
return reconcile.Result{}, err
|
||||||
}
|
}
|
||||||
return ctrl.Result{}, nil
|
return reconcile.Result{}, nil
|
||||||
}
|
}
|
||||||
// The deployment has been marked for deletion. If the one password
|
// The deployment has been marked for deletion. If the one password
|
||||||
// finalizer is found there are cleanup tasks to perform
|
// finalizer is found there are cleanup tasks to perform
|
||||||
@@ -117,17 +146,10 @@ func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
|||||||
return reconcile.Result{}, err
|
return reconcile.Result{}, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return ctrl.Result{}, nil
|
return reconcile.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetupWithManager sets up the controller with the Manager.
|
func (r *ReconcileDeployment) cleanupKubernetesSecretForDeployment(secretName string, deletedDeployment *appsv1.Deployment) error {
|
||||||
func (r *DeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
|
||||||
return ctrl.NewControllerManagedBy(mgr).
|
|
||||||
For(&appsv1.Deployment{}).
|
|
||||||
Complete(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *DeploymentReconciler) cleanupKubernetesSecretForDeployment(secretName string, deletedDeployment *appsv1.Deployment) error {
|
|
||||||
kubernetesSecret := &corev1.Secret{}
|
kubernetesSecret := &corev1.Secret{}
|
||||||
kubernetesSecret.ObjectMeta.Name = secretName
|
kubernetesSecret.ObjectMeta.Name = secretName
|
||||||
kubernetesSecret.ObjectMeta.Namespace = deletedDeployment.Namespace
|
kubernetesSecret.ObjectMeta.Namespace = deletedDeployment.Namespace
|
||||||
@@ -144,7 +166,7 @@ func (r *DeploymentReconciler) cleanupKubernetesSecretForDeployment(secretName s
|
|||||||
|
|
||||||
// Only delete the associated kubernetes secret if it is not being used by other deployments
|
// Only delete the associated kubernetes secret if it is not being used by other deployments
|
||||||
if !multipleDeploymentsUsingSecret {
|
if !multipleDeploymentsUsingSecret {
|
||||||
if err := r.Delete(context.Background(), kubernetesSecret); err != nil {
|
if err := r.kubeClient.Delete(context.Background(), kubernetesSecret); err != nil {
|
||||||
if !errors.IsNotFound(err) {
|
if !errors.IsNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -153,15 +175,15 @@ func (r *DeploymentReconciler) cleanupKubernetesSecretForDeployment(secretName s
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DeploymentReconciler) areMultipleDeploymentsUsingSecret(updatedSecrets map[string]*corev1.Secret, deletedDeployment appsv1.Deployment) (bool, error) {
|
func (r *ReconcileDeployment) areMultipleDeploymentsUsingSecret(updatedSecrets map[string]*corev1.Secret, deletedDeployment appsv1.Deployment) (bool, error) {
|
||||||
deployments := &appsv1.DeploymentList{}
|
deployments := &appsv1.DeploymentList{}
|
||||||
opts := []client.ListOption{
|
opts := []client.ListOption{
|
||||||
client.InNamespace(deletedDeployment.Namespace),
|
client.InNamespace(deletedDeployment.Namespace),
|
||||||
}
|
}
|
||||||
|
|
||||||
err := r.List(context.Background(), deployments, opts...)
|
err := r.kubeClient.List(context.Background(), deployments, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logDeployment.Error(err, "Failed to list kubernetes deployments")
|
deploymentLog.Error(err, "Failed to list kubernetes deployments")
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -175,30 +197,30 @@ func (r *DeploymentReconciler) areMultipleDeploymentsUsingSecret(updatedSecrets
|
|||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DeploymentReconciler) removeOnePasswordFinalizerFromDeployment(deployment *appsv1.Deployment) error {
|
func (r *ReconcileDeployment) removeOnePasswordFinalizerFromDeployment(deployment *appsv1.Deployment) error {
|
||||||
deployment.ObjectMeta.Finalizers = utils.RemoveString(deployment.ObjectMeta.Finalizers, finalizer)
|
deployment.ObjectMeta.Finalizers = utils.RemoveString(deployment.ObjectMeta.Finalizers, finalizer)
|
||||||
return r.Update(context.Background(), deployment)
|
return r.kubeClient.Update(context.Background(), deployment)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DeploymentReconciler) handleApplyingDeployment(deployment *appsv1.Deployment, namespace string, annotations map[string]string, request reconcile.Request) error {
|
func (r *ReconcileDeployment) HandleApplyingDeployment(deployment *appsv1.Deployment, namespace string, annotations map[string]string, request reconcile.Request) error {
|
||||||
reqLog := logDeployment.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
|
reqLog := deploymentLog.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
|
||||||
|
|
||||||
secretName := annotations[op.NameAnnotation]
|
secretName := annotations[op.NameAnnotation]
|
||||||
secretLabels := map[string]string(nil)
|
secretLabels := map[string]string(nil)
|
||||||
secretType := string(corev1.SecretTypeOpaque)
|
secretType := ""
|
||||||
|
|
||||||
if len(secretName) == 0 {
|
if len(secretName) == 0 {
|
||||||
reqLog.Info("No 'item-name' annotation set. 'item-path' and 'item-name' must be set as annotations to add new secret.")
|
reqLog.Info("No 'item-name' annotation set. 'item-path' and 'item-name' must be set as annotations to add new secret.")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
item, err := op.GetOnePasswordItemByPath(r.OpConnectClient, annotations[op.ItemPathAnnotation])
|
item, err := op.GetOnePasswordItemByPath(r.opConnectClient, annotations[op.ItemPathAnnotation])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Failed to retrieve item: %v", err)
|
return fmt.Errorf("Failed to retrieve item: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create owner reference.
|
// Create owner reference.
|
||||||
gvk, err := apiutil.GVKForObject(deployment, r.Scheme)
|
gvk, err := apiutil.GVKForObject(deployment, r.scheme)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("could not to retrieve group version kind: %v", err)
|
return fmt.Errorf("could not to retrieve group version kind: %v", err)
|
||||||
}
|
}
|
||||||
@@ -209,5 +231,5 @@ func (r *DeploymentReconciler) handleApplyingDeployment(deployment *appsv1.Deplo
|
|||||||
UID: deployment.GetUID(),
|
UID: deployment.GetUID(),
|
||||||
}
|
}
|
||||||
|
|
||||||
return kubeSecrets.CreateKubernetesSecretFromItem(r.Client, secretName, namespace, item, annotations[op.RestartDeploymentsAnnotation], secretLabels, secretType, ownerRef)
|
return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, namespace, item, annotations[op.RestartDeploymentsAnnotation], secretLabels, secretType, annotations, ownerRef)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,418 +0,0 @@
|
|||||||
package controllers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"github.com/1Password/connect-sdk-go/onepassword"
|
|
||||||
"github.com/1Password/onepassword-operator/pkg/mocks"
|
|
||||||
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo/v2"
|
|
||||||
. "github.com/onsi/gomega"
|
|
||||||
|
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/types"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
|
||||||
|
|
||||||
onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
deploymentKind = "Deployment"
|
|
||||||
deploymentAPIVersion = "v1"
|
|
||||||
deploymentName = "test-deployment"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ = Describe("Deployment controller", func() {
|
|
||||||
var ctx context.Context
|
|
||||||
var deploymentKey types.NamespacedName
|
|
||||||
var secretKey types.NamespacedName
|
|
||||||
var deploymentResource *appsv1.Deployment
|
|
||||||
createdSecret := &v1.Secret{}
|
|
||||||
|
|
||||||
makeDeployment := func() {
|
|
||||||
ctx = context.Background()
|
|
||||||
|
|
||||||
deploymentKey = types.NamespacedName{
|
|
||||||
Name: deploymentName,
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
secretKey = types.NamespacedName{
|
|
||||||
Name: item1.Name,
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
By("Deploying a pod with proper annotations successfully")
|
|
||||||
deploymentResource = &appsv1.Deployment{
|
|
||||||
TypeMeta: metav1.TypeMeta{
|
|
||||||
Kind: deploymentKind,
|
|
||||||
APIVersion: deploymentAPIVersion,
|
|
||||||
},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: deploymentKey.Name,
|
|
||||||
Namespace: deploymentKey.Namespace,
|
|
||||||
Annotations: map[string]string{
|
|
||||||
op.ItemPathAnnotation: item1.Path,
|
|
||||||
op.NameAnnotation: item1.Name,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Spec: appsv1.DeploymentSpec{
|
|
||||||
Template: v1.PodTemplateSpec{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Labels: map[string]string{"app": deploymentName},
|
|
||||||
},
|
|
||||||
Spec: v1.PodSpec{
|
|
||||||
Containers: []v1.Container{
|
|
||||||
{
|
|
||||||
Name: deploymentName,
|
|
||||||
Image: "eu.gcr.io/kyma-project/example/http-db-service:0.0.6",
|
|
||||||
ImagePullPolicy: "IfNotPresent",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Selector: &metav1.LabelSelector{
|
|
||||||
MatchLabels: map[string]string{"app": deploymentName},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
Expect(k8sClient.Create(ctx, deploymentResource)).Should(Succeed())
|
|
||||||
|
|
||||||
By("Creating the K8s secret successfully")
|
|
||||||
time.Sleep(time.Millisecond * 100)
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, secretKey, createdSecret)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
Expect(createdSecret.Data).Should(Equal(item1.SecretData))
|
|
||||||
}
|
|
||||||
|
|
||||||
cleanK8sResources := func() {
|
|
||||||
// failed test runs that don't clean up leave resources behind.
|
|
||||||
err := k8sClient.DeleteAllOf(context.Background(), &onepasswordv1.OnePasswordItem{}, client.InNamespace(namespace))
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
|
|
||||||
err = k8sClient.DeleteAllOf(context.Background(), &v1.Secret{}, client.InNamespace(namespace))
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
|
|
||||||
err = k8sClient.DeleteAllOf(context.Background(), &appsv1.Deployment{}, client.InNamespace(namespace))
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
}
|
|
||||||
|
|
||||||
mockGetItemFunc := func() {
|
|
||||||
mocks.DoGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
|
||||||
item := onepassword.Item{}
|
|
||||||
item.Fields = []*onepassword.ItemField{}
|
|
||||||
for k, v := range item1.Data {
|
|
||||||
item.Fields = append(item.Fields, &onepassword.ItemField{Label: k, Value: v})
|
|
||||||
}
|
|
||||||
item.Version = item1.Version
|
|
||||||
item.Vault.ID = vaultUUID
|
|
||||||
item.ID = uuid
|
|
||||||
return &item, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
BeforeEach(func() {
|
|
||||||
cleanK8sResources()
|
|
||||||
mockGetItemFunc()
|
|
||||||
time.Sleep(time.Second) // TODO: can we achieve that with ginkgo?
|
|
||||||
makeDeployment()
|
|
||||||
})
|
|
||||||
|
|
||||||
Context("Deployment with secrets from 1Password", func() {
|
|
||||||
It("Should delete secret if deployment is deleted", func() {
|
|
||||||
By("Deleting the pod")
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &appsv1.Deployment{}
|
|
||||||
err := k8sClient.Get(ctx, deploymentKey, f)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return k8sClient.Delete(ctx, f)
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &appsv1.Deployment{}
|
|
||||||
return k8sClient.Get(ctx, deploymentKey, f)
|
|
||||||
}, timeout, interval).ShouldNot(Succeed())
|
|
||||||
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &v1.Secret{}
|
|
||||||
return k8sClient.Get(ctx, secretKey, f)
|
|
||||||
}, timeout, interval).ShouldNot(Succeed())
|
|
||||||
})
|
|
||||||
|
|
||||||
It("Should update existing K8s Secret using deployment", func() {
|
|
||||||
By("Updating secret")
|
|
||||||
mocks.DoGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
|
||||||
item := onepassword.Item{}
|
|
||||||
item.Fields = []*onepassword.ItemField{}
|
|
||||||
for k, v := range item2.Data {
|
|
||||||
item.Fields = append(item.Fields, &onepassword.ItemField{Label: k, Value: v})
|
|
||||||
}
|
|
||||||
item.Version = item2.Version
|
|
||||||
item.Vault.ID = vaultUUID
|
|
||||||
item.ID = uuid
|
|
||||||
return &item, nil
|
|
||||||
}
|
|
||||||
Eventually(func() error {
|
|
||||||
updatedDeployment := &appsv1.Deployment{
|
|
||||||
TypeMeta: metav1.TypeMeta{
|
|
||||||
Kind: deploymentKind,
|
|
||||||
APIVersion: deploymentAPIVersion,
|
|
||||||
},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: deploymentKey.Name,
|
|
||||||
Namespace: deploymentKey.Namespace,
|
|
||||||
Annotations: map[string]string{
|
|
||||||
op.ItemPathAnnotation: item2.Path,
|
|
||||||
op.NameAnnotation: item1.Name,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Spec: appsv1.DeploymentSpec{
|
|
||||||
Template: v1.PodTemplateSpec{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Labels: map[string]string{"app": deploymentName},
|
|
||||||
},
|
|
||||||
Spec: v1.PodSpec{
|
|
||||||
Containers: []v1.Container{
|
|
||||||
{
|
|
||||||
Name: deploymentName,
|
|
||||||
Image: "eu.gcr.io/kyma-project/example/http-db-service:0.0.6",
|
|
||||||
ImagePullPolicy: "IfNotPresent",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Selector: &metav1.LabelSelector{
|
|
||||||
MatchLabels: map[string]string{"app": deploymentName},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
err := k8sClient.Update(ctx, updatedDeployment)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
|
|
||||||
// TODO: can we achieve the same without sleep?
|
|
||||||
time.Sleep(time.Millisecond * 10)
|
|
||||||
By("Reading updated K8s secret")
|
|
||||||
updatedSecret := &v1.Secret{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, secretKey, updatedSecret)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
Expect(updatedSecret.Data).Should(Equal(item2.SecretData))
|
|
||||||
})
|
|
||||||
|
|
||||||
It("Should not update secret if Annotations have not changed", func() {
|
|
||||||
By("Updating secret without changing annotations")
|
|
||||||
Eventually(func() error {
|
|
||||||
updatedDeployment := &appsv1.Deployment{
|
|
||||||
TypeMeta: metav1.TypeMeta{
|
|
||||||
Kind: deploymentKind,
|
|
||||||
APIVersion: deploymentAPIVersion,
|
|
||||||
},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: deploymentKey.Name,
|
|
||||||
Namespace: deploymentKey.Namespace,
|
|
||||||
Annotations: map[string]string{
|
|
||||||
op.ItemPathAnnotation: item1.Path,
|
|
||||||
op.NameAnnotation: item1.Name,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Spec: appsv1.DeploymentSpec{
|
|
||||||
Template: v1.PodTemplateSpec{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Labels: map[string]string{"app": deploymentName},
|
|
||||||
},
|
|
||||||
Spec: v1.PodSpec{
|
|
||||||
Containers: []v1.Container{
|
|
||||||
{
|
|
||||||
Name: deploymentName,
|
|
||||||
Image: "eu.gcr.io/kyma-project/example/http-db-service:0.0.6",
|
|
||||||
ImagePullPolicy: "IfNotPresent",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Selector: &metav1.LabelSelector{
|
|
||||||
MatchLabels: map[string]string{"app": deploymentName},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
err := k8sClient.Update(ctx, updatedDeployment)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
|
|
||||||
// TODO: can we achieve the same without sleep?
|
|
||||||
time.Sleep(time.Millisecond * 10)
|
|
||||||
By("Reading updated K8s secret")
|
|
||||||
updatedSecret := &v1.Secret{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, secretKey, updatedSecret)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
Expect(updatedSecret.Data).Should(Equal(item1.SecretData))
|
|
||||||
})
|
|
||||||
|
|
||||||
It("Should not delete secret created via deployment if it's used in another container", func() {
|
|
||||||
By("Creating another POD with created secret")
|
|
||||||
anotherDeploymentKey := types.NamespacedName{
|
|
||||||
Name: "other-deployment",
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
Eventually(func() error {
|
|
||||||
anotherDeployment := &appsv1.Deployment{
|
|
||||||
TypeMeta: metav1.TypeMeta{
|
|
||||||
Kind: deploymentKind,
|
|
||||||
APIVersion: deploymentAPIVersion,
|
|
||||||
},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: anotherDeploymentKey.Name,
|
|
||||||
Namespace: anotherDeploymentKey.Namespace,
|
|
||||||
},
|
|
||||||
Spec: appsv1.DeploymentSpec{
|
|
||||||
Template: v1.PodTemplateSpec{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Labels: map[string]string{"app": anotherDeploymentKey.Name},
|
|
||||||
},
|
|
||||||
Spec: v1.PodSpec{
|
|
||||||
Containers: []v1.Container{
|
|
||||||
{
|
|
||||||
Name: anotherDeploymentKey.Name,
|
|
||||||
Image: "eu.gcr.io/kyma-project/example/http-db-service:0.0.6",
|
|
||||||
ImagePullPolicy: "IfNotPresent",
|
|
||||||
Env: []v1.EnvVar{
|
|
||||||
{
|
|
||||||
Name: anotherDeploymentKey.Name,
|
|
||||||
ValueFrom: &v1.EnvVarSource{
|
|
||||||
SecretKeyRef: &v1.SecretKeySelector{
|
|
||||||
LocalObjectReference: v1.LocalObjectReference{
|
|
||||||
Name: secretKey.Name,
|
|
||||||
},
|
|
||||||
Key: "password",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Selector: &metav1.LabelSelector{
|
|
||||||
MatchLabels: map[string]string{"app": anotherDeploymentKey.Name},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
err := k8sClient.Create(ctx, anotherDeployment)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
|
|
||||||
By("Deleting the pod")
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &appsv1.Deployment{}
|
|
||||||
err := k8sClient.Get(ctx, deploymentKey, f)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return k8sClient.Delete(ctx, f)
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &v1.Secret{}
|
|
||||||
return k8sClient.Get(ctx, secretKey, f)
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
})
|
|
||||||
|
|
||||||
It("Should not delete secret created via deployment if it's used in another volume", func() {
|
|
||||||
By("Creating another POD with created secret")
|
|
||||||
anotherDeploymentKey := types.NamespacedName{
|
|
||||||
Name: "other-deployment",
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
Eventually(func() error {
|
|
||||||
anotherDeployment := &appsv1.Deployment{
|
|
||||||
TypeMeta: metav1.TypeMeta{
|
|
||||||
Kind: deploymentKind,
|
|
||||||
APIVersion: deploymentAPIVersion,
|
|
||||||
},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: anotherDeploymentKey.Name,
|
|
||||||
Namespace: anotherDeploymentKey.Namespace,
|
|
||||||
},
|
|
||||||
Spec: appsv1.DeploymentSpec{
|
|
||||||
Template: v1.PodTemplateSpec{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Labels: map[string]string{"app": anotherDeploymentKey.Name},
|
|
||||||
},
|
|
||||||
Spec: v1.PodSpec{
|
|
||||||
Volumes: []v1.Volume{
|
|
||||||
{
|
|
||||||
Name: anotherDeploymentKey.Name,
|
|
||||||
VolumeSource: v1.VolumeSource{
|
|
||||||
Secret: &v1.SecretVolumeSource{
|
|
||||||
SecretName: secretKey.Name,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Containers: []v1.Container{
|
|
||||||
{
|
|
||||||
Name: anotherDeploymentKey.Name,
|
|
||||||
Image: "eu.gcr.io/kyma-project/example/http-db-service:0.0.6",
|
|
||||||
ImagePullPolicy: "IfNotPresent",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Selector: &metav1.LabelSelector{
|
|
||||||
MatchLabels: map[string]string{"app": anotherDeploymentKey.Name},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
err := k8sClient.Create(ctx, anotherDeployment)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
|
|
||||||
By("Deleting the pod")
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &appsv1.Deployment{}
|
|
||||||
err := k8sClient.Get(ctx, deploymentKey, f)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return k8sClient.Delete(ctx, f)
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &v1.Secret{}
|
|
||||||
return k8sClient.Get(ctx, secretKey, f)
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
@@ -1,25 +1,17 @@
|
|||||||
/*
|
/*
|
||||||
MIT License
|
Copyright 2022.
|
||||||
|
|
||||||
Copyright (c) 2020-2022 1Password
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
Unless required by applicable law or agreed to in writing, software
|
||||||
copies or substantial portions of the Software.
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
See the License for the specific language governing permissions and
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
limitations under the License.
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package controllers
|
package controllers
|
||||||
@@ -28,44 +20,43 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/1Password/connect-sdk-go/connect"
|
"github.com/1Password/onepassword-operator/pkg/onepassword"
|
||||||
|
|
||||||
onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
|
|
||||||
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
|
|
||||||
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
||||||
"github.com/1Password/onepassword-operator/pkg/utils"
|
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
"k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
kubeClient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||||
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
|
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||||
|
|
||||||
|
"github.com/1Password/onepassword-operator/pkg/utils"
|
||||||
|
|
||||||
|
"github.com/1Password/connect-sdk-go/connect"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
|
||||||
|
onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
var logOnePasswordItem = logf.Log.WithName("controller_onepassworditem")
|
var log = logf.Log.WithName("controller_onepassworditem")
|
||||||
var finalizer = "onepassword.com/finalizer.secret"
|
|
||||||
|
|
||||||
// OnePasswordItemReconciler reconciles a OnePasswordItem object
|
// OnePasswordItemReconciler reconciles a OnePasswordItem object
|
||||||
type OnePasswordItemReconciler struct {
|
type OnePasswordItemReconciler struct {
|
||||||
client.Client
|
Client kubeClient.Client
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
OpConnectClient connect.Client
|
OpConnectClient connect.Client
|
||||||
}
|
}
|
||||||
|
|
||||||
//+kubebuilder:rbac:groups=onepassword.com,resources=onepassworditems,verbs=get;list;watch;create;update;patch;delete
|
//+kubebuilder:rbac:groups=onepassword.onepassword.com,resources=onepassworditems,verbs=get;list;watch;create;update;patch;delete
|
||||||
//+kubebuilder:rbac:groups=onepassword.com,resources=onepassworditems/status,verbs=get;update;patch
|
//+kubebuilder:rbac:groups=onepassword.onepassword.com,resources=onepassworditems/status,verbs=get;update;patch
|
||||||
//+kubebuilder:rbac:groups=onepassword.com,resources=onepassworditems/finalizers,verbs=update
|
//+kubebuilder:rbac:groups=onepassword.onepassword.com,resources=onepassworditems/finalizers,verbs=update
|
||||||
|
|
||||||
//+kubebuilder:rbac:groups="",resources=pods,verbs=get
|
|
||||||
//+kubebuilder:rbac:groups="",resources=pods;services;services/finalizers;endpoints;persistentvolumeclaims;events;configmaps;secrets;namespaces,verbs=get;list;watch;create;update;patch;delete
|
|
||||||
//+kubebuilder:rbac:groups=apps,resources=daemonsets;deployments;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete
|
|
||||||
//+kubebuilder:rbac:groups=apps,resources=replicasets;deployments,verbs=get
|
|
||||||
//+kubebuilder:rbac:groups=apps,resourceNames=onepassword-connect-operator,resources=deployments/finalizers,verbs=update
|
|
||||||
//+kubebuilder:rbac:groups=onepassword.com,resources=*,verbs=get;list;watch;create;update;patch;delete
|
|
||||||
//+kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;create
|
|
||||||
|
|
||||||
// Reconcile is part of the main kubernetes reconciliation loop which aims to
|
// Reconcile is part of the main kubernetes reconciliation loop which aims to
|
||||||
// move the current state of the cluster closer to the desired state.
|
// move the current state of the cluster closer to the desired state.
|
||||||
@@ -75,18 +66,18 @@ type OnePasswordItemReconciler struct {
|
|||||||
// the user.
|
// the user.
|
||||||
//
|
//
|
||||||
// For more details, check Reconcile and its Result here:
|
// For more details, check Reconcile and its Result here:
|
||||||
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile
|
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile
|
||||||
func (r *OnePasswordItemReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
func (r *OnePasswordItemReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
|
||||||
reqLogger := logOnePasswordItem.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name)
|
reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
|
||||||
reqLogger.Info("Reconciling OnePasswordItem")
|
reqLogger.Info("Reconciling OnePasswordItem")
|
||||||
|
|
||||||
onepassworditem := &onepasswordv1.OnePasswordItem{}
|
onepassworditem := &onepasswordv1.OnePasswordItem{}
|
||||||
err := r.Get(context.Background(), req.NamespacedName, onepassworditem)
|
err := r.Client.Get(context.Background(), request.NamespacedName, onepassworditem)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.IsNotFound(err) {
|
if errors.IsNotFound(err) {
|
||||||
return ctrl.Result{}, nil
|
return reconcile.Result{}, nil
|
||||||
}
|
}
|
||||||
return ctrl.Result{}, err
|
return reconcile.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the deployment is not being deleted
|
// If the deployment is not being deleted
|
||||||
@@ -95,44 +86,56 @@ func (r *OnePasswordItemReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
|||||||
// This is so we can handle cleanup of associated secrets properly
|
// This is so we can handle cleanup of associated secrets properly
|
||||||
if !utils.ContainsString(onepassworditem.ObjectMeta.Finalizers, finalizer) {
|
if !utils.ContainsString(onepassworditem.ObjectMeta.Finalizers, finalizer) {
|
||||||
onepassworditem.ObjectMeta.Finalizers = append(onepassworditem.ObjectMeta.Finalizers, finalizer)
|
onepassworditem.ObjectMeta.Finalizers = append(onepassworditem.ObjectMeta.Finalizers, finalizer)
|
||||||
if err := r.Update(context.Background(), onepassworditem); err != nil {
|
if err := r.Client.Update(context.Background(), onepassworditem); err != nil {
|
||||||
return ctrl.Result{}, err
|
return reconcile.Result{}, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handles creation or updating secrets for deployment if needed
|
// Handles creation or updating secrets for deployment if needed
|
||||||
err := r.handleOnePasswordItem(onepassworditem, req)
|
if err := r.HandleOnePasswordItem(onepassworditem, request); err != nil {
|
||||||
if updateStatusErr := r.updateStatus(onepassworditem, err); updateStatusErr != nil {
|
return reconcile.Result{}, err
|
||||||
return ctrl.Result{}, fmt.Errorf("cannot update status: %s", updateStatusErr)
|
|
||||||
}
|
}
|
||||||
return ctrl.Result{}, err
|
return reconcile.Result{}, nil
|
||||||
}
|
}
|
||||||
// If one password finalizer exists then we must cleanup associated secrets
|
// If one password finalizer exists then we must cleanup associated secrets
|
||||||
if utils.ContainsString(onepassworditem.ObjectMeta.Finalizers, finalizer) {
|
if utils.ContainsString(onepassworditem.ObjectMeta.Finalizers, finalizer) {
|
||||||
|
|
||||||
// Delete associated kubernetes secret
|
// Delete associated kubernetes secret
|
||||||
if err = r.cleanupKubernetesSecret(onepassworditem); err != nil {
|
if err = r.cleanupKubernetesSecret(onepassworditem); err != nil {
|
||||||
return ctrl.Result{}, err
|
return reconcile.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove finalizer now that cleanup is complete
|
// Remove finalizer now that cleanup is complete
|
||||||
if err := r.removeFinalizer(onepassworditem); err != nil {
|
if err := r.removeFinalizer(onepassworditem); err != nil {
|
||||||
return ctrl.Result{}, err
|
return reconcile.Result{}, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return ctrl.Result{}, nil
|
return reconcile.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetupWithManager sets up the controller with the Manager.
|
// SetupWithManager sets up the controller with the Manager.
|
||||||
func (r *OnePasswordItemReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
func (r *OnePasswordItemReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
return ctrl.NewControllerManagedBy(mgr).
|
c, err := controller.New("onepassworditem-controller", mgr, controller.Options{Reconciler: r})
|
||||||
For(&onepasswordv1.OnePasswordItem{}).
|
if err != nil {
|
||||||
Complete(r)
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch for changes to primary resource OnePasswordItem
|
||||||
|
err = c.Watch(&source.Kind{Type: &onepasswordv1.OnePasswordItem{}}, &handler.EnqueueRequestForObject{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
// TODO Consider the simplified code below. Based on the migration guide: https://sdk.operatorframework.io/docs/building-operators/golang/migration/#create-a-new-project
|
||||||
|
// return ctrl.NewControllerManagedBy(mgr).Named("onepassworditem-controller").WithOptions(controller.Options{Reconciler: r}).
|
||||||
|
// For(&onepasswordv1.OnePasswordItem{}).Watches(&source.Kind{Type: &onepasswordv1.OnePasswordItem{}}, &handler.EnqueueRequestForObject{}).
|
||||||
|
// Complete(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *OnePasswordItemReconciler) removeFinalizer(onePasswordItem *onepasswordv1.OnePasswordItem) error {
|
func (r *OnePasswordItemReconciler) removeFinalizer(onePasswordItem *onepasswordv1.OnePasswordItem) error {
|
||||||
onePasswordItem.ObjectMeta.Finalizers = utils.RemoveString(onePasswordItem.ObjectMeta.Finalizers, finalizer)
|
onePasswordItem.ObjectMeta.Finalizers = utils.RemoveString(onePasswordItem.ObjectMeta.Finalizers, finalizer)
|
||||||
if err := r.Update(context.Background(), onePasswordItem); err != nil {
|
if err := r.Client.Update(context.Background(), onePasswordItem); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@@ -143,8 +146,8 @@ func (r *OnePasswordItemReconciler) cleanupKubernetesSecret(onePasswordItem *one
|
|||||||
kubernetesSecret.ObjectMeta.Name = onePasswordItem.Name
|
kubernetesSecret.ObjectMeta.Name = onePasswordItem.Name
|
||||||
kubernetesSecret.ObjectMeta.Namespace = onePasswordItem.Namespace
|
kubernetesSecret.ObjectMeta.Namespace = onePasswordItem.Namespace
|
||||||
|
|
||||||
r.Delete(context.Background(), kubernetesSecret)
|
r.Client.Delete(context.Background(), kubernetesSecret)
|
||||||
if err := r.Delete(context.Background(), kubernetesSecret); err != nil {
|
if err := r.Client.Delete(context.Background(), kubernetesSecret); err != nil {
|
||||||
if !errors.IsNotFound(err) {
|
if !errors.IsNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -152,18 +155,14 @@ func (r *OnePasswordItemReconciler) cleanupKubernetesSecret(onePasswordItem *one
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *OnePasswordItemReconciler) removeOnePasswordFinalizerFromOnePasswordItem(opSecret *onepasswordv1.OnePasswordItem) error {
|
func (r *OnePasswordItemReconciler) HandleOnePasswordItem(resource *onepasswordv1.OnePasswordItem, request reconcile.Request) error {
|
||||||
opSecret.ObjectMeta.Finalizers = utils.RemoveString(opSecret.ObjectMeta.Finalizers, finalizer)
|
|
||||||
return r.Update(context.Background(), opSecret)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *OnePasswordItemReconciler) handleOnePasswordItem(resource *onepasswordv1.OnePasswordItem, req ctrl.Request) error {
|
|
||||||
secretName := resource.GetName()
|
secretName := resource.GetName()
|
||||||
labels := resource.Labels
|
labels := resource.Labels
|
||||||
|
annotations := resource.Annotations
|
||||||
secretType := resource.Type
|
secretType := resource.Type
|
||||||
autoRestart := resource.Annotations[op.RestartDeploymentsAnnotation]
|
autoRestart := annotations[op.RestartDeploymentsAnnotation]
|
||||||
|
|
||||||
item, err := op.GetOnePasswordItemByPath(r.OpConnectClient, resource.Spec.ItemPath)
|
item, err := onepassword.GetOnePasswordItemByPath(r.OpConnectClient, resource.Spec.ItemPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Failed to retrieve item: %v", err)
|
return fmt.Errorf("Failed to retrieve item: %v", err)
|
||||||
}
|
}
|
||||||
@@ -180,36 +179,5 @@ func (r *OnePasswordItemReconciler) handleOnePasswordItem(resource *onepasswordv
|
|||||||
UID: resource.GetUID(),
|
UID: resource.GetUID(),
|
||||||
}
|
}
|
||||||
|
|
||||||
return kubeSecrets.CreateKubernetesSecretFromItem(r.Client, secretName, resource.Namespace, item, autoRestart, labels, secretType, ownerRef)
|
return kubeSecrets.CreateKubernetesSecretFromItem(r.Client, secretName, resource.Namespace, item, autoRestart, labels, secretType, annotations, ownerRef)
|
||||||
}
|
|
||||||
|
|
||||||
func (r *OnePasswordItemReconciler) updateStatus(resource *onepasswordv1.OnePasswordItem, err error) error {
|
|
||||||
existingCondition := findCondition(resource.Status.Conditions, onepasswordv1.OnePasswordItemReady)
|
|
||||||
updatedCondition := existingCondition
|
|
||||||
if err != nil {
|
|
||||||
updatedCondition.Message = err.Error()
|
|
||||||
updatedCondition.Status = metav1.ConditionFalse
|
|
||||||
} else {
|
|
||||||
updatedCondition.Message = ""
|
|
||||||
updatedCondition.Status = metav1.ConditionTrue
|
|
||||||
}
|
|
||||||
|
|
||||||
if existingCondition.Status != updatedCondition.Status {
|
|
||||||
updatedCondition.LastTransitionTime = metav1.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
resource.Status.Conditions = []onepasswordv1.OnePasswordItemCondition{updatedCondition}
|
|
||||||
return r.Status().Update(context.Background(), resource)
|
|
||||||
}
|
|
||||||
|
|
||||||
func findCondition(conditions []onepasswordv1.OnePasswordItemCondition, t onepasswordv1.OnePasswordItemConditionType) onepasswordv1.OnePasswordItemCondition {
|
|
||||||
for _, c := range conditions {
|
|
||||||
if c.Type == t {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return onepasswordv1.OnePasswordItemCondition{
|
|
||||||
Type: t,
|
|
||||||
Status: metav1.ConditionUnknown,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,426 +0,0 @@
|
|||||||
package controllers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/1Password/connect-sdk-go/onepassword"
|
|
||||||
"github.com/1Password/onepassword-operator/pkg/mocks"
|
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo/v2"
|
|
||||||
. "github.com/onsi/gomega"
|
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/types"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
|
||||||
|
|
||||||
onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
firstHost = "http://localhost:8080"
|
|
||||||
awsKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
|
|
||||||
iceCream = "freezing blue 20%"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ = Describe("OnePasswordItem controller", func() {
|
|
||||||
BeforeEach(func() {
|
|
||||||
// failed test runs that don't clean up leave resources behind.
|
|
||||||
err := k8sClient.DeleteAllOf(context.Background(), &onepasswordv1.OnePasswordItem{}, client.InNamespace(namespace))
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
err = k8sClient.DeleteAllOf(context.Background(), &v1.Secret{}, client.InNamespace(namespace))
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
|
|
||||||
mocks.DoGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
|
||||||
item := onepassword.Item{}
|
|
||||||
item.Fields = []*onepassword.ItemField{}
|
|
||||||
for k, v := range item1.Data {
|
|
||||||
item.Fields = append(item.Fields, &onepassword.ItemField{Label: k, Value: v})
|
|
||||||
}
|
|
||||||
item.Version = item1.Version
|
|
||||||
item.Vault.ID = vaultUUID
|
|
||||||
item.ID = uuid
|
|
||||||
return &item, nil
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
Context("Happy path", func() {
|
|
||||||
It("Should handle 1Password Item and secret correctly", func() {
|
|
||||||
ctx := context.Background()
|
|
||||||
spec := onepasswordv1.OnePasswordItemSpec{
|
|
||||||
ItemPath: item1.Path,
|
|
||||||
}
|
|
||||||
|
|
||||||
key := types.NamespacedName{
|
|
||||||
Name: "sample-item",
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
toCreate := &onepasswordv1.OnePasswordItem{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: key.Name,
|
|
||||||
Namespace: key.Namespace,
|
|
||||||
},
|
|
||||||
Spec: spec,
|
|
||||||
}
|
|
||||||
|
|
||||||
By("Creating a new OnePasswordItem successfully")
|
|
||||||
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
|
|
||||||
|
|
||||||
created := &onepasswordv1.OnePasswordItem{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, key, created)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
|
|
||||||
By("Creating the K8s secret successfully")
|
|
||||||
createdSecret := &v1.Secret{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, key, createdSecret)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
Expect(createdSecret.Data).Should(Equal(item1.SecretData))
|
|
||||||
|
|
||||||
By("Updating existing secret successfully")
|
|
||||||
newData := map[string]string{
|
|
||||||
"username": "newUser1234",
|
|
||||||
"password": "##newPassword##",
|
|
||||||
"extraField": "dev",
|
|
||||||
}
|
|
||||||
newDataByte := map[string][]byte{
|
|
||||||
"username": []byte("newUser1234"),
|
|
||||||
"password": []byte("##newPassword##"),
|
|
||||||
"extraField": []byte("dev"),
|
|
||||||
}
|
|
||||||
mocks.DoGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
|
||||||
item := onepassword.Item{}
|
|
||||||
item.Fields = []*onepassword.ItemField{}
|
|
||||||
for k, v := range newData {
|
|
||||||
item.Fields = append(item.Fields, &onepassword.ItemField{Label: k, Value: v})
|
|
||||||
}
|
|
||||||
item.Version = item1.Version + 1
|
|
||||||
item.Vault.ID = vaultUUID
|
|
||||||
item.ID = uuid
|
|
||||||
return &item, nil
|
|
||||||
}
|
|
||||||
_, err := onePasswordItemReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key})
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
|
|
||||||
updatedSecret := &v1.Secret{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, key, updatedSecret)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
Expect(updatedSecret.Data).Should(Equal(newDataByte))
|
|
||||||
|
|
||||||
By("Deleting the OnePasswordItem successfully")
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &onepasswordv1.OnePasswordItem{}
|
|
||||||
err := k8sClient.Get(ctx, key, f)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return k8sClient.Delete(ctx, f)
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &onepasswordv1.OnePasswordItem{}
|
|
||||||
return k8sClient.Get(ctx, key, f)
|
|
||||||
}, timeout, interval).ShouldNot(Succeed())
|
|
||||||
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &v1.Secret{}
|
|
||||||
return k8sClient.Get(ctx, key, f)
|
|
||||||
}, timeout, interval).ShouldNot(Succeed())
|
|
||||||
})
|
|
||||||
|
|
||||||
It("Should handle 1Password Item with fields and sections that have invalid K8s labels correctly", func() {
|
|
||||||
ctx := context.Background()
|
|
||||||
spec := onepasswordv1.OnePasswordItemSpec{
|
|
||||||
ItemPath: item1.Path,
|
|
||||||
}
|
|
||||||
|
|
||||||
key := types.NamespacedName{
|
|
||||||
Name: "my-secret-it3m",
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
toCreate := &onepasswordv1.OnePasswordItem{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: key.Name,
|
|
||||||
Namespace: key.Namespace,
|
|
||||||
},
|
|
||||||
Spec: spec,
|
|
||||||
}
|
|
||||||
|
|
||||||
testData := map[string]string{
|
|
||||||
"username": username,
|
|
||||||
"password": password,
|
|
||||||
"first host": firstHost,
|
|
||||||
"AWS Access Key": awsKey,
|
|
||||||
"😄 ice-cream type": iceCream,
|
|
||||||
}
|
|
||||||
expectedData := map[string][]byte{
|
|
||||||
"username": []byte(username),
|
|
||||||
"password": []byte(password),
|
|
||||||
"first-host": []byte(firstHost),
|
|
||||||
"AWS-Access-Key": []byte(awsKey),
|
|
||||||
"ice-cream-type": []byte(iceCream),
|
|
||||||
}
|
|
||||||
|
|
||||||
mocks.DoGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
|
||||||
item := onepassword.Item{}
|
|
||||||
item.Title = "!my sECReT it3m%"
|
|
||||||
item.Fields = []*onepassword.ItemField{}
|
|
||||||
for k, v := range testData {
|
|
||||||
item.Fields = append(item.Fields, &onepassword.ItemField{Label: k, Value: v})
|
|
||||||
}
|
|
||||||
item.Version = item1.Version + 1
|
|
||||||
item.Vault.ID = vaultUUID
|
|
||||||
item.ID = uuid
|
|
||||||
return &item, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
By("Creating a new OnePasswordItem successfully")
|
|
||||||
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
|
|
||||||
|
|
||||||
created := &onepasswordv1.OnePasswordItem{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, key, created)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
|
|
||||||
By("Creating the K8s secret successfully")
|
|
||||||
createdSecret := &v1.Secret{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, key, createdSecret)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
Expect(createdSecret.Data).Should(Equal(expectedData))
|
|
||||||
|
|
||||||
By("Deleting the OnePasswordItem successfully")
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &onepasswordv1.OnePasswordItem{}
|
|
||||||
err := k8sClient.Get(ctx, key, f)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return k8sClient.Delete(ctx, f)
|
|
||||||
}, timeout, interval).Should(Succeed())
|
|
||||||
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &onepasswordv1.OnePasswordItem{}
|
|
||||||
return k8sClient.Get(ctx, key, f)
|
|
||||||
}, timeout, interval).ShouldNot(Succeed())
|
|
||||||
|
|
||||||
Eventually(func() error {
|
|
||||||
f := &v1.Secret{}
|
|
||||||
return k8sClient.Get(ctx, key, f)
|
|
||||||
}, timeout, interval).ShouldNot(Succeed())
|
|
||||||
})
|
|
||||||
|
|
||||||
It("Should not update K8s secret if OnePasswordItem Version or VaultPath has not changed", func() {
|
|
||||||
ctx := context.Background()
|
|
||||||
spec := onepasswordv1.OnePasswordItemSpec{
|
|
||||||
ItemPath: item1.Path,
|
|
||||||
}
|
|
||||||
|
|
||||||
key := types.NamespacedName{
|
|
||||||
Name: "item-not-updated",
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
toCreate := &onepasswordv1.OnePasswordItem{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: key.Name,
|
|
||||||
Namespace: key.Namespace,
|
|
||||||
},
|
|
||||||
Spec: spec,
|
|
||||||
}
|
|
||||||
|
|
||||||
By("Creating a new OnePasswordItem successfully")
|
|
||||||
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
|
|
||||||
|
|
||||||
item := &onepasswordv1.OnePasswordItem{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, key, item)
|
|
||||||
return err == nil
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
|
|
||||||
By("Creating the K8s secret successfully")
|
|
||||||
createdSecret := &v1.Secret{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, key, createdSecret)
|
|
||||||
return err == nil
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
Expect(createdSecret.Data).Should(Equal(item1.SecretData))
|
|
||||||
|
|
||||||
By("Updating OnePasswordItem type")
|
|
||||||
Eventually(func() bool {
|
|
||||||
err1 := k8sClient.Get(ctx, key, item)
|
|
||||||
if err1 != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
item.Type = string(v1.SecretTypeOpaque)
|
|
||||||
err := k8sClient.Update(ctx, item)
|
|
||||||
return err == nil
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
|
|
||||||
By("Reading K8s secret")
|
|
||||||
secret := &v1.Secret{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, key, secret)
|
|
||||||
return err == nil
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
Expect(secret.Data).Should(Equal(item1.SecretData))
|
|
||||||
})
|
|
||||||
|
|
||||||
It("Should create custom K8s Secret type using OnePasswordItem", func() {
|
|
||||||
const customType = "CustomType"
|
|
||||||
ctx := context.Background()
|
|
||||||
spec := onepasswordv1.OnePasswordItemSpec{
|
|
||||||
ItemPath: item1.Path,
|
|
||||||
}
|
|
||||||
|
|
||||||
key := types.NamespacedName{
|
|
||||||
Name: "item-custom-secret-type",
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
toCreate := &onepasswordv1.OnePasswordItem{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: key.Name,
|
|
||||||
Namespace: key.Namespace,
|
|
||||||
},
|
|
||||||
Spec: spec,
|
|
||||||
Type: customType,
|
|
||||||
}
|
|
||||||
|
|
||||||
By("Creating a new OnePasswordItem successfully")
|
|
||||||
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
|
|
||||||
|
|
||||||
By("Reading K8s secret")
|
|
||||||
secret := &v1.Secret{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, key, secret)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
Expect(secret.Type).Should(Equal(v1.SecretType(customType)))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
Context("Unhappy path", func() {
|
|
||||||
It("Should throw an error if K8s Secret type is changed", func() {
|
|
||||||
ctx := context.Background()
|
|
||||||
spec := onepasswordv1.OnePasswordItemSpec{
|
|
||||||
ItemPath: item1.Path,
|
|
||||||
}
|
|
||||||
|
|
||||||
key := types.NamespacedName{
|
|
||||||
Name: "item-changed-secret-type",
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
toCreate := &onepasswordv1.OnePasswordItem{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: key.Name,
|
|
||||||
Namespace: key.Namespace,
|
|
||||||
},
|
|
||||||
Spec: spec,
|
|
||||||
}
|
|
||||||
|
|
||||||
By("Creating a new OnePasswordItem successfully")
|
|
||||||
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
|
|
||||||
|
|
||||||
By("Reading K8s secret")
|
|
||||||
secret := &v1.Secret{}
|
|
||||||
Eventually(func() bool {
|
|
||||||
err := k8sClient.Get(ctx, key, secret)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeTrue())
|
|
||||||
|
|
||||||
By("Failing to update K8s secret")
|
|
||||||
Eventually(func() bool {
|
|
||||||
secret.Type = v1.SecretTypeBasicAuth
|
|
||||||
err := k8sClient.Update(ctx, secret)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}, timeout, interval).Should(BeFalse())
|
|
||||||
})
|
|
||||||
|
|
||||||
When("OnePasswordItem resource name contains `_`", func() {
|
|
||||||
It("Should fail creating a OnePasswordItem resource", func() {
|
|
||||||
ctx := context.Background()
|
|
||||||
spec := onepasswordv1.OnePasswordItemSpec{
|
|
||||||
ItemPath: item1.Path,
|
|
||||||
}
|
|
||||||
|
|
||||||
key := types.NamespacedName{
|
|
||||||
Name: "invalid_name",
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
toCreate := &onepasswordv1.OnePasswordItem{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: key.Name,
|
|
||||||
Namespace: key.Namespace,
|
|
||||||
},
|
|
||||||
Spec: spec,
|
|
||||||
}
|
|
||||||
|
|
||||||
By("Creating a new OnePasswordItem")
|
|
||||||
Expect(k8sClient.Create(ctx, toCreate)).To(HaveOccurred())
|
|
||||||
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
When("OnePasswordItem resource name contains capital letters", func() {
|
|
||||||
It("Should fail creating a OnePasswordItem resource", func() {
|
|
||||||
ctx := context.Background()
|
|
||||||
spec := onepasswordv1.OnePasswordItemSpec{
|
|
||||||
ItemPath: item1.Path,
|
|
||||||
}
|
|
||||||
|
|
||||||
key := types.NamespacedName{
|
|
||||||
Name: "invalidName",
|
|
||||||
Namespace: namespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
toCreate := &onepasswordv1.OnePasswordItem{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: key.Name,
|
|
||||||
Namespace: key.Namespace,
|
|
||||||
},
|
|
||||||
Spec: spec,
|
|
||||||
}
|
|
||||||
|
|
||||||
By("Creating a new OnePasswordItem")
|
|
||||||
Expect(k8sClient.Create(ctx, toCreate)).To(HaveOccurred())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
@@ -1,145 +1,68 @@
|
|||||||
/*
|
/*
|
||||||
MIT License
|
Copyright 2022.
|
||||||
|
|
||||||
Copyright (c) 2020-2022 1Password
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
Unless required by applicable law or agreed to in writing, software
|
||||||
copies or substantial portions of the Software.
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
See the License for the specific language governing permissions and
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
limitations under the License.
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package controllers
|
package controllers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/1Password/onepassword-operator/pkg/mocks"
|
. "github.com/onsi/ginkgo"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo/v2"
|
|
||||||
. "github.com/onsi/gomega"
|
. "github.com/onsi/gomega"
|
||||||
|
|
||||||
"k8s.io/client-go/kubernetes/scheme"
|
"k8s.io/client-go/kubernetes/scheme"
|
||||||
"k8s.io/client-go/rest"
|
"k8s.io/client-go/rest"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||||
|
|
||||||
onepasswordcomv1 "github.com/1Password/onepassword-operator/api/v1"
|
onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
|
||||||
//+kubebuilder:scaffold:imports
|
//+kubebuilder:scaffold:imports
|
||||||
)
|
)
|
||||||
|
|
||||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||||
|
|
||||||
const (
|
var cfg *rest.Config
|
||||||
username = "test-user"
|
var k8sClient client.Client
|
||||||
password = "QmHumKc$mUeEem7caHtbaBaJ"
|
var testEnv *envtest.Environment
|
||||||
|
|
||||||
username2 = "test-user2"
|
|
||||||
password2 = "4zotzqDqXKasLFT2jzTs"
|
|
||||||
|
|
||||||
annotationRegExpString = "^operator.1password.io\\/[a-zA-Z\\.]+"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Define utility constants for object names and testing timeouts/durations and intervals.
|
|
||||||
const (
|
|
||||||
namespace = "default"
|
|
||||||
|
|
||||||
timeout = time.Second * 10
|
|
||||||
duration = time.Second * 10
|
|
||||||
interval = time.Millisecond * 250
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
cfg *rest.Config
|
|
||||||
k8sClient client.Client
|
|
||||||
testEnv *envtest.Environment
|
|
||||||
ctx context.Context
|
|
||||||
cancel context.CancelFunc
|
|
||||||
onePasswordItemReconciler *OnePasswordItemReconciler
|
|
||||||
deploymentReconciler *DeploymentReconciler
|
|
||||||
|
|
||||||
item1 = &TestItem{
|
|
||||||
Name: "test-item",
|
|
||||||
Version: 123,
|
|
||||||
Path: "vaults/hfnjvi6aymbsnfc2xeeoheizda/items/nwrhuano7bcwddcviubpp4mhfq",
|
|
||||||
Data: map[string]string{
|
|
||||||
"username": username,
|
|
||||||
"password": password,
|
|
||||||
},
|
|
||||||
SecretData: map[string][]byte{
|
|
||||||
"password": []byte(password),
|
|
||||||
"username": []byte(username),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
item2 = &TestItem{
|
|
||||||
Name: "test-item2",
|
|
||||||
Path: "vaults/hfnjvi6aymbsnfc2xeeoheizd2/items/nwrhuano7bcwddcviubpp4mhf2",
|
|
||||||
Version: 456,
|
|
||||||
Data: map[string]string{
|
|
||||||
"username": username2,
|
|
||||||
"password": password2,
|
|
||||||
},
|
|
||||||
SecretData: map[string][]byte{
|
|
||||||
"password": []byte(password2),
|
|
||||||
"username": []byte(username2),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
type TestItem struct {
|
|
||||||
Name string
|
|
||||||
Version int
|
|
||||||
Path string
|
|
||||||
Data map[string]string
|
|
||||||
SecretData map[string][]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAPIs(t *testing.T) {
|
func TestAPIs(t *testing.T) {
|
||||||
RegisterFailHandler(Fail)
|
RegisterFailHandler(Fail)
|
||||||
|
|
||||||
RunSpecs(t, "Controller Suite")
|
RunSpecsWithDefaultAndCustomReporters(t,
|
||||||
|
"Controller Suite",
|
||||||
|
[]Reporter{printer.NewlineReporter{}})
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ = BeforeSuite(func() {
|
var _ = BeforeSuite(func() {
|
||||||
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
|
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
|
||||||
|
|
||||||
ctx, cancel = context.WithCancel(context.TODO())
|
|
||||||
|
|
||||||
By("bootstrapping test environment")
|
By("bootstrapping test environment")
|
||||||
testEnv = &envtest.Environment{
|
testEnv = &envtest.Environment{
|
||||||
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
|
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
|
||||||
ErrorIfCRDPathMissing: true,
|
ErrorIfCRDPathMissing: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
var err error
|
cfg, err := testEnv.Start()
|
||||||
// cfg is defined in this file globally.
|
|
||||||
cfg, err = testEnv.Start()
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
Expect(err).NotTo(HaveOccurred())
|
||||||
Expect(cfg).NotTo(BeNil())
|
Expect(cfg).NotTo(BeNil())
|
||||||
|
|
||||||
err = onepasswordcomv1.AddToScheme(scheme.Scheme)
|
err = onepasswordv1.AddToScheme(scheme.Scheme)
|
||||||
Expect(err).NotTo(HaveOccurred())
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
|
||||||
//+kubebuilder:scaffold:scheme
|
//+kubebuilder:scaffold:scheme
|
||||||
@@ -148,41 +71,9 @@ var _ = BeforeSuite(func() {
|
|||||||
Expect(err).NotTo(HaveOccurred())
|
Expect(err).NotTo(HaveOccurred())
|
||||||
Expect(k8sClient).NotTo(BeNil())
|
Expect(k8sClient).NotTo(BeNil())
|
||||||
|
|
||||||
k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
|
}, 60)
|
||||||
Scheme: scheme.Scheme,
|
|
||||||
})
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
|
|
||||||
opConnectClient := &mocks.TestClient{}
|
|
||||||
|
|
||||||
onePasswordItemReconciler = &OnePasswordItemReconciler{
|
|
||||||
Client: k8sManager.GetClient(),
|
|
||||||
Scheme: k8sManager.GetScheme(),
|
|
||||||
OpConnectClient: opConnectClient,
|
|
||||||
}
|
|
||||||
err = (onePasswordItemReconciler).SetupWithManager(k8sManager)
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
|
|
||||||
r, _ := regexp.Compile(annotationRegExpString)
|
|
||||||
deploymentReconciler = &DeploymentReconciler{
|
|
||||||
Client: k8sManager.GetClient(),
|
|
||||||
Scheme: k8sManager.GetScheme(),
|
|
||||||
OpConnectClient: opConnectClient,
|
|
||||||
OpAnnotationRegExp: r,
|
|
||||||
}
|
|
||||||
err = (deploymentReconciler).SetupWithManager(k8sManager)
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
defer GinkgoRecover()
|
|
||||||
err = k8sManager.Start(ctx)
|
|
||||||
Expect(err).ToNot(HaveOccurred(), "failed to run manager")
|
|
||||||
}()
|
|
||||||
|
|
||||||
})
|
|
||||||
|
|
||||||
var _ = AfterSuite(func() {
|
var _ = AfterSuite(func() {
|
||||||
cancel()
|
|
||||||
By("tearing down the test environment")
|
By("tearing down the test environment")
|
||||||
err := testEnv.Stop()
|
err := testEnv.Stop()
|
||||||
Expect(err).NotTo(HaveOccurred())
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
|||||||
110
go.mod
110
go.mod
@@ -1,87 +1,81 @@
|
|||||||
module github.com/1Password/onepassword-operator
|
module github.com/1Password/onepassword-operator
|
||||||
|
|
||||||
go 1.18
|
go 1.17
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/1Password/connect-sdk-go v1.5.0
|
github.com/1Password/connect-sdk-go v1.2.0
|
||||||
github.com/onsi/ginkgo/v2 v2.1.6
|
github.com/onsi/ginkgo v1.16.5
|
||||||
github.com/onsi/gomega v1.20.2
|
github.com/onsi/gomega v1.17.0
|
||||||
github.com/stretchr/testify v1.8.0
|
github.com/stretchr/testify v1.7.0
|
||||||
k8s.io/api v0.25.3
|
k8s.io/api v0.23.5
|
||||||
k8s.io/apimachinery v0.25.3
|
k8s.io/apimachinery v0.23.5
|
||||||
k8s.io/client-go v0.25.3
|
k8s.io/client-go v0.23.5
|
||||||
k8s.io/kubectl v0.25.0
|
k8s.io/kubectl v0.23.5
|
||||||
sigs.k8s.io/controller-runtime v0.13.0
|
sigs.k8s.io/controller-runtime v0.11.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
cloud.google.com/go/compute v1.10.0 // indirect
|
cloud.google.com/go v0.81.0 // indirect
|
||||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||||
github.com/Azure/go-autorest/autorest v0.11.28 // indirect
|
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
|
||||||
github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect
|
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
|
||||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
|
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
|
||||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
github.com/go-logr/logr v1.2.0 // indirect
|
||||||
github.com/go-logr/logr v1.2.3 // indirect
|
github.com/go-logr/zapr v1.2.0 // indirect
|
||||||
github.com/go-logr/zapr v1.2.3 // indirect
|
|
||||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
|
||||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
|
||||||
github.com/go-openapi/swag v0.22.3 // indirect
|
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
|
|
||||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||||
github.com/golang/protobuf v1.5.2 // indirect
|
github.com/golang/protobuf v1.5.2 // indirect
|
||||||
github.com/google/gnostic v0.6.9 // indirect
|
github.com/google/go-cmp v0.5.5 // indirect
|
||||||
github.com/google/go-cmp v0.5.9 // indirect
|
github.com/google/gofuzz v1.1.0 // indirect
|
||||||
github.com/google/gofuzz v1.2.0 // indirect
|
github.com/google/uuid v1.1.2 // indirect
|
||||||
github.com/google/uuid v1.3.0 // indirect
|
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||||
github.com/imdario/mergo v0.3.13 // indirect
|
github.com/imdario/mergo v0.3.12 // indirect
|
||||||
github.com/josharian/intern v1.0.0 // indirect
|
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/mailru/easyjson v0.7.7 // indirect
|
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
github.com/nxadm/tail v1.4.8 // indirect
|
||||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/prometheus/client_golang v1.13.0 // indirect
|
github.com/prometheus/client_golang v1.11.0 // indirect
|
||||||
github.com/prometheus/client_model v0.3.0 // indirect
|
github.com/prometheus/client_model v0.2.0 // indirect
|
||||||
github.com/prometheus/common v0.37.0 // indirect
|
github.com/prometheus/common v0.28.0 // indirect
|
||||||
github.com/prometheus/procfs v0.8.0 // indirect
|
github.com/prometheus/procfs v0.6.0 // indirect
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
|
github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
|
||||||
github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
|
github.com/uber/jaeger-lib v2.4.0+incompatible // indirect
|
||||||
go.uber.org/atomic v1.10.0 // indirect
|
go.uber.org/atomic v1.7.0 // indirect
|
||||||
go.uber.org/multierr v1.8.0 // indirect
|
go.uber.org/multierr v1.6.0 // indirect
|
||||||
go.uber.org/zap v1.23.0 // indirect
|
go.uber.org/zap v1.19.1 // indirect
|
||||||
golang.org/x/crypto v0.1.0 // indirect
|
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
|
||||||
golang.org/x/net v0.1.0 // indirect
|
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
|
||||||
golang.org/x/oauth2 v0.1.0 // indirect
|
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
|
||||||
golang.org/x/sys v0.1.0 // indirect
|
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect
|
||||||
golang.org/x/term v0.1.0 // indirect
|
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
|
||||||
golang.org/x/text v0.4.0 // indirect
|
golang.org/x/text v0.3.7 // indirect
|
||||||
golang.org/x/time v0.1.0 // indirect
|
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||||
google.golang.org/appengine v1.6.7 // indirect
|
google.golang.org/appengine v1.6.7 // indirect
|
||||||
google.golang.org/protobuf v1.28.1 // indirect
|
google.golang.org/protobuf v1.27.1 // indirect
|
||||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||||
k8s.io/apiextensions-apiserver v0.25.3 // indirect
|
k8s.io/apiextensions-apiserver v0.23.0 // indirect
|
||||||
k8s.io/component-base v0.25.3 // indirect
|
k8s.io/component-base v0.23.5 // indirect
|
||||||
k8s.io/klog/v2 v2.80.1 // indirect
|
k8s.io/klog/v2 v2.30.0 // indirect
|
||||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
|
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
|
||||||
k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85 // indirect
|
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
|
||||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,23 +1,15 @@
|
|||||||
/*
|
/*
|
||||||
MIT License
|
Copyright 2022.
|
||||||
|
|
||||||
Copyright (c) 2020-2022 1Password
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
Unless required by applicable law or agreed to in writing, software
|
||||||
copies or substantial portions of the Software.
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
See the License for the specific language governing permissions and
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
limitations under the License.
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
*/
|
*/
|
||||||
194
main.go
194
main.go
@@ -1,25 +1,17 @@
|
|||||||
/*
|
/*
|
||||||
MIT License
|
Copyright 2022.
|
||||||
|
|
||||||
Copyright (c) 2020-2022 1Password
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
Unless required by applicable law or agreed to in writing, software
|
||||||
copies or substantial portions of the Software.
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
See the License for the specific language governing permissions and
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
limitations under the License.
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package main
|
package main
|
||||||
@@ -29,13 +21,19 @@ import (
|
|||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"regexp"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/1Password/connect-sdk-go/connect"
|
"github.com/1Password/connect-sdk-go/connect"
|
||||||
|
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
||||||
|
"github.com/1Password/onepassword-operator/pkg/utils"
|
||||||
|
"github.com/1Password/onepassword-operator/version"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||||
|
|
||||||
|
// sdkVersion "github.com/operator-framework/operator-sdk/version"
|
||||||
|
|
||||||
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
|
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
|
||||||
// to ensure that exec-entrypoint and run can make use of them.
|
// to ensure that exec-entrypoint and run can make use of them.
|
||||||
@@ -45,53 +43,35 @@ import (
|
|||||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||||
|
|
||||||
onepasswordcomv1 "github.com/1Password/onepassword-operator/api/v1"
|
onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
|
||||||
"github.com/1Password/onepassword-operator/controllers"
|
"github.com/1Password/onepassword-operator/controllers"
|
||||||
op "github.com/1Password/onepassword-operator/pkg/onepassword"
|
|
||||||
"github.com/1Password/onepassword-operator/pkg/utils"
|
|
||||||
"github.com/1Password/onepassword-operator/version"
|
|
||||||
//+kubebuilder:scaffold:imports
|
//+kubebuilder:scaffold:imports
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
scheme = k8sruntime.NewScheme()
|
scheme = k8sruntime.NewScheme()
|
||||||
setupLog = ctrl.Log.WithName("setup")
|
setupLog = ctrl.Log.WithName("setup")
|
||||||
|
WatchNamespaceEnvVar = "WATCH_NAMESPACE"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
envPollingIntervalVariable = "POLLING_INTERVAL"
|
|
||||||
manageConnect = "MANAGE_CONNECT"
|
|
||||||
restartDeploymentsEnvVariable = "AUTO_RESTART"
|
|
||||||
defaultPollingInterval = 600
|
|
||||||
|
|
||||||
annotationRegExpString = "^operator.1password.io\\/[a-zA-Z\\.]+"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Change below variables to serve metrics on different host or port.
|
|
||||||
var (
|
|
||||||
metricsHost = "0.0.0.0"
|
|
||||||
metricsPort int32 = 8383
|
|
||||||
operatorMetricsPort int32 = 8686
|
|
||||||
)
|
|
||||||
|
|
||||||
func printVersion() {
|
|
||||||
setupLog.Info(fmt.Sprintf("Operator Version: %s", version.OperatorVersion))
|
|
||||||
setupLog.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
|
|
||||||
setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
|
|
||||||
setupLog.Info(fmt.Sprintf("Version of operator-sdk: %v", version.OperatorSDKVersion))
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
||||||
|
|
||||||
utilruntime.Must(onepasswordcomv1.AddToScheme(scheme))
|
utilruntime.Must(onepasswordv1.AddToScheme(scheme))
|
||||||
//+kubebuilder:scaffold:scheme
|
//+kubebuilder:scaffold:scheme
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func printVersion() {
|
||||||
|
setupLog.Info(fmt.Sprintf("Operator Version: %s", version.Version))
|
||||||
|
setupLog.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
|
||||||
|
setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
|
||||||
|
// TODO figure out how to get operator-sdk version
|
||||||
|
// setupLog.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
|
||||||
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
var metricsAddr string
|
var metricsAddr string
|
||||||
var enableLeaderElection bool
|
var enableLeaderElection bool
|
||||||
@@ -111,21 +91,11 @@ func main() {
|
|||||||
|
|
||||||
printVersion()
|
printVersion()
|
||||||
|
|
||||||
watchNamespace, err := getWatchNamespace()
|
namespace := os.Getenv(WatchNamespaceEnvVar)
|
||||||
if err != nil {
|
|
||||||
setupLog.Error(err, "unable to get WatchNamespace, "+
|
|
||||||
"the manager will watch and manage resources in all namespaces")
|
|
||||||
}
|
|
||||||
|
|
||||||
deploymentNamespace, err := utils.GetOperatorNamespace()
|
|
||||||
if err != nil {
|
|
||||||
setupLog.Error(err, "Failed to get namespace")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
options := ctrl.Options{
|
options := ctrl.Options{
|
||||||
Scheme: scheme,
|
Scheme: scheme,
|
||||||
Namespace: watchNamespace,
|
Namespace: namespace,
|
||||||
MetricsBindAddress: metricsAddr,
|
MetricsBindAddress: metricsAddr,
|
||||||
Port: 9443,
|
Port: 9443,
|
||||||
HealthProbeBindAddress: probeAddr,
|
HealthProbeBindAddress: probeAddr,
|
||||||
@@ -134,11 +104,11 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
|
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
|
||||||
if strings.Contains(watchNamespace, ",") {
|
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
|
||||||
setupLog.Info("manager set up with multiple namespaces", "namespaces", watchNamespace)
|
// Also note that you may face performance issues when using this with a high number of namespaces.
|
||||||
// configure cluster-scoped with MultiNamespacedCacheBuilder
|
if strings.Contains(namespace, ",") {
|
||||||
options.Namespace = ""
|
options.Namespace = ""
|
||||||
options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(watchNamespace, ","))
|
options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
|
||||||
}
|
}
|
||||||
|
|
||||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options)
|
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options)
|
||||||
@@ -150,7 +120,7 @@ func main() {
|
|||||||
// Setup One Password Client
|
// Setup One Password Client
|
||||||
opConnectClient, err := connect.NewClientFromEnvironment()
|
opConnectClient, err := connect.NewClientFromEnvironment()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
setupLog.Error(err, "unable to create Connect client")
|
setupLog.Error(err, "failed to create 1Password client")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -162,25 +132,35 @@ func main() {
|
|||||||
setupLog.Error(err, "unable to create controller", "controller", "OnePasswordItem")
|
setupLog.Error(err, "unable to create controller", "controller", "OnePasswordItem")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
//+kubebuilder:scaffold:builder
|
||||||
|
|
||||||
r, _ := regexp.Compile(annotationRegExpString)
|
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
||||||
if err = (&controllers.DeploymentReconciler{
|
setupLog.Error(err, "unable to set up health check")
|
||||||
Client: mgr.GetClient(),
|
os.Exit(1)
|
||||||
Scheme: mgr.GetScheme(),
|
}
|
||||||
OpConnectClient: opConnectClient,
|
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
|
||||||
OpAnnotationRegExp: r,
|
setupLog.Error(err, "unable to set up ready check")
|
||||||
}).SetupWithManager(mgr); err != nil {
|
os.Exit(1)
|
||||||
setupLog.Error(err, "unable to create controller", "controller", "Deployment")
|
}
|
||||||
|
|
||||||
|
setupLog.Info("starting manager")
|
||||||
|
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
|
||||||
|
setupLog.Error(err, "problem running manager")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
deploymentNamespace, err := utils.GetOperatorNamespace()
|
||||||
|
if err != nil {
|
||||||
|
setupLog.Error(err, "Failed to get namespace")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
//+kubebuilder:scaffold:builder
|
|
||||||
|
|
||||||
//Setup 1PasswordConnect
|
//Setup 1PasswordConnect
|
||||||
if shouldManageConnect() {
|
if shouldManageConnect() {
|
||||||
setupLog.Info("Automated Connect Management Enabled")
|
setupLog.Info("Automated Connect Management Enabled")
|
||||||
go func() {
|
go func() {
|
||||||
connectStarted := false
|
connectStarted := false
|
||||||
for connectStarted == false {
|
for !connectStarted {
|
||||||
err := op.SetupConnect(mgr.GetClient(), deploymentNamespace)
|
err := op.SetupConnect(mgr.GetClient(), deploymentNamespace)
|
||||||
// Cache Not Started is an acceptable error. Retry until cache is started.
|
// Cache Not Started is an acceptable error. Retry until cache is started.
|
||||||
if err != nil && !errors.Is(err, &cache.ErrCacheNotStarted{}) {
|
if err != nil && !errors.Is(err, &cache.ErrCacheNotStarted{}) {
|
||||||
@@ -196,6 +176,8 @@ func main() {
|
|||||||
setupLog.Info("Automated Connect Management Disabled")
|
setupLog.Info("Automated Connect Management Disabled")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: Configure Metrics Service. See: https://sdk.operatorframework.io/docs/building-operators/golang/migration/#export-metrics
|
||||||
|
|
||||||
// Setup update secrets task
|
// Setup update secrets task
|
||||||
updatedSecretsPoller := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
|
updatedSecretsPoller := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
|
||||||
done := make(chan bool)
|
done := make(chan bool)
|
||||||
@@ -215,35 +197,16 @@ func main() {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
// Start the Cmd
|
||||||
setupLog.Error(err, "unable to set up health check")
|
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
|
||||||
os.Exit(1)
|
setupLog.Error(err, "Manager exited non-zero")
|
||||||
}
|
done <- true
|
||||||
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
|
|
||||||
setupLog.Error(err, "unable to set up ready check")
|
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
setupLog.Info("starting manager")
|
|
||||||
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
|
|
||||||
setupLog.Error(err, "problem running manager")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// getWatchNamespace returns the Namespace the operator should be watching for changes
|
const manageConnect = "MANAGE_CONNECT"
|
||||||
func getWatchNamespace() (string, error) {
|
|
||||||
// WatchNamespaceEnvVar is the constant for env variable WATCH_NAMESPACE
|
|
||||||
// which specifies the Namespace to watch.
|
|
||||||
// An empty value means the operator is running with cluster scope.
|
|
||||||
var watchNamespaceEnvVar = "WATCH_NAMESPACE"
|
|
||||||
|
|
||||||
ns, found := os.LookupEnv(watchNamespaceEnvVar)
|
|
||||||
if !found {
|
|
||||||
return "", fmt.Errorf("%s must be set", watchNamespaceEnvVar)
|
|
||||||
}
|
|
||||||
return ns, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func shouldManageConnect() bool {
|
func shouldManageConnect() bool {
|
||||||
shouldManageConnect, found := os.LookupEnv(manageConnect)
|
shouldManageConnect, found := os.LookupEnv(manageConnect)
|
||||||
@@ -258,18 +221,8 @@ func shouldManageConnect() bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func shouldAutoRestartDeployments() bool {
|
const envPollingIntervalVariable = "POLLING_INTERVAL"
|
||||||
shouldAutoRestartDeployments, found := os.LookupEnv(restartDeploymentsEnvVariable)
|
const defaultPollingInterval = 600
|
||||||
if found {
|
|
||||||
shouldAutoRestartDeploymentsBool, err := strconv.ParseBool(strings.ToLower(shouldAutoRestartDeployments))
|
|
||||||
if err != nil {
|
|
||||||
setupLog.Error(err, "")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
return shouldAutoRestartDeploymentsBool
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func getPollingIntervalForUpdatingSecrets() time.Duration {
|
func getPollingIntervalForUpdatingSecrets() time.Duration {
|
||||||
timeInSecondsString, found := os.LookupEnv(envPollingIntervalVariable)
|
timeInSecondsString, found := os.LookupEnv(envPollingIntervalVariable)
|
||||||
@@ -284,3 +237,18 @@ func getPollingIntervalForUpdatingSecrets() time.Duration {
|
|||||||
setupLog.Info(fmt.Sprintf("Using default polling interval of %v seconds", defaultPollingInterval))
|
setupLog.Info(fmt.Sprintf("Using default polling interval of %v seconds", defaultPollingInterval))
|
||||||
return time.Duration(defaultPollingInterval) * time.Second
|
return time.Duration(defaultPollingInterval) * time.Second
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const restartDeploymentsEnvVariable = "AUTO_RESTART"
|
||||||
|
|
||||||
|
func shouldAutoRestartDeployments() bool {
|
||||||
|
shouldAutoRestartDeployments, found := os.LookupEnv(restartDeploymentsEnvVariable)
|
||||||
|
if found {
|
||||||
|
shouldAutoRestartDeploymentsBool, err := strconv.ParseBool(strings.ToLower(shouldAutoRestartDeployments))
|
||||||
|
if err != nil {
|
||||||
|
setupLog.Error(err, "")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
return shouldAutoRestartDeploymentsBool
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ import (
|
|||||||
const OnepasswordPrefix = "operator.1password.io"
|
const OnepasswordPrefix = "operator.1password.io"
|
||||||
const NameAnnotation = OnepasswordPrefix + "/item-name"
|
const NameAnnotation = OnepasswordPrefix + "/item-name"
|
||||||
const VersionAnnotation = OnepasswordPrefix + "/item-version"
|
const VersionAnnotation = OnepasswordPrefix + "/item-version"
|
||||||
|
const restartAnnotation = OnepasswordPrefix + "/last-restarted"
|
||||||
const ItemPathAnnotation = OnepasswordPrefix + "/item-path"
|
const ItemPathAnnotation = OnepasswordPrefix + "/item-path"
|
||||||
const RestartDeploymentsAnnotation = OnepasswordPrefix + "/auto-restart"
|
const RestartDeploymentsAnnotation = OnepasswordPrefix + "/auto-restart"
|
||||||
|
|
||||||
@@ -34,17 +35,23 @@ var ErrCannotUpdateSecretType = errs.New("Cannot change secret type. Secret type
|
|||||||
|
|
||||||
var log = logf.Log
|
var log = logf.Log
|
||||||
|
|
||||||
func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretName, namespace string, item *onepassword.Item, autoRestart string, labels map[string]string, secretType string, ownerRef *metav1.OwnerReference) error {
|
func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretName, namespace string, item *onepassword.Item, autoRestart string, labels map[string]string, secretType string, secretAnnotations map[string]string, ownerRef *metav1.OwnerReference) error {
|
||||||
|
|
||||||
itemVersion := fmt.Sprint(item.Version)
|
itemVersion := fmt.Sprint(item.Version)
|
||||||
secretAnnotations := map[string]string{
|
|
||||||
VersionAnnotation: itemVersion,
|
// If secretAnnotations is nil we create an empty map so we can later assign values for the OP Annotations in the map
|
||||||
ItemPathAnnotation: fmt.Sprintf("vaults/%v/items/%v", item.Vault.ID, item.ID),
|
if secretAnnotations == nil {
|
||||||
|
secretAnnotations = map[string]string{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
secretAnnotations[VersionAnnotation] = itemVersion
|
||||||
|
secretAnnotations[ItemPathAnnotation] = fmt.Sprintf("vaults/%v/items/%v", item.Vault.ID, item.ID)
|
||||||
|
|
||||||
if autoRestart != "" {
|
if autoRestart != "" {
|
||||||
_, err := utils.StringToBool(autoRestart)
|
_, err := utils.StringToBool(autoRestart)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error parsing %v annotation on Secret %v. Must be true or false. Defaulting to false.", RestartDeploymentsAnnotation, secretName)
|
log.Error(err, "Error parsing %v annotation on Secret %v. Must be true or false. Defaulting to false.", RestartDeploymentsAnnotation, secretName)
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
secretAnnotations[RestartDeploymentsAnnotation] = autoRestart
|
secretAnnotations[RestartDeploymentsAnnotation] = autoRestart
|
||||||
}
|
}
|
||||||
@@ -61,31 +68,19 @@ func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretNa
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if the secret types are being changed on the update.
|
currentAnnotations := currentSecret.Annotations
|
||||||
// Avoid Opaque and "" are treated as different on check.
|
currentLabels := currentSecret.Labels
|
||||||
wantSecretType := secretType
|
|
||||||
if wantSecretType == "" {
|
|
||||||
wantSecretType = string(corev1.SecretTypeOpaque)
|
|
||||||
}
|
|
||||||
currentSecretType := string(currentSecret.Type)
|
currentSecretType := string(currentSecret.Type)
|
||||||
if currentSecretType == "" {
|
if !reflect.DeepEqual(currentSecretType, secretType) {
|
||||||
currentSecretType = string(corev1.SecretTypeOpaque)
|
|
||||||
}
|
|
||||||
if currentSecretType != wantSecretType {
|
|
||||||
return ErrCannotUpdateSecretType
|
return ErrCannotUpdateSecretType
|
||||||
}
|
}
|
||||||
|
|
||||||
currentAnnotations := currentSecret.Annotations
|
|
||||||
currentLabels := currentSecret.Labels
|
|
||||||
if !reflect.DeepEqual(currentAnnotations, secretAnnotations) || !reflect.DeepEqual(currentLabels, labels) {
|
if !reflect.DeepEqual(currentAnnotations, secretAnnotations) || !reflect.DeepEqual(currentLabels, labels) {
|
||||||
log.Info(fmt.Sprintf("Updating Secret %v at namespace '%v'", secret.Name, secret.Namespace))
|
log.Info(fmt.Sprintf("Updating Secret %v at namespace '%v'", secret.Name, secret.Namespace))
|
||||||
currentSecret.ObjectMeta.Annotations = secretAnnotations
|
currentSecret.ObjectMeta.Annotations = secretAnnotations
|
||||||
currentSecret.ObjectMeta.Labels = labels
|
currentSecret.ObjectMeta.Labels = labels
|
||||||
currentSecret.Data = secret.Data
|
currentSecret.Data = secret.Data
|
||||||
if err := kubeClient.Update(context.Background(), currentSecret); err != nil {
|
return kubeClient.Update(context.Background(), currentSecret)
|
||||||
return fmt.Errorf("Kubernetes secret update failed: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info(fmt.Sprintf("Secret with name %v and version %v already exists", secret.Name, secret.Annotations[VersionAnnotation]))
|
log.Info(fmt.Sprintf("Secret with name %v and version %v already exists", secret.Name, secret.Annotations[VersionAnnotation]))
|
||||||
|
|||||||
@@ -7,16 +7,20 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/1Password/connect-sdk-go/onepassword"
|
"github.com/1Password/connect-sdk-go/onepassword"
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
kubeValidate "k8s.io/apimachinery/pkg/util/validation"
|
kubeValidate "k8s.io/apimachinery/pkg/util/validation"
|
||||||
|
"k8s.io/client-go/kubernetes"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||||
)
|
)
|
||||||
|
|
||||||
const restartDeploymentAnnotation = "false"
|
const restartDeploymentAnnotation = "false"
|
||||||
|
|
||||||
|
type k8s struct {
|
||||||
|
clientset kubernetes.Interface
|
||||||
|
}
|
||||||
|
|
||||||
func TestCreateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
func TestCreateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
||||||
secretName := "test-secret-name"
|
secretName := "test-secret-name"
|
||||||
namespace := "test"
|
namespace := "test"
|
||||||
@@ -27,11 +31,14 @@ func TestCreateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
|||||||
item.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
item.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
||||||
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
||||||
|
|
||||||
kubeClient := fake.NewClientBuilder().Build()
|
kubeClient := fake.NewFakeClient()
|
||||||
secretLabels := map[string]string{}
|
secretLabels := map[string]string{}
|
||||||
|
secretAnnotations := map[string]string{
|
||||||
|
"testAnnotation": "exists",
|
||||||
|
}
|
||||||
secretType := ""
|
secretType := ""
|
||||||
|
|
||||||
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, nil)
|
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Unexpected error: %v", err)
|
t.Errorf("Unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
@@ -43,6 +50,10 @@ func TestCreateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
|||||||
}
|
}
|
||||||
compareFields(item.Fields, createdSecret.Data, t)
|
compareFields(item.Fields, createdSecret.Data, t)
|
||||||
compareAnnotationsToItem(createdSecret.Annotations, item, t)
|
compareAnnotationsToItem(createdSecret.Annotations, item, t)
|
||||||
|
|
||||||
|
if createdSecret.Annotations["testAnnotation"] != "exists" {
|
||||||
|
t.Errorf("Expected testAnnotation to be merged with existing annotations, but wasn't.")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKubernetesSecretFromOnePasswordItemOwnerReferences(t *testing.T) {
|
func TestKubernetesSecretFromOnePasswordItemOwnerReferences(t *testing.T) {
|
||||||
@@ -55,8 +66,11 @@ func TestKubernetesSecretFromOnePasswordItemOwnerReferences(t *testing.T) {
|
|||||||
item.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
item.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
||||||
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
||||||
|
|
||||||
kubeClient := fake.NewClientBuilder().Build()
|
kubeClient := fake.NewFakeClient()
|
||||||
secretLabels := map[string]string{}
|
secretLabels := map[string]string{}
|
||||||
|
secretAnnotations := map[string]string{
|
||||||
|
"testAnnotation": "exists",
|
||||||
|
}
|
||||||
secretType := ""
|
secretType := ""
|
||||||
|
|
||||||
ownerRef := &metav1.OwnerReference{
|
ownerRef := &metav1.OwnerReference{
|
||||||
@@ -65,7 +79,7 @@ func TestKubernetesSecretFromOnePasswordItemOwnerReferences(t *testing.T) {
|
|||||||
Name: "test-deployment",
|
Name: "test-deployment",
|
||||||
UID: types.UID("test-uid"),
|
UID: types.UID("test-uid"),
|
||||||
}
|
}
|
||||||
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, ownerRef)
|
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, ownerRef)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Unexpected error: %v", err)
|
t.Errorf("Unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
@@ -100,11 +114,12 @@ func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
|||||||
item.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
item.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
||||||
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
||||||
|
|
||||||
kubeClient := fake.NewClientBuilder().Build()
|
kubeClient := fake.NewFakeClient()
|
||||||
secretLabels := map[string]string{}
|
secretLabels := map[string]string{}
|
||||||
|
secretAnnotations := map[string]string{}
|
||||||
secretType := ""
|
secretType := ""
|
||||||
|
|
||||||
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, nil)
|
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Unexpected error: %v", err)
|
t.Errorf("Unexpected error: %v", err)
|
||||||
@@ -116,7 +131,7 @@ func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
|
|||||||
newItem.Version = 456
|
newItem.Version = 456
|
||||||
newItem.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
newItem.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
||||||
newItem.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
newItem.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
||||||
err = CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &newItem, restartDeploymentAnnotation, secretLabels, secretType, nil)
|
err = CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &newItem, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Unexpected error: %v", err)
|
t.Errorf("Unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
@@ -215,11 +230,14 @@ func TestCreateKubernetesTLSSecretFromOnePasswordItem(t *testing.T) {
|
|||||||
item.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
item.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
|
||||||
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
item.ID = "h46bb3jddvay7nxopfhvlwg35q"
|
||||||
|
|
||||||
kubeClient := fake.NewClientBuilder().Build()
|
kubeClient := fake.NewFakeClient()
|
||||||
secretLabels := map[string]string{}
|
secretLabels := map[string]string{}
|
||||||
|
secretAnnotations := map[string]string{
|
||||||
|
"testAnnotation": "exists",
|
||||||
|
}
|
||||||
secretType := "kubernetes.io/tls"
|
secretType := "kubernetes.io/tls"
|
||||||
|
|
||||||
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, nil)
|
err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Unexpected error: %v", err)
|
t.Errorf("Unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,147 +5,80 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type TestClient struct {
|
type TestClient struct {
|
||||||
GetVaultsFunc func() ([]onepassword.Vault, error)
|
GetVaultsFunc func() ([]onepassword.Vault, error)
|
||||||
GetVaultsByTitleFunc func(title string) ([]onepassword.Vault, error)
|
GetVaultsByTitleFunc func(title string) ([]onepassword.Vault, error)
|
||||||
GetVaultFunc func(uuid string) (*onepassword.Vault, error)
|
GetVaultFunc func(uuid string) (*onepassword.Vault, error)
|
||||||
GetVaultByUUIDFunc func(uuid string) (*onepassword.Vault, error)
|
GetItemFunc func(uuid string, vaultUUID string) (*onepassword.Item, error)
|
||||||
GetVaultByTitleFunc func(title string) (*onepassword.Vault, error)
|
GetItemsFunc func(vaultUUID string) ([]onepassword.Item, error)
|
||||||
GetItemFunc func(itemQuery string, vaultQuery string) (*onepassword.Item, error)
|
GetItemsByTitleFunc func(title string, vaultUUID string) ([]onepassword.Item, error)
|
||||||
GetItemByUUIDFunc func(uuid string, vaultQuery string) (*onepassword.Item, error)
|
GetItemByTitleFunc func(title string, vaultUUID string) (*onepassword.Item, error)
|
||||||
GetItemByTitleFunc func(title string, vaultQuery string) (*onepassword.Item, error)
|
CreateItemFunc func(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
|
||||||
GetItemsFunc func(vaultQuery string) ([]onepassword.Item, error)
|
UpdateItemFunc func(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
|
||||||
GetItemsByTitleFunc func(title string, vaultQuery string) ([]onepassword.Item, error)
|
DeleteItemFunc func(item *onepassword.Item, vaultUUID string) error
|
||||||
CreateItemFunc func(item *onepassword.Item, vaultQuery string) (*onepassword.Item, error)
|
GetFileFunc func(uuid string, itemUUID string, vaultUUID string) (*onepassword.File, error)
|
||||||
UpdateItemFunc func(item *onepassword.Item, vaultQuery string) (*onepassword.Item, error)
|
GetFileContentFunc func(file *onepassword.File) ([]byte, error)
|
||||||
DeleteItemFunc func(item *onepassword.Item, vaultQuery string) error
|
|
||||||
DeleteItemByIDFunc func(itemUUID string, vaultQuery string) error
|
|
||||||
DeleteItemByTitleFunc func(title string, vaultQuery string) error
|
|
||||||
GetFilesFunc func(itemQuery string, vaultQuery string) ([]onepassword.File, error)
|
|
||||||
GetFileFunc func(uuid string, itemQuery string, vaultQuery string) (*onepassword.File, error)
|
|
||||||
GetFileContentFunc func(file *onepassword.File) ([]byte, error)
|
|
||||||
DownloadFileFunc func(file *onepassword.File, targetDirectory string, overwrite bool) (string, error)
|
|
||||||
LoadStructFromItemByUUIDFunc func(config interface{}, itemUUID string, vaultQuery string) error
|
|
||||||
LoadStructFromItemByTitleFunc func(config interface{}, itemTitle string, vaultQuery string) error
|
|
||||||
LoadStructFromItemFunc func(config interface{}, itemQuery string, vaultQuery string) error
|
|
||||||
LoadStructFunc func(config interface{}) error
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
DoGetVaultsFunc func() ([]onepassword.Vault, error)
|
GetGetVaultsFunc func() ([]onepassword.Vault, error)
|
||||||
DoGetVaultsByTitleFunc func(title string) ([]onepassword.Vault, error)
|
DoGetVaultsByTitleFunc func(title string) ([]onepassword.Vault, error)
|
||||||
DoGetVaultFunc func(uuid string) (*onepassword.Vault, error)
|
DoGetVaultFunc func(uuid string) (*onepassword.Vault, error)
|
||||||
DoGetVaultByUUIDFunc func(uuid string) (*onepassword.Vault, error)
|
GetGetItemFunc func(uuid string, vaultUUID string) (*onepassword.Item, error)
|
||||||
DoGetVaultByTitleFunc func(title string) (*onepassword.Vault, error)
|
DoGetItemsByTitleFunc func(title string, vaultUUID string) ([]onepassword.Item, error)
|
||||||
DoGetItemFunc func(itemQuery string, vaultQuery string) (*onepassword.Item, error)
|
DoGetItemByTitleFunc func(title string, vaultUUID string) (*onepassword.Item, error)
|
||||||
DoGetItemByUUIDFunc func(uuid string, vaultQuery string) (*onepassword.Item, error)
|
DoCreateItemFunc func(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
|
||||||
DoGetItemByTitleFunc func(title string, vaultQuery string) (*onepassword.Item, error)
|
DoDeleteItemFunc func(item *onepassword.Item, vaultUUID string) error
|
||||||
DoGetItemsFunc func(vaultQuery string) ([]onepassword.Item, error)
|
DoGetItemsFunc func(vaultUUID string) ([]onepassword.Item, error)
|
||||||
DoGetItemsByTitleFunc func(title string, vaultQuery string) ([]onepassword.Item, error)
|
DoUpdateItemFunc func(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
|
||||||
DoCreateItemFunc func(item *onepassword.Item, vaultQuery string) (*onepassword.Item, error)
|
DoGetFileFunc func(uuid string, itemUUID string, vaultUUID string) (*onepassword.File, error)
|
||||||
DoUpdateItemFunc func(item *onepassword.Item, vaultQuery string) (*onepassword.Item, error)
|
DoGetFileContentFunc func(file *onepassword.File) ([]byte, error)
|
||||||
DoDeleteItemFunc func(item *onepassword.Item, vaultQuery string) error
|
|
||||||
DoDeleteItemByIDFunc func(itemUUID string, vaultQuery string) error
|
|
||||||
DoDeleteItemByTitleFunc func(title string, vaultQuery string) error
|
|
||||||
DoGetFilesFunc func(itemQuery string, vaultQuery string) ([]onepassword.File, error)
|
|
||||||
DoGetFileFunc func(uuid string, itemQuery string, vaultQuery string) (*onepassword.File, error)
|
|
||||||
DoGetFileContentFunc func(file *onepassword.File) ([]byte, error)
|
|
||||||
DoDownloadFileFunc func(file *onepassword.File, targetDirectory string, overwrite bool) (string, error)
|
|
||||||
DoLoadStructFromItemByUUIDFunc func(config interface{}, itemUUID string, vaultQuery string) error
|
|
||||||
DoLoadStructFromItemByTitleFunc func(config interface{}, itemTitle string, vaultQuery string) error
|
|
||||||
DoLoadStructFromItemFunc func(config interface{}, itemQuery string, vaultQuery string) error
|
|
||||||
DoLoadStructFunc func(config interface{}) error
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Do is the mock client's `Do` func
|
// Do is the mock client's `Do` func
|
||||||
|
|
||||||
func (m *TestClient) GetVaults() ([]onepassword.Vault, error) {
|
func (m *TestClient) GetVaults() ([]onepassword.Vault, error) {
|
||||||
return DoGetVaultsFunc()
|
return GetGetVaultsFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) GetVaultsByTitle(title string) ([]onepassword.Vault, error) {
|
func (m *TestClient) GetVaultsByTitle(title string) ([]onepassword.Vault, error) {
|
||||||
return DoGetVaultsByTitleFunc(title)
|
return DoGetVaultsByTitleFunc(title)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) GetVault(vaultQuery string) (*onepassword.Vault, error) {
|
func (m *TestClient) GetVault(uuid string) (*onepassword.Vault, error) {
|
||||||
return DoGetVaultFunc(vaultQuery)
|
return DoGetVaultFunc(uuid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) GetVaultByUUID(uuid string) (*onepassword.Vault, error) {
|
func (m *TestClient) GetItem(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
||||||
return DoGetVaultByUUIDFunc(uuid)
|
return GetGetItemFunc(uuid, vaultUUID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) GetVaultByTitle(title string) (*onepassword.Vault, error) {
|
func (m *TestClient) GetItems(vaultUUID string) ([]onepassword.Item, error) {
|
||||||
return DoGetVaultByTitleFunc(title)
|
return DoGetItemsFunc(vaultUUID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) GetItem(itemQuery string, vaultQuery string) (*onepassword.Item, error) {
|
func (m *TestClient) GetItemsByTitle(title, vaultUUID string) ([]onepassword.Item, error) {
|
||||||
return DoGetItemFunc(itemQuery, vaultQuery)
|
return DoGetItemsByTitleFunc(title, vaultUUID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) GetItemByUUID(uuid string, vaultQuery string) (*onepassword.Item, error) {
|
func (m *TestClient) GetItemByTitle(title string, vaultUUID string) (*onepassword.Item, error) {
|
||||||
return DoGetItemByUUIDFunc(uuid, vaultQuery)
|
return DoGetItemByTitleFunc(title, vaultUUID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) GetItemByTitle(title string, vaultQuery string) (*onepassword.Item, error) {
|
func (m *TestClient) CreateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error) {
|
||||||
return DoGetItemByTitleFunc(title, vaultQuery)
|
return DoCreateItemFunc(item, vaultUUID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) GetItems(vaultQuery string) ([]onepassword.Item, error) {
|
func (m *TestClient) DeleteItem(item *onepassword.Item, vaultUUID string) error {
|
||||||
return DoGetItemsFunc(vaultQuery)
|
return DoDeleteItemFunc(item, vaultUUID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) GetItemsByTitle(title string, vaultQuery string) ([]onepassword.Item, error) {
|
func (m *TestClient) UpdateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error) {
|
||||||
return DoGetItemsByTitleFunc(title, vaultQuery)
|
return DoUpdateItemFunc(item, vaultUUID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) CreateItem(item *onepassword.Item, vaultQuery string) (*onepassword.Item, error) {
|
func (m *TestClient) GetFile(uuid string, itemUUID string, vaultUUID string) (*onepassword.File, error) {
|
||||||
return DoCreateItemFunc(item, vaultQuery)
|
return DoGetFileFunc(uuid, itemUUID, vaultUUID)
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TestClient) UpdateItem(item *onepassword.Item, vaultQuery string) (*onepassword.Item, error) {
|
|
||||||
return DoUpdateItemFunc(item, vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TestClient) DeleteItem(item *onepassword.Item, vaultQuery string) error {
|
|
||||||
return DoDeleteItemFunc(item, vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TestClient) DeleteItemByID(itemUUID string, vaultQuery string) error {
|
|
||||||
return DoDeleteItemByIDFunc(itemUUID, vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TestClient) DeleteItemByTitle(title string, vaultQuery string) error {
|
|
||||||
return DoDeleteItemByTitleFunc(title, vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TestClient) GetFiles(itemQuery string, vaultQuery string) ([]onepassword.File, error) {
|
|
||||||
return DoGetFilesFunc(itemQuery, vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TestClient) GetFile(uuid string, itemQuery string, vaultQuery string) (*onepassword.File, error) {
|
|
||||||
return DoGetFileFunc(uuid, itemQuery, vaultQuery)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) GetFileContent(file *onepassword.File) ([]byte, error) {
|
func (m *TestClient) GetFileContent(file *onepassword.File) ([]byte, error) {
|
||||||
return DoGetFileContentFunc(file)
|
return DoGetFileContentFunc(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestClient) DownloadFile(file *onepassword.File, targetDirectory string, overwrite bool) (string, error) {
|
|
||||||
return DoDownloadFileFunc(file, targetDirectory, overwrite)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TestClient) LoadStructFromItemByUUID(config interface{}, itemUUID string, vaultQuery string) error {
|
|
||||||
return DoLoadStructFromItemByUUIDFunc(config, itemUUID, vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TestClient) LoadStructFromItemByTitle(config interface{}, itemTitle string, vaultQuery string) error {
|
|
||||||
return DoLoadStructFromItemByTitleFunc(config, itemTitle, vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TestClient) LoadStructFromItem(config interface{}, itemQuery string, vaultQuery string) error {
|
|
||||||
return DoLoadStructFromItemFunc(config, itemQuery, vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TestClient) LoadStruct(config interface{}) error {
|
|
||||||
return DoLoadStructFunc(config)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -2,12 +2,12 @@ package onepassword
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
errors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/apimachinery/pkg/util/yaml"
|
"k8s.io/apimachinery/pkg/util/yaml"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
@@ -15,8 +15,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var logConnectSetup = logf.Log.WithName("ConnectSetup")
|
var logConnectSetup = logf.Log.WithName("ConnectSetup")
|
||||||
var deploymentPath = "config/connect/deployment.yaml"
|
var deploymentPath = "deploy/connect/deployment.yaml"
|
||||||
var servicePath = "config/connect/service.yaml"
|
var servicePath = "deploy/connect/service.yaml"
|
||||||
|
|
||||||
func SetupConnect(kubeClient client.Client, deploymentNamespace string) error {
|
func SetupConnect(kubeClient client.Client, deploymentNamespace string) error {
|
||||||
err := setupService(kubeClient, servicePath, deploymentNamespace)
|
err := setupService(kubeClient, servicePath, deploymentNamespace)
|
||||||
|
|||||||
@@ -23,9 +23,9 @@ func TestServiceSetup(t *testing.T) {
|
|||||||
objs := []runtime.Object{}
|
objs := []runtime.Object{}
|
||||||
|
|
||||||
// Create a fake client to mock API calls.
|
// Create a fake client to mock API calls.
|
||||||
client := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objs...).Build()
|
client := fake.NewFakeClientWithScheme(s, objs...)
|
||||||
|
|
||||||
err := setupService(client, "../../config/connect/service.yaml", defaultNamespacedName.Namespace)
|
err := setupService(client, "../../deploy/connect/service.yaml", defaultNamespacedName.Namespace)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Error Setting Up Connect: %v", err)
|
t.Errorf("Error Setting Up Connect: %v", err)
|
||||||
@@ -48,9 +48,9 @@ func TestDeploymentSetup(t *testing.T) {
|
|||||||
objs := []runtime.Object{}
|
objs := []runtime.Object{}
|
||||||
|
|
||||||
// Create a fake client to mock API calls.
|
// Create a fake client to mock API calls.
|
||||||
client := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objs...).Build()
|
client := fake.NewFakeClientWithScheme(s, objs...)
|
||||||
|
|
||||||
err := setupDeployment(client, "../../config/connect/deployment.yaml", defaultNamespacedName.Namespace)
|
err := setupDeployment(client, "../../deploy/connect/deployment.yaml", defaultNamespacedName.Namespace)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Error Setting Up Connect: %v", err)
|
t.Errorf("Error Setting Up Connect: %v", err)
|
||||||
|
|||||||
@@ -9,8 +9,8 @@ import (
|
|||||||
|
|
||||||
func TestAreContainersUsingSecretsFromEnv(t *testing.T) {
|
func TestAreContainersUsingSecretsFromEnv(t *testing.T) {
|
||||||
secretNamesToSearch := map[string]*corev1.Secret{
|
secretNamesToSearch := map[string]*corev1.Secret{
|
||||||
"onepassword-database-secret": {},
|
"onepassword-database-secret": &corev1.Secret{},
|
||||||
"onepassword-api-key": {},
|
"onepassword-api-key": &corev1.Secret{},
|
||||||
}
|
}
|
||||||
|
|
||||||
containerSecretNames := []string{
|
containerSecretNames := []string{
|
||||||
|
|||||||
@@ -9,8 +9,8 @@ import (
|
|||||||
|
|
||||||
func TestIsDeploymentUsingSecretsUsingVolumes(t *testing.T) {
|
func TestIsDeploymentUsingSecretsUsingVolumes(t *testing.T) {
|
||||||
secretNamesToSearch := map[string]*corev1.Secret{
|
secretNamesToSearch := map[string]*corev1.Secret{
|
||||||
"onepassword-database-secret": {},
|
"onepassword-database-secret": &corev1.Secret{},
|
||||||
"onepassword-api-key": {},
|
"onepassword-api-key": &corev1.Secret{},
|
||||||
}
|
}
|
||||||
|
|
||||||
volumeSecretNames := []string{
|
volumeSecretNames := []string{
|
||||||
@@ -28,8 +28,8 @@ func TestIsDeploymentUsingSecretsUsingVolumes(t *testing.T) {
|
|||||||
|
|
||||||
func TestIsDeploymentUsingSecretsUsingContainers(t *testing.T) {
|
func TestIsDeploymentUsingSecretsUsingContainers(t *testing.T) {
|
||||||
secretNamesToSearch := map[string]*corev1.Secret{
|
secretNamesToSearch := map[string]*corev1.Secret{
|
||||||
"onepassword-database-secret": {},
|
"onepassword-database-secret": &corev1.Secret{},
|
||||||
"onepassword-api-key": {},
|
"onepassword-api-key": &corev1.Secret{},
|
||||||
}
|
}
|
||||||
|
|
||||||
containerSecretNames := []string{
|
containerSecretNames := []string{
|
||||||
@@ -47,8 +47,8 @@ func TestIsDeploymentUsingSecretsUsingContainers(t *testing.T) {
|
|||||||
|
|
||||||
func TestIsDeploymentNotUSingSecrets(t *testing.T) {
|
func TestIsDeploymentNotUSingSecrets(t *testing.T) {
|
||||||
secretNamesToSearch := map[string]*corev1.Secret{
|
secretNamesToSearch := map[string]*corev1.Secret{
|
||||||
"onepassword-database-secret": {},
|
"onepassword-database-secret": &corev1.Secret{},
|
||||||
"onepassword-api-key": {},
|
"onepassword-api-key": &corev1.Secret{},
|
||||||
}
|
}
|
||||||
|
|
||||||
deployment := &appsv1.Deployment{}
|
deployment := &appsv1.Deployment{}
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/1Password/connect-sdk-go/connect"
|
"github.com/1Password/connect-sdk-go/connect"
|
||||||
"github.com/1Password/connect-sdk-go/onepassword"
|
"github.com/1Password/connect-sdk-go/onepassword"
|
||||||
|
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
|
v1 "github.com/1Password/onepassword-operator/api/v1"
|
||||||
|
|
||||||
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
|
kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
|
||||||
"github.com/1Password/onepassword-operator/pkg/utils"
|
"github.com/1Password/onepassword-operator/pkg/utils"
|
||||||
|
|
||||||
@@ -90,10 +91,9 @@ func (h *SecretUpdateHandler) restartDeploymentsWithUpdatedSecrets(updatedSecret
|
|||||||
|
|
||||||
func (h *SecretUpdateHandler) restartDeployment(deployment *appsv1.Deployment) {
|
func (h *SecretUpdateHandler) restartDeployment(deployment *appsv1.Deployment) {
|
||||||
log.Info(fmt.Sprintf("Deployment %q at namespace %q references an updated secret. Restarting", deployment.GetName(), deployment.Namespace))
|
log.Info(fmt.Sprintf("Deployment %q at namespace %q references an updated secret. Restarting", deployment.GetName(), deployment.Namespace))
|
||||||
if deployment.Spec.Template.Annotations == nil {
|
deployment.Spec.Template.Annotations = map[string]string{
|
||||||
deployment.Spec.Template.Annotations = map[string]string{}
|
RestartAnnotation: time.Now().String(),
|
||||||
}
|
}
|
||||||
deployment.Spec.Template.Annotations[RestartAnnotation] = time.Now().String()
|
|
||||||
err := h.client.Update(context.Background(), deployment)
|
err := h.client.Update(context.Background(), deployment)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err, "Problem restarting deployment")
|
log.Error(err, "Problem restarting deployment")
|
||||||
@@ -134,21 +134,15 @@ func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*
|
|||||||
log.Info(fmt.Sprintf("Secret '%v' has been updated in 1Password but is set to be ignored. Updates to an ignored secret will not trigger an update to a kubernetes secret or a rolling restart.", secret.GetName()))
|
log.Info(fmt.Sprintf("Secret '%v' has been updated in 1Password but is set to be ignored. Updates to an ignored secret will not trigger an update to a kubernetes secret or a rolling restart.", secret.GetName()))
|
||||||
secret.Annotations[VersionAnnotation] = itemVersion
|
secret.Annotations[VersionAnnotation] = itemVersion
|
||||||
secret.Annotations[ItemPathAnnotation] = itemPathString
|
secret.Annotations[ItemPathAnnotation] = itemPathString
|
||||||
if err := h.client.Update(context.Background(), &secret); err != nil {
|
h.client.Update(context.Background(), &secret)
|
||||||
log.Error(err, "failed to update secret %s annotations to version %d: %s", secret.Name, itemVersion, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
log.Info(fmt.Sprintf("Updating kubernetes secret '%v'", secret.GetName()))
|
log.Info(fmt.Sprintf("Updating kubernetes secret '%v'", secret.GetName()))
|
||||||
secret.Annotations[VersionAnnotation] = itemVersion
|
secret.Annotations[VersionAnnotation] = itemVersion
|
||||||
secret.Annotations[ItemPathAnnotation] = itemPathString
|
secret.Annotations[ItemPathAnnotation] = itemPathString
|
||||||
secret.Data = kubeSecrets.BuildKubernetesSecretData(item.Fields, item.Files)
|
updatedSecret := kubeSecrets.BuildKubernetesSecretFromOnePasswordItem(secret.Name, secret.Namespace, secret.Annotations, secret.Labels, string(secret.Type), *item, nil)
|
||||||
log.Info(fmt.Sprintf("New secret path: %v and version: %v", secret.Annotations[ItemPathAnnotation], secret.Annotations[VersionAnnotation]))
|
log.Info(fmt.Sprintf("New secret path: %v and version: %v", updatedSecret.Annotations[ItemPathAnnotation], updatedSecret.Annotations[VersionAnnotation]))
|
||||||
if err := h.client.Update(context.Background(), &secret); err != nil {
|
h.client.Update(context.Background(), updatedSecret)
|
||||||
log.Error(err, "failed to update secret %s to version %d: %s", secret.Name, itemVersion, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if updatedSecrets[secret.Namespace] == nil {
|
if updatedSecrets[secret.Namespace] == nil {
|
||||||
updatedSecrets[secret.Namespace] = make(map[string]*corev1.Secret)
|
updatedSecrets[secret.Namespace] = make(map[string]*corev1.Secret)
|
||||||
}
|
}
|
||||||
@@ -193,7 +187,7 @@ func (h *SecretUpdateHandler) getIsSetForAutoRestartByNamespaceMap() (map[string
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (h *SecretUpdateHandler) getPathFromOnePasswordItem(secret corev1.Secret) string {
|
func (h *SecretUpdateHandler) getPathFromOnePasswordItem(secret corev1.Secret) string {
|
||||||
onePasswordItem := &onepasswordv1.OnePasswordItem{}
|
onePasswordItem := &v1.OnePasswordItem{}
|
||||||
|
|
||||||
// Search for our original OnePasswordItem if it exists
|
// Search for our original OnePasswordItem if it exists
|
||||||
err := h.client.Get(context.TODO(), client.ObjectKey{
|
err := h.client.Get(context.TODO(), client.ObjectKey{
|
||||||
|
|||||||
@@ -122,9 +122,6 @@ var tests = []testUpdateSecretTask{
|
|||||||
},
|
},
|
||||||
Spec: appsv1.DeploymentSpec{
|
Spec: appsv1.DeploymentSpec{
|
||||||
Template: corev1.PodTemplateSpec{
|
Template: corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Annotations: map[string]string{"external-annotation": "some-value"},
|
|
||||||
},
|
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
@@ -238,9 +235,6 @@ var tests = []testUpdateSecretTask{
|
|||||||
},
|
},
|
||||||
Spec: appsv1.DeploymentSpec{
|
Spec: appsv1.DeploymentSpec{
|
||||||
Template: corev1.PodTemplateSpec{
|
Template: corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Annotations: map[string]string{"external-annotation": "some-value"},
|
|
||||||
},
|
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Volumes: []corev1.Volume{
|
Volumes: []corev1.Volume{
|
||||||
{
|
{
|
||||||
@@ -348,9 +342,6 @@ var tests = []testUpdateSecretTask{
|
|||||||
},
|
},
|
||||||
Spec: appsv1.DeploymentSpec{
|
Spec: appsv1.DeploymentSpec{
|
||||||
Template: corev1.PodTemplateSpec{
|
Template: corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Annotations: map[string]string{"external-annotation": "some-value"},
|
|
||||||
},
|
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
@@ -420,9 +411,6 @@ var tests = []testUpdateSecretTask{
|
|||||||
},
|
},
|
||||||
Spec: appsv1.DeploymentSpec{
|
Spec: appsv1.DeploymentSpec{
|
||||||
Template: corev1.PodTemplateSpec{
|
Template: corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Annotations: map[string]string{"external-annotation": "some-value"},
|
|
||||||
},
|
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
@@ -494,9 +482,6 @@ var tests = []testUpdateSecretTask{
|
|||||||
},
|
},
|
||||||
Spec: appsv1.DeploymentSpec{
|
Spec: appsv1.DeploymentSpec{
|
||||||
Template: corev1.PodTemplateSpec{
|
Template: corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Annotations: map[string]string{"external-annotation": "some-value"},
|
|
||||||
},
|
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
@@ -568,9 +553,6 @@ var tests = []testUpdateSecretTask{
|
|||||||
},
|
},
|
||||||
Spec: appsv1.DeploymentSpec{
|
Spec: appsv1.DeploymentSpec{
|
||||||
Template: corev1.PodTemplateSpec{
|
Template: corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Annotations: map[string]string{"external-annotation": "some-value"},
|
|
||||||
},
|
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
@@ -648,9 +630,6 @@ var tests = []testUpdateSecretTask{
|
|||||||
},
|
},
|
||||||
Spec: appsv1.DeploymentSpec{
|
Spec: appsv1.DeploymentSpec{
|
||||||
Template: corev1.PodTemplateSpec{
|
Template: corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Annotations: map[string]string{"external-annotation": "some-value"},
|
|
||||||
},
|
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
@@ -724,9 +703,6 @@ var tests = []testUpdateSecretTask{
|
|||||||
},
|
},
|
||||||
Spec: appsv1.DeploymentSpec{
|
Spec: appsv1.DeploymentSpec{
|
||||||
Template: corev1.PodTemplateSpec{
|
Template: corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Annotations: map[string]string{"external-annotation": "some-value"},
|
|
||||||
},
|
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
@@ -800,10 +776,10 @@ func TestUpdateSecretHandler(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create a fake client to mock API calls.
|
// Create a fake client to mock API calls.
|
||||||
cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objs...).Build()
|
cl := fake.NewFakeClientWithScheme(s, objs...)
|
||||||
|
|
||||||
opConnectClient := &mocks.TestClient{}
|
opConnectClient := &mocks.TestClient{}
|
||||||
mocks.DoGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
mocks.GetGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
||||||
|
|
||||||
item := onepassword.Item{}
|
item := onepassword.Item{}
|
||||||
item.Fields = generateFields(testData.opItem["username"], testData.opItem["password"])
|
item.Fields = generateFields(testData.opItem["username"], testData.opItem["password"])
|
||||||
@@ -853,16 +829,6 @@ func TestUpdateSecretHandler(t *testing.T) {
|
|||||||
} else {
|
} else {
|
||||||
assert.False(t, testData.expectedRestart, "Deployment was restarted but should not have been.")
|
assert.False(t, testData.expectedRestart, "Deployment was restarted but should not have been.")
|
||||||
}
|
}
|
||||||
|
|
||||||
oldPodTemplateAnnotations := testData.existingDeployment.Spec.Template.ObjectMeta.Annotations
|
|
||||||
newPodTemplateAnnotations := deployment.Spec.Template.Annotations
|
|
||||||
for name, expected := range oldPodTemplateAnnotations {
|
|
||||||
actual, ok := newPodTemplateAnnotations[name]
|
|
||||||
if assert.Truef(t, ok, "Annotation %s was present in original pod template but was dropped after update", name) {
|
|
||||||
assert.Equalf(t, expected, actual, "Annotation value for %s original pod template has changed", name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -871,7 +837,7 @@ func TestIsUpdatedSecret(t *testing.T) {
|
|||||||
|
|
||||||
secretName := "test-secret"
|
secretName := "test-secret"
|
||||||
updatedSecrets := map[string]*corev1.Secret{
|
updatedSecrets := map[string]*corev1.Secret{
|
||||||
"some_secret": {},
|
"some_secret": &corev1.Secret{},
|
||||||
}
|
}
|
||||||
assert.False(t, isUpdatedSecret(secretName, updatedSecrets))
|
assert.False(t, isUpdatedSecret(secretName, updatedSecrets))
|
||||||
|
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ import (
|
|||||||
|
|
||||||
func TestAreVolmesUsingSecrets(t *testing.T) {
|
func TestAreVolmesUsingSecrets(t *testing.T) {
|
||||||
secretNamesToSearch := map[string]*corev1.Secret{
|
secretNamesToSearch := map[string]*corev1.Secret{
|
||||||
"onepassword-database-secret": {},
|
"onepassword-database-secret": &corev1.Secret{},
|
||||||
"onepassword-api-key": {},
|
"onepassword-api-key": &corev1.Secret{},
|
||||||
}
|
}
|
||||||
|
|
||||||
volumeSecretNames := []string{
|
volumeSecretNames := []string{
|
||||||
@@ -27,8 +27,8 @@ func TestAreVolmesUsingSecrets(t *testing.T) {
|
|||||||
|
|
||||||
func TestAreVolumesNotUsingSecrets(t *testing.T) {
|
func TestAreVolumesNotUsingSecrets(t *testing.T) {
|
||||||
secretNamesToSearch := map[string]*corev1.Secret{
|
secretNamesToSearch := map[string]*corev1.Secret{
|
||||||
"onepassword-database-secret": {},
|
"onepassword-database-secret": &corev1.Secret{},
|
||||||
"onepassword-api-key": {},
|
"onepassword-api-key": &corev1.Secret{},
|
||||||
}
|
}
|
||||||
|
|
||||||
volumeSecretNames := []string{
|
volumeSecretNames := []string{
|
||||||
|
|||||||
@@ -1,25 +1,10 @@
|
|||||||
// Copyright 2018 The Operator-SDK Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package utils
|
package utils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var ForceRunModeEnv = "OSDK_FORCE_RUN_MODE"
|
var ForceRunModeEnv = "OSDK_FORCE_RUN_MODE"
|
||||||
@@ -31,8 +16,6 @@ const (
|
|||||||
ClusterRunMode RunModeType = "cluster"
|
ClusterRunMode RunModeType = "cluster"
|
||||||
)
|
)
|
||||||
|
|
||||||
var log = logf.Log.WithName("k8sutil")
|
|
||||||
|
|
||||||
// ErrNoNamespace indicates that a namespace could not be found for the current
|
// ErrNoNamespace indicates that a namespace could not be found for the current
|
||||||
// environment
|
// environment
|
||||||
var ErrNoNamespace = fmt.Errorf("namespace not found for current environment")
|
var ErrNoNamespace = fmt.Errorf("namespace not found for current environment")
|
||||||
@@ -46,7 +29,7 @@ func GetOperatorNamespace() (string, error) {
|
|||||||
if isRunModeLocal() {
|
if isRunModeLocal() {
|
||||||
return "", ErrRunLocal
|
return "", ErrRunLocal
|
||||||
}
|
}
|
||||||
nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return "", ErrNoNamespace
|
return "", ErrNoNamespace
|
||||||
@@ -54,7 +37,6 @@ func GetOperatorNamespace() (string, error) {
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
ns := strings.TrimSpace(string(nsBytes))
|
ns := strings.TrimSpace(string(nsBytes))
|
||||||
log.V(1).Info("Found namespace", "Namespace", ns)
|
|
||||||
return ns, nil
|
return ns, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
202
vendor/cloud.google.com/go/compute/LICENSE
generated
vendored
202
vendor/cloud.google.com/go/compute/LICENSE
generated
vendored
@@ -1,202 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
542
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
542
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
@@ -1,542 +0,0 @@
|
|||||||
// Copyright 2014 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package metadata provides access to Google Compute Engine (GCE)
|
|
||||||
// metadata and API service accounts.
|
|
||||||
//
|
|
||||||
// This package is a wrapper around the GCE metadata service,
|
|
||||||
// as documented at https://cloud.google.com/compute/docs/metadata/overview.
|
|
||||||
package metadata // import "cloud.google.com/go/compute/metadata"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// metadataIP is the documented metadata server IP address.
|
|
||||||
metadataIP = "169.254.169.254"
|
|
||||||
|
|
||||||
// metadataHostEnv is the environment variable specifying the
|
|
||||||
// GCE metadata hostname. If empty, the default value of
|
|
||||||
// metadataIP ("169.254.169.254") is used instead.
|
|
||||||
// This is variable name is not defined by any spec, as far as
|
|
||||||
// I know; it was made up for the Go package.
|
|
||||||
metadataHostEnv = "GCE_METADATA_HOST"
|
|
||||||
|
|
||||||
userAgent = "gcloud-golang/0.1"
|
|
||||||
)
|
|
||||||
|
|
||||||
type cachedValue struct {
|
|
||||||
k string
|
|
||||||
trim bool
|
|
||||||
mu sync.Mutex
|
|
||||||
v string
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
projID = &cachedValue{k: "project/project-id", trim: true}
|
|
||||||
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
|
||||||
instID = &cachedValue{k: "instance/id", trim: true}
|
|
||||||
)
|
|
||||||
|
|
||||||
var defaultClient = &Client{hc: newDefaultHTTPClient()}
|
|
||||||
|
|
||||||
func newDefaultHTTPClient() *http.Client {
|
|
||||||
return &http.Client{
|
|
||||||
Transport: &http.Transport{
|
|
||||||
Dial: (&net.Dialer{
|
|
||||||
Timeout: 2 * time.Second,
|
|
||||||
KeepAlive: 30 * time.Second,
|
|
||||||
}).Dial,
|
|
||||||
},
|
|
||||||
Timeout: 5 * time.Second,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotDefinedError is returned when requested metadata is not defined.
|
|
||||||
//
|
|
||||||
// The underlying string is the suffix after "/computeMetadata/v1/".
|
|
||||||
//
|
|
||||||
// This error is not returned if the value is defined to be the empty
|
|
||||||
// string.
|
|
||||||
type NotDefinedError string
|
|
||||||
|
|
||||||
func (suffix NotDefinedError) Error() string {
|
|
||||||
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *cachedValue) get(cl *Client) (v string, err error) {
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
c.mu.Lock()
|
|
||||||
if c.v != "" {
|
|
||||||
return c.v, nil
|
|
||||||
}
|
|
||||||
if c.trim {
|
|
||||||
v, err = cl.getTrimmed(c.k)
|
|
||||||
} else {
|
|
||||||
v, err = cl.Get(c.k)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
c.v = v
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
onGCEOnce sync.Once
|
|
||||||
onGCE bool
|
|
||||||
)
|
|
||||||
|
|
||||||
// OnGCE reports whether this process is running on Google Compute Engine.
|
|
||||||
func OnGCE() bool {
|
|
||||||
onGCEOnce.Do(initOnGCE)
|
|
||||||
return onGCE
|
|
||||||
}
|
|
||||||
|
|
||||||
func initOnGCE() {
|
|
||||||
onGCE = testOnGCE()
|
|
||||||
}
|
|
||||||
|
|
||||||
// testOnGCE performs the actual GCE detection: an HTTP probe of the
// metadata IP and a DNS lookup of metadata.google.internal run in
// parallel. Setting GCE_METADATA_HOST short-circuits to true.
func testOnGCE() bool {
	// The user explicitly said they're on GCE, so trust them.
	if os.Getenv(metadataHostEnv) != "" {
		return true
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Buffered with capacity 2 so neither probe goroutine blocks (and
	// leaks) once this function stops reading results.
	resc := make(chan bool, 2)

	// Try two strategies in parallel.
	// See https://github.com/googleapis/google-cloud-go/issues/194
	go func() {
		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
		req.Header.Set("User-Agent", userAgent)
		res, err := newDefaultHTTPClient().Do(req.WithContext(ctx))
		if err != nil {
			resc <- false
			return
		}
		defer res.Body.Close()
		// Only the real metadata server sets this header.
		resc <- res.Header.Get("Metadata-Flavor") == "Google"
	}()

	go func() {
		resolver := &net.Resolver{}
		addrs, err := resolver.LookupHost(ctx, "metadata.google.internal")
		if err != nil || len(addrs) == 0 {
			resc <- false
			return
		}
		resc <- strsContains(addrs, metadataIP)
	}()

	tryHarder := systemInfoSuggestsGCE()
	if tryHarder {
		res := <-resc
		if res {
			// The first strategy succeeded, so let's use it.
			return true
		}
		// Wait for either the DNS or metadata server probe to
		// contradict the other one and say we are running on
		// GCE. Give it a lot of time to do so, since the system
		// info already suggests we're running on a GCE BIOS.
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case res = <-resc:
			return res
		case <-timer.C:
			// Too slow. Who knows what this system is.
			return false
		}
	}

	// There's no hint from the system info that we're running on
	// GCE, so use the first probe's result as truth, whether it's
	// true or false. The goal here is to optimize for speed for
	// users who are NOT running on GCE. We can't assume that
	// either a DNS lookup or an HTTP request to a blackholed IP
	// address is fast. Worst case this should return when the
	// metaClient's Transport.ResponseHeaderTimeout or
	// Transport.Dial.Timeout fires (in two seconds).
	return <-resc
}
|
|
||||||
|
|
||||||
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
	if runtime.GOOS != "linux" {
		// We don't have any non-Linux clues available, at least yet.
		return false
	}
	// Read error is deliberately ignored: a missing DMI file simply
	// means "no clue", same as a non-Google product name.
	raw, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
	switch strings.TrimSpace(string(raw)) {
	case "Google", "Google Compute Engine":
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// The following package-level helpers all delegate to defaultClient,
// which is declared elsewhere in this file.

// Subscribe calls Client.Subscribe on the default client.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
	return defaultClient.Subscribe(suffix, fn)
}

// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }

// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }

// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }

// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }

// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }

// Email calls Client.Email on the default client.
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }

// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }

// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }

// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }

// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }

// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }

// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
	return defaultClient.InstanceAttributeValue(attr)
}

// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
	return defaultClient.ProjectAttributeValue(attr)
}

// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
|
|
||||||
|
|
||||||
// strsContains reports whether s is an element of ss.
func strsContains(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
// A Client provides metadata.
type Client struct {
	hc *http.Client // HTTP client used for all metadata requests
}
|
|
||||||
|
|
||||||
// NewClient returns a Client that can be used to fetch metadata.
|
|
||||||
// Returns the client that uses the specified http.Client for HTTP requests.
|
|
||||||
// If nil is specified, returns the default client.
|
|
||||||
func NewClient(c *http.Client) *Client {
|
|
||||||
if c == nil {
|
|
||||||
return defaultClient
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Client{hc: c}
|
|
||||||
}
|
|
||||||
|
|
||||||
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
// Requests are retried with backoff on retryable statuses/errors, up to
// the retryer's attempt limit.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
	ctx := context.TODO()
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	suffix = strings.TrimLeft(suffix, "/")
	u := "http://" + host + "/computeMetadata/v1/" + suffix
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return "", "", err
	}
	// This header is required by the metadata server on every request.
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	var res *http.Response
	var reqErr error
	retryer := newRetryer()
	for {
		res, reqErr = c.hc.Do(req)
		var code int
		if res != nil {
			code = res.StatusCode
		}
		// Retry transient failures (5xx, retryable network errors) with
		// backoff; non-retryable outcomes fall through to break.
		if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
			if err := sleep(ctx, delay); err != nil {
				return "", "", err
			}
			continue
		}
		break
	}
	if reqErr != nil {
		return "", "", reqErr
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	if res.StatusCode != 200 {
		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
	}
	return string(all), res.Header.Get("Etag"), nil
}
|
|
||||||
|
|
||||||
// Get returns a value from the metadata service.
|
|
||||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
|
||||||
//
|
|
||||||
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
|
||||||
// 169.254.169.254 will be used instead.
|
|
||||||
//
|
|
||||||
// If the requested metadata is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
func (c *Client) Get(suffix string) (string, error) {
|
|
||||||
val, _, err := c.getETag(suffix)
|
|
||||||
return val, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) getTrimmed(suffix string) (s string, err error) {
|
|
||||||
s, err = c.Get(suffix)
|
|
||||||
s = strings.TrimSpace(s)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) lines(suffix string) ([]string, error) {
|
|
||||||
j, err := c.Get(suffix)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
|
||||||
for i := range s {
|
|
||||||
s[i] = strings.TrimSpace(s[i])
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectID returns the current instance's project ID string.
// The value is cached after the first successful fetch (see cachedValue).
func (c *Client) ProjectID() (string, error) { return projID.get(c) }

// NumericProjectID returns the current instance's numeric project ID.
// Cached after the first successful fetch.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }

// InstanceID returns the current VM's numeric instance ID.
// Cached after the first successful fetch.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }

// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
	return c.getTrimmed("instance/network-interfaces/0/ip")
}
|
|
||||||
|
|
||||||
// Email returns the email address associated with the service account.
|
|
||||||
// The account may be empty or the string "default" to use the instance's
|
|
||||||
// main account.
|
|
||||||
func (c *Client) Email(serviceAccount string) (string, error) {
|
|
||||||
if serviceAccount == "" {
|
|
||||||
serviceAccount = "default"
|
|
||||||
}
|
|
||||||
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
	return c.getTrimmed("instance/hostname")
}
|
|
||||||
|
|
||||||
// InstanceTags returns the list of user-defined instance tags,
|
|
||||||
// assigned when initially creating a GCE instance.
|
|
||||||
func (c *Client) InstanceTags() ([]string, error) {
|
|
||||||
var s []string
|
|
||||||
j, err := c.Get("instance/tags")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceName returns the current VM's instance ID string.
|
|
||||||
func (c *Client) InstanceName() (string, error) {
|
|
||||||
return c.getTrimmed("instance/name")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
|
||||||
func (c *Client) Zone() (string, error) {
|
|
||||||
zone, err := c.getTrimmed("instance/zone")
|
|
||||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }

// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
|
|
||||||
|
|
||||||
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
	return c.Get("instance/attributes/" + attr)
}

// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
	return c.Get("project/attributes/" + attr)
}
|
|
||||||
|
|
||||||
// Scopes returns the service account scopes for the given account.
|
|
||||||
// The account may be empty or the string "default" to use the instance's
|
|
||||||
// main account.
|
|
||||||
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
|
|
||||||
if serviceAccount == "" {
|
|
||||||
serviceAccount = "default"
|
|
||||||
}
|
|
||||||
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := c.getETag(suffix)
	if err != nil {
		return err
	}

	if err := fn(val, true); err != nil {
		return err
	}

	ok := true
	// Long-poll: ask the server to respond only when the value's ETag
	// differs from the one we last saw.
	if strings.ContainsRune(suffix, '?') {
		suffix += "&wait_for_change=true&last_etag="
	} else {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
		if err != nil {
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
				continue // Retry on other errors.
			}
			// Value was deleted: report it to fn once, then stop.
			ok = false
		}
		lastETag = etag

		if err := fn(val, ok); err != nil || !ok {
			return err
		}
	}
}
|
|
||||||
|
|
||||||
// Error contains an error response from the server.
type Error struct {
	// Code is the HTTP response status code.
	Code int
	// Message is the server response message.
	Message string
}

// Error renders the status code and server message, e.g.
// "compute: Received 404 `not found`".
func (e *Error) Error() string {
	return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
}
|
|
||||||
114
vendor/cloud.google.com/go/compute/metadata/retry.go
generated
vendored
114
vendor/cloud.google.com/go/compute/metadata/retry.go
generated
vendored
@@ -1,114 +0,0 @@
|
|||||||
// Copyright 2021 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package metadata
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"math/rand"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// maxRetryAttempts caps how many times a metadata request is retried.
	maxRetryAttempts = 5
)

var (
	// syscallRetryable reports whether err is a retryable socket-level
	// error. Overridden on Linux in retry_linux.go; on other platforms
	// it always reports false.
	syscallRetryable = func(err error) bool { return false }
)
|
|
||||||
|
|
||||||
// defaultBackoff is basically equivalent to gax.Backoff without the need for
|
|
||||||
// the dependency.
|
|
||||||
type defaultBackoff struct {
|
|
||||||
max time.Duration
|
|
||||||
mul float64
|
|
||||||
cur time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *defaultBackoff) Pause() time.Duration {
|
|
||||||
d := time.Duration(1 + rand.Int63n(int64(b.cur)))
|
|
||||||
b.cur = time.Duration(float64(b.cur) * b.mul)
|
|
||||||
if b.cur > b.max {
|
|
||||||
b.cur = b.max
|
|
||||||
}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// sleep is the equivalent of gax.Sleep without the need for the dependency.
|
|
||||||
func sleep(ctx context.Context, d time.Duration) error {
|
|
||||||
t := time.NewTimer(d)
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
t.Stop()
|
|
||||||
return ctx.Err()
|
|
||||||
case <-t.C:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// newRetryer returns a metadataRetryer with exponential backoff starting
// at 100ms, doubling after each attempt, capped at 30s.
func newRetryer() *metadataRetryer {
	return &metadataRetryer{bo: &defaultBackoff{
		cur: 100 * time.Millisecond,
		max: 30 * time.Second,
		mul: 2,
	}}
}
|
|
||||||
|
|
||||||
// backoff yields successive pause durations between retry attempts.
type backoff interface {
	Pause() time.Duration
}

// metadataRetryer decides whether a failed metadata request should be
// retried and how long to pause before the next attempt.
type metadataRetryer struct {
	bo       backoff // source of pause durations
	attempts int     // retries performed so far
}
|
|
||||||
|
|
||||||
func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) {
|
|
||||||
if status == http.StatusOK {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
retryOk := shouldRetry(status, err)
|
|
||||||
if !retryOk {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
if r.attempts == maxRetryAttempts {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
r.attempts++
|
|
||||||
return r.bo.Pause(), true
|
|
||||||
}
|
|
||||||
|
|
||||||
func shouldRetry(status int, err error) bool {
|
|
||||||
if 500 <= status && status <= 599 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if err == io.ErrUnexpectedEOF {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Transient network errors should be retried.
|
|
||||||
if syscallRetryable(err) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if err, ok := err.(interface{ Temporary() bool }); ok {
|
|
||||||
if err.Temporary() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err, ok := err.(interface{ Unwrap() error }); ok {
|
|
||||||
return shouldRetry(status, err.Unwrap())
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
26
vendor/cloud.google.com/go/compute/metadata/retry_linux.go
generated
vendored
26
vendor/cloud.google.com/go/compute/metadata/retry_linux.go
generated
vendored
@@ -1,26 +0,0 @@
|
|||||||
// Copyright 2021 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
//go:build linux
|
|
||||||
// +build linux
|
|
||||||
|
|
||||||
package metadata
|
|
||||||
|
|
||||||
import "syscall"
|
|
||||||
|
|
||||||
func init() {
	// Initialize syscallRetryable to return true on transient socket-level
	// errors. These errors are specific to Linux: connection reset or
	// refused, both of which can occur briefly while the metadata server
	// endpoint is (re)starting.
	syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
}
|
|
||||||
21
vendor/github.com/1Password/connect-sdk-go/LICENSE
generated
vendored
21
vendor/github.com/1Password/connect-sdk-go/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
|
|||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2021 1Password
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
860
vendor/github.com/1Password/connect-sdk-go/connect/client.go
generated
vendored
860
vendor/github.com/1Password/connect-sdk-go/connect/client.go
generated
vendored
@@ -1,860 +0,0 @@
|
|||||||
package connect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"github.com/opentracing/opentracing-go"
|
|
||||||
"github.com/opentracing/opentracing-go/ext"
|
|
||||||
jaegerClientConfig "github.com/uber/jaeger-client-go/config"
|
|
||||||
"github.com/uber/jaeger-client-go/zipkin"
|
|
||||||
|
|
||||||
"github.com/1Password/connect-sdk-go/onepassword"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// defaultUserAgent is the User-Agent format string; the single %s is
	// filled with the SDK version.
	defaultUserAgent = "connect-sdk-go/%s"
)

var (
	// Sentinel errors for malformed identifiers passed to the client.
	vaultUUIDError = fmt.Errorf("malformed vault uuid provided")
	itemUUIDError  = fmt.Errorf("malformed item uuid provided")
	fileUUIDError  = fmt.Errorf("malformed file uuid provided")
)
|
|
||||||
|
|
||||||
// Client Represents an available 1Password Connect API to connect to
type Client interface {
	// Vault operations; vaults can be addressed by UUID or by title.
	GetVaults() ([]onepassword.Vault, error)
	GetVault(uuid string) (*onepassword.Vault, error)
	GetVaultByUUID(uuid string) (*onepassword.Vault, error)
	GetVaultByTitle(title string) (*onepassword.Vault, error)
	GetVaultsByTitle(uuid string) ([]onepassword.Vault, error)
	// Item operations; items can be addressed by UUID or by title,
	// scoped to a vault query (UUID or title).
	GetItems(vaultQuery string) ([]onepassword.Item, error)
	GetItem(itemQuery, vaultQuery string) (*onepassword.Item, error)
	GetItemByUUID(uuid string, vaultQuery string) (*onepassword.Item, error)
	GetItemByTitle(title string, vaultQuery string) (*onepassword.Item, error)
	GetItemsByTitle(title string, vaultQuery string) ([]onepassword.Item, error)
	CreateItem(item *onepassword.Item, vaultQuery string) (*onepassword.Item, error)
	UpdateItem(item *onepassword.Item, vaultQuery string) (*onepassword.Item, error)
	DeleteItem(item *onepassword.Item, vaultQuery string) error
	DeleteItemByID(itemUUID string, vaultQuery string) error
	DeleteItemByTitle(title string, vaultQuery string) error
	// File (attachment) operations.
	GetFiles(itemQuery string, vaultQuery string) ([]onepassword.File, error)
	GetFile(uuid string, itemQuery string, vaultQuery string) (*onepassword.File, error)
	GetFileContent(file *onepassword.File) ([]byte, error)
	DownloadFile(file *onepassword.File, targetDirectory string, overwrite bool) (string, error)
	// Struct loading: populate tagged struct fields from item fields.
	LoadStructFromItemByUUID(config interface{}, itemUUID string, vaultQuery string) error
	LoadStructFromItemByTitle(config interface{}, itemTitle string, vaultQuery string) error
	LoadStructFromItem(config interface{}, itemQuery string, vaultQuery string) error
	LoadStruct(config interface{}) error
}

// httpClient is the minimal HTTP interface the REST client needs;
// satisfied by *http.Client and easy to stub in tests.
type httpClient interface {
	Do(req *http.Request) (*http.Response, error)
}
|
|
||||||
|
|
||||||
const (
	// envHostVariable names the env var holding the Connect server URL.
	envHostVariable = "OP_CONNECT_HOST"
	// envTokenVariable names the env var holding the Connect API token.
	envTokenVariable = "OP_CONNECT_TOKEN"
)
|
|
||||||
|
|
||||||
// NewClientFromEnvironment Returns a Secret Service client assuming that your
|
|
||||||
// jwt is set in the OP_TOKEN environment variable
|
|
||||||
func NewClientFromEnvironment() (Client, error) {
|
|
||||||
host, found := os.LookupEnv(envHostVariable)
|
|
||||||
if !found {
|
|
||||||
return nil, fmt.Errorf("There is no hostname available in the %q variable", envHostVariable)
|
|
||||||
}
|
|
||||||
|
|
||||||
token, found := os.LookupEnv(envTokenVariable)
|
|
||||||
if !found {
|
|
||||||
return nil, fmt.Errorf("There is no token available in the %q variable", envTokenVariable)
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewClient(host, token), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClient Returns a Secret Service client for a given url and jwt
func NewClient(url string, token string) Client {
	// Identify with the default "connect-sdk-go/<version>" user agent.
	return NewClientWithUserAgent(url, token, fmt.Sprintf(defaultUserAgent, SDKVersion))
}
|
|
||||||
|
|
||||||
// NewClientWithUserAgent Returns a Secret Service client for a given url and jwt and identifies with userAgent
func NewClientWithUserAgent(url string, token string, userAgent string) Client {
	if !opentracing.IsGlobalTracerRegistered() {
		// Install a Jaeger tracer with Zipkin B3 HTTP header propagation
		// so outgoing requests carry trace headers.
		cfg := jaegerClientConfig.Configuration{}
		zipkinPropagator := zipkin.NewZipkinB3HTTPHeaderPropagator()
		// NOTE(review): InitGlobalTracer's returned closer and error are
		// discarded, so tracer-init failures are silent and the tracer is
		// never closed — confirm this is intentional.
		cfg.InitGlobalTracer(
			userAgent,
			jaegerClientConfig.Injector(opentracing.HTTPHeaders, zipkinPropagator),
			jaegerClientConfig.Extractor(opentracing.HTTPHeaders, zipkinPropagator),
			jaegerClientConfig.ZipkinSharedRPCSpan(true),
		)
	}

	return &restClient{
		URL:   url,
		Token: token,

		userAgent: userAgent,
		tracer:    opentracing.GlobalTracer(),

		client: http.DefaultClient,
	}
}
|
|
||||||
|
|
||||||
// restClient is the concrete Client backed by the Connect REST API.
type restClient struct {
	URL       string             // base URL of the Connect server
	Token     string             // bearer token sent with every request
	userAgent string             // User-Agent header value
	tracer    opentracing.Tracer // creates one span per API call
	client    httpClient         // underlying HTTP transport
}
|
|
||||||
|
|
||||||
// GetVaults Get a list of all available vaults
|
|
||||||
func (rs *restClient) GetVaults() ([]onepassword.Vault, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetVaults")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
vaultURL := fmt.Sprintf("/v1/vaults")
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, vaultURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var vaults []onepassword.Vault
|
|
||||||
if err := parseResponse(response, http.StatusOK, &vaults); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return vaults, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetVault Get a vault based on its name or ID
|
|
||||||
func (rs *restClient) GetVault(vaultQuery string) (*onepassword.Vault, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetVault")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
if vaultQuery == "" {
|
|
||||||
return nil, fmt.Errorf("Please provide either the vault name or its ID.")
|
|
||||||
}
|
|
||||||
if !isValidUUID(vaultQuery) {
|
|
||||||
return rs.GetVaultByTitle(vaultQuery)
|
|
||||||
}
|
|
||||||
return rs.GetVaultByUUID(vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetVaultByUUID fetches the vault with the given UUID.
// Returns vaultUUIDError if uuid is not a well-formed UUID.
func (rs *restClient) GetVaultByUUID(uuid string) (*onepassword.Vault, error) {
	if !isValidUUID(uuid) {
		return nil, vaultUUIDError
	}

	span := rs.tracer.StartSpan("GetVaultByUUID")
	defer span.Finish()

	vaultURL := fmt.Sprintf("/v1/vaults/%s", uuid)
	request, err := rs.buildRequest(http.MethodGet, vaultURL, http.NoBody, span)
	if err != nil {
		return nil, err
	}

	response, err := rs.client.Do(request)
	if err != nil {
		return nil, err
	}
	// parseResponse presumably consumes and closes the response body —
	// TODO confirm (its definition is not visible here).
	var vault onepassword.Vault
	if err := parseResponse(response, http.StatusOK, &vault); err != nil {
		return nil, err
	}

	return &vault, nil
}
|
|
||||||
|
|
||||||
func (rs *restClient) GetVaultByTitle(vaultName string) (*onepassword.Vault, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetVaultByTitle")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
vaults, err := rs.GetVaultsByTitle(vaultName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(vaults) != 1 {
|
|
||||||
return nil, fmt.Errorf("Found %d vaults with title %q", len(vaults), vaultName)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &vaults[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rs *restClient) GetVaultsByTitle(title string) ([]onepassword.Vault, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetVaultsByTitle")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
filter := url.QueryEscape(fmt.Sprintf("title eq \"%s\"", title))
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults?filter=%s", filter)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var vaults []onepassword.Vault
|
|
||||||
if err := parseResponse(response, http.StatusOK, &vaults); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return vaults, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rs *restClient) getVaultUUID(vaultQuery string) (string, error) {
|
|
||||||
if vaultQuery == "" {
|
|
||||||
return "", fmt.Errorf("Please provide either the vault name or its ID.")
|
|
||||||
}
|
|
||||||
if isValidUUID(vaultQuery) {
|
|
||||||
return vaultQuery, nil
|
|
||||||
}
|
|
||||||
vault, err := rs.GetVaultByTitle(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return vault.ID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetItem Get a specific Item from the 1Password Connect API by either title or UUID
|
|
||||||
func (rs *restClient) GetItem(itemQuery string, vaultQuery string) (*onepassword.Item, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetItem")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
if itemQuery == "" {
|
|
||||||
return nil, fmt.Errorf("Please provide either the item name or its ID.")
|
|
||||||
}
|
|
||||||
if !isValidUUID(itemQuery) {
|
|
||||||
return rs.GetItemByTitle(itemQuery, vaultQuery)
|
|
||||||
}
|
|
||||||
return rs.GetItemByUUID(itemQuery, vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetItemByUUID Get a specific Item from the 1Password Connect API by its UUID
|
|
||||||
func (rs *restClient) GetItemByUUID(uuid string, vaultQuery string) (*onepassword.Item, error) {
|
|
||||||
if !isValidUUID(uuid) {
|
|
||||||
return nil, itemUUIDError
|
|
||||||
}
|
|
||||||
|
|
||||||
vaultUUID, err := rs.getVaultUUID(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
span := rs.tracer.StartSpan("GetItemByUUID")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s", vaultUUID, uuid)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var item onepassword.Item
|
|
||||||
if err := parseResponse(response, http.StatusOK, &item); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &item, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rs *restClient) GetItemByTitle(title string, vaultQuery string) (*onepassword.Item, error) {
|
|
||||||
vaultUUID, err := rs.getVaultUUID(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
span := rs.tracer.StartSpan("GetItemByTitle")
|
|
||||||
defer span.Finish()
|
|
||||||
items, err := rs.GetItemsByTitle(title, vaultUUID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(items) != 1 {
|
|
||||||
return nil, fmt.Errorf("Found %d item(s) in vault %q with title %q", len(items), vaultUUID, title)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &items[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rs *restClient) GetItemsByTitle(title string, vaultQuery string) ([]onepassword.Item, error) {
|
|
||||||
vaultUUID, err := rs.getVaultUUID(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
span := rs.tracer.StartSpan("GetItemsByTitle")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
filter := url.QueryEscape(fmt.Sprintf("title eq \"%s\"", title))
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items?filter=%s", vaultUUID, filter)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var itemSummaries []onepassword.Item
|
|
||||||
if err := parseResponse(response, http.StatusOK, &itemSummaries); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
items := make([]onepassword.Item, len(itemSummaries))
|
|
||||||
for i, itemSummary := range itemSummaries {
|
|
||||||
tempItem, err := rs.GetItem(itemSummary.ID, itemSummary.Vault.ID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
items[i] = *tempItem
|
|
||||||
}
|
|
||||||
|
|
||||||
return items, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rs *restClient) GetItems(vaultQuery string) ([]onepassword.Item, error) {
|
|
||||||
vaultUUID, err := rs.getVaultUUID(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
span := rs.tracer.StartSpan("GetItems")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items", vaultUUID)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var items []onepassword.Item
|
|
||||||
if err := parseResponse(response, http.StatusOK, &items); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return items, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rs *restClient) getItemUUID(itemQuery, vaultQuery string) (string, error) {
|
|
||||||
if itemQuery == "" {
|
|
||||||
return "", fmt.Errorf("Please provide either the item name or its ID.")
|
|
||||||
}
|
|
||||||
if isValidUUID(itemQuery) {
|
|
||||||
return itemQuery, nil
|
|
||||||
}
|
|
||||||
item, err := rs.GetItemByTitle(itemQuery, vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return item.ID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateItem Create a new item in a specified vault
|
|
||||||
func (rs *restClient) CreateItem(item *onepassword.Item, vaultQuery string) (*onepassword.Item, error) {
|
|
||||||
vaultUUID, err := rs.getVaultUUID(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
span := rs.tracer.StartSpan("CreateItem")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items", vaultUUID)
|
|
||||||
itemBody, err := json.Marshal(item)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
request, err := rs.buildRequest(http.MethodPost, itemURL, bytes.NewBuffer(itemBody), span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var newItem onepassword.Item
|
|
||||||
if err := parseResponse(response, http.StatusOK, &newItem); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &newItem, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateItem Update a new item in a specified vault
//
// The item is PUT back to the server in full; the returned value is the
// record as stored after the update.
func (rs *restClient) UpdateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error) {
	span := rs.tracer.StartSpan("UpdateItem")
	defer span.Finish()

	// NOTE(review): the vaultUUID parameter is never used here — the target
	// vault and item are taken from item.Vault.ID and item.ID. Confirm this
	// is intended before relying on the parameter.
	itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s", item.Vault.ID, item.ID)
	itemBody, err := json.Marshal(item)
	if err != nil {
		return nil, err
	}

	request, err := rs.buildRequest(http.MethodPut, itemURL, bytes.NewBuffer(itemBody), span)
	if err != nil {
		return nil, err
	}

	response, err := rs.client.Do(request)
	if err != nil {
		return nil, err
	}

	var newItem onepassword.Item
	if err := parseResponse(response, http.StatusOK, &newItem); err != nil {
		return nil, err
	}

	return &newItem, nil
}
|
|
||||||
|
|
||||||
// DeleteItem Delete a new item in a specified vault
|
|
||||||
func (rs *restClient) DeleteItem(item *onepassword.Item, vaultUUID string) error {
|
|
||||||
span := rs.tracer.StartSpan("DeleteItem")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s", item.Vault.ID, item.ID)
|
|
||||||
request, err := rs.buildRequest(http.MethodDelete, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := parseResponse(response, http.StatusNoContent, nil); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteItemByID Delete a new item in a specified vault, specifying the item's uuid
|
|
||||||
func (rs *restClient) DeleteItemByID(itemUUID string, vaultQuery string) error {
|
|
||||||
if !isValidUUID(itemUUID) {
|
|
||||||
return itemUUIDError
|
|
||||||
}
|
|
||||||
vaultUUID, err := rs.getVaultUUID(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
span := rs.tracer.StartSpan("DeleteItemByID")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s", vaultUUID, itemUUID)
|
|
||||||
request, err := rs.buildRequest(http.MethodDelete, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := parseResponse(response, http.StatusNoContent, nil); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteItemByTitle Delete a new item in a specified vault, specifying the item's title
|
|
||||||
func (rs *restClient) DeleteItemByTitle(title string, vaultQuery string) error {
|
|
||||||
span := rs.tracer.StartSpan("DeleteItemByTitle")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
item, err := rs.GetItemByTitle(title, vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return rs.DeleteItem(item, item.Vault.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rs *restClient) GetFiles(itemQuery string, vaultQuery string) ([]onepassword.File, error) {
|
|
||||||
vaultUUID, err := rs.getVaultUUID(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
itemUUID, err := rs.getItemUUID(itemQuery, vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
span := rs.tracer.StartSpan("GetFiles")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
jsonURL := fmt.Sprintf("/v1/vaults/%s/items/%s/files", vaultUUID, itemUUID)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, jsonURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := expectMinimumConnectVersion(response, version{1, 3, 0}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var files []onepassword.File
|
|
||||||
if err := parseResponse(response, http.StatusOK, &files); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetFile Get a specific File in a specified item.
|
|
||||||
// This does not include the file contents. Call GetFileContent() to load the file's content.
|
|
||||||
func (rs *restClient) GetFile(uuid string, itemQuery string, vaultQuery string) (*onepassword.File, error) {
|
|
||||||
if !isValidUUID(uuid) {
|
|
||||||
return nil, fileUUIDError
|
|
||||||
}
|
|
||||||
vaultUUID, err := rs.getVaultUUID(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
itemUUID, err := rs.getItemUUID(itemQuery, vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
span := rs.tracer.StartSpan("GetFile")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s/files/%s", vaultUUID, itemUUID, uuid)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := expectMinimumConnectVersion(response, version{1, 3, 0}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var file onepassword.File
|
|
||||||
if err := parseResponse(response, http.StatusOK, &file); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &file, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetFileContent retrieves the file's content.
// If the file's content have previously been fetched, those contents are returned without making another request.
func (rs *restClient) GetFileContent(file *onepassword.File) ([]byte, error) {
	// A nil error from Content() means the content is already cached on the
	// File; serve it without another round trip.
	if content, err := file.Content(); err == nil {
		return content, nil
	}
	// Cache miss: download from Connect. The response body is consumed and
	// closed by readResponseBody below.
	response, err := rs.retrieveDocumentContent(file)
	if err != nil {
		return nil, err
	}
	content, err := readResponseBody(response, http.StatusOK)
	if err != nil {
		return nil, err
	}
	// Store on the File so subsequent calls hit the cache branch above.
	file.SetContent(content)
	return content, nil
}
|
|
||||||
|
|
||||||
func (rs *restClient) DownloadFile(file *onepassword.File, targetDirectory string, overwriteIfExists bool) (string, error) {
|
|
||||||
response, err := rs.retrieveDocumentContent(file)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
path := filepath.Join(targetDirectory, filepath.Base(file.Name))
|
|
||||||
|
|
||||||
var osFile *os.File
|
|
||||||
|
|
||||||
if overwriteIfExists {
|
|
||||||
osFile, err = createFile(path)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
_, err = os.Stat(path)
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
osFile, err = createFile(path)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return "", fmt.Errorf("a file already exists under the %s path. In order to overwrite it, set `overwriteIfExists` to true", path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
defer osFile.Close()
|
|
||||||
if _, err = io.Copy(osFile, response.Body); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// retrieveDocumentContent requests the raw content of file from Connect
// using the file's ContentPath.
//
// NOTE(review): the *http.Response is returned with its Body still open —
// every caller is responsible for consuming and closing it (GetFileContent
// does so via readResponseBody; DownloadFile streams it with io.Copy).
func (rs *restClient) retrieveDocumentContent(file *onepassword.File) (*http.Response, error) {
	// Span is named after the public operation this helper serves.
	span := rs.tracer.StartSpan("GetFileContent")
	defer span.Finish()

	request, err := rs.buildRequest(http.MethodGet, file.ContentPath, http.NoBody, span)
	if err != nil {
		return nil, err
	}

	response, err := rs.client.Do(request)
	if err != nil {
		return nil, err
	}
	// File content endpoints only exist on Connect 1.3.0 and newer; fail with
	// a clear message on older servers.
	if err := expectMinimumConnectVersion(response, version{1, 3, 0}); err != nil {
		return nil, err
	}
	return response, nil
}
|
|
||||||
|
|
||||||
// createFile creates (or truncates) the file at path and tightens its
// permissions to 0600 before handing the open handle back.
//
// os.Create uses mode 0666 filtered by the umask, so the explicit Chmod is
// what guarantees owner-only access for downloaded secret material.
func createFile(path string) (*os.File, error) {
	osFile, err := os.Create(path)
	if err != nil {
		return nil, err
	}
	if err := os.Chmod(path, 0600); err != nil {
		// Don't leak the open handle — or leave a potentially world-readable
		// file behind — when we cannot restrict its permissions.
		osFile.Close()
		os.Remove(path)
		return nil, err
	}
	return osFile, nil
}
|
|
||||||
|
|
||||||
func (rs *restClient) buildRequest(method string, path string, body io.Reader, span opentracing.Span) (*http.Request, error) {
|
|
||||||
url := fmt.Sprintf("%s%s", rs.URL, path)
|
|
||||||
|
|
||||||
request, err := http.NewRequest(method, url, body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
request.Header.Set("Content-Type", "application/json")
|
|
||||||
request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rs.Token))
|
|
||||||
request.Header.Set("User-Agent", rs.userAgent)
|
|
||||||
|
|
||||||
ext.SpanKindRPCClient.Set(span)
|
|
||||||
ext.HTTPUrl.Set(span, path)
|
|
||||||
ext.HTTPMethod.Set(span, method)
|
|
||||||
|
|
||||||
rs.tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(request.Header))
|
|
||||||
|
|
||||||
return request, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadToStruct(item *parsedItem, config reflect.Value) error {
|
|
||||||
t := config.Type()
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
value := config.Field(i)
|
|
||||||
field := t.Field(i)
|
|
||||||
|
|
||||||
if !value.CanSet() {
|
|
||||||
return fmt.Errorf("cannot load config into private fields")
|
|
||||||
}
|
|
||||||
|
|
||||||
item.fields = append(item.fields, &field)
|
|
||||||
item.values = append(item.values, &value)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadStructFromItem Load configuration values based on struct tag from one 1P item.
|
|
||||||
// It accepts as parameters item title/UUID and vault title/UUID.
|
|
||||||
func (rs *restClient) LoadStructFromItem(i interface{}, itemQuery string, vaultQuery string) error {
|
|
||||||
if itemQuery == "" {
|
|
||||||
return fmt.Errorf("Please provide either the item name or its ID.")
|
|
||||||
}
|
|
||||||
if isValidUUID(itemQuery) {
|
|
||||||
return rs.LoadStructFromItemByUUID(i, itemQuery, vaultQuery)
|
|
||||||
}
|
|
||||||
return rs.LoadStructFromItemByTitle(i, itemQuery, vaultQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadStructFromItemByUUID Load configuration values based on struct tag from one 1P item.
|
|
||||||
func (rs *restClient) LoadStructFromItemByUUID(i interface{}, itemUUID string, vaultQuery string) error {
|
|
||||||
vaultUUID, err := rs.getVaultUUID(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !isValidUUID(itemUUID) {
|
|
||||||
return itemUUIDError
|
|
||||||
}
|
|
||||||
config, err := checkStruct(i)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
item := parsedItem{}
|
|
||||||
item.itemUUID = itemUUID
|
|
||||||
item.vaultUUID = vaultUUID
|
|
||||||
|
|
||||||
if err := loadToStruct(&item, config); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := setValuesForTag(rs, &item, false); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadStructFromItemByTitle Load configuration values based on struct tag from one 1P item
|
|
||||||
func (rs *restClient) LoadStructFromItemByTitle(i interface{}, itemTitle string, vaultQuery string) error {
|
|
||||||
vaultUUID, err := rs.getVaultUUID(vaultQuery)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
config, err := checkStruct(i)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
item := parsedItem{}
|
|
||||||
item.itemTitle = itemTitle
|
|
||||||
item.vaultUUID = vaultUUID
|
|
||||||
|
|
||||||
if err := loadToStruct(&item, config); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := setValuesForTag(rs, &item, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadStruct Load configuration values based on struct tag
//
// Each exported field carrying an `opitem` tag is filled from the named
// 1Password item. The vault is chosen per field by vaultUUIDForField: an
// `opvault` tag wins, otherwise the OP_VAULT environment variable. Fields
// that share the same (vault, item) pair are grouped so each item is
// fetched only once.
func (rs *restClient) LoadStruct(i interface{}) error {
	config, err := checkStruct(i)
	if err != nil {
		return err
	}

	t := config.Type()

	// Multiple fields may be from a single item so we will collect them
	items := map[string]parsedItem{}

	// Fetch the Vault from the environment
	vaultUUID, envVarFound := os.LookupEnv(envVaultVar)

	for i := 0; i < t.NumField(); i++ {
		value := config.Field(i)
		field := t.Field(i)
		tag := field.Tag.Get(itemTag)

		// Untagged fields are left untouched.
		if tag == "" {
			continue
		}

		if !value.CanSet() {
			return fmt.Errorf("Cannot load config into private fields")
		}

		// Per-field vault: opvault tag, falling back to OP_VAULT.
		itemVault, err := vaultUUIDForField(&field, vaultUUID, envVarFound)
		if err != nil {
			return err
		}
		// Unlike the item, the vault here must be a UUID, not a title.
		if !isValidUUID(itemVault) {
			return vaultUUIDError
		}

		// Group fields by (vault, item title) so the item is fetched once.
		key := fmt.Sprintf("%s/%s", itemVault, tag)
		parsed := items[key]
		parsed.vaultUUID = itemVault
		parsed.itemTitle = tag
		// field and value are fresh per-iteration locals, so taking their
		// addresses is safe.
		parsed.fields = append(parsed.fields, &field)
		parsed.values = append(parsed.values, &value)
		items[key] = parsed
	}

	for _, item := range items {
		if err := setValuesForTag(rs, &item, true); err != nil {
			return err
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
func parseResponse(resp *http.Response, expectedStatusCode int, result interface{}) error {
|
|
||||||
body, err := readResponseBody(resp, expectedStatusCode)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if result != nil {
|
|
||||||
if err := json.Unmarshal(body, result); err != nil {
|
|
||||||
return fmt.Errorf("decoding response: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// readResponseBody drains and closes resp.Body, returning the raw bytes when
// the status code matches expectedStatusCode. On any other status the body is
// decoded as a Connect error payload and returned as the error.
func readResponseBody(resp *http.Response, expectedStatusCode int) ([]byte, error) {
	// Always close the body so the transport can reuse the connection.
	defer resp.Body.Close()
	// NOTE(review): ioutil.ReadAll is deprecated since Go 1.16; io.ReadAll is
	// the drop-in replacement once the file's imports are adjusted.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != expectedStatusCode {
		var errResp *onepassword.Error
		if err := json.Unmarshal(body, &errResp); err != nil {
			return nil, fmt.Errorf("decoding error response: %s", err)
		}
		// NOTE(review): if the body is the JSON literal "null", errResp stays
		// a typed nil pointer and the returned error interface is non-nil —
		// verify onepassword.Error tolerates a nil receiver.
		return nil, errResp
	}
	return body, nil
}
|
|
||||||
|
|
||||||
// uuidPattern matches 1Password object IDs: exactly 26 lowercase characters
// drawn from a-z and 0-9. Compiled once at package init so this hot
// validation path does not rebuild the regexp on every call.
var uuidPattern = regexp.MustCompile("^[a-z0-9]{26}$")

// isValidUUID reports whether u has the shape of a 1Password object ID.
func isValidUUID(u string) bool {
	return uuidPattern.MatchString(u)
}
|
|
||||||
209
vendor/github.com/1Password/connect-sdk-go/connect/config_helper.go
generated
vendored
209
vendor/github.com/1Password/connect-sdk-go/connect/config_helper.go
generated
vendored
@@ -1,209 +0,0 @@
|
|||||||
package connect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/1Password/connect-sdk-go/onepassword"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Struct-tag keys recognized by the config loader, plus the environment
// variable consulted for a default vault.
const (
	vaultTag   = "opvault"   // per-field vault override (must be a UUID in LoadStruct)
	itemTag    = "opitem"    // title of the item holding the field's value
	sectionTag = "opsection" // section label within the item
	fieldTag   = "opfield"   // field label within the section
	urlTag     = "opurl"     // URL label, for onepassword.ItemURL fields

	envVaultVar = "OP_VAULT" // fallback vault when a field has no opvault tag
)

// parsedItem groups all struct fields that are populated from the same
// 1Password item, so each item is fetched only once. Either itemUUID or
// itemTitle identifies the item, depending on how the caller resolved it.
type parsedItem struct {
	vaultUUID string
	itemUUID  string
	itemTitle string
	// fields[i] and values[i] describe the same struct field; the slices are
	// kept parallel.
	fields []*reflect.StructField
	values []*reflect.Value
}
|
|
||||||
|
|
||||||
// checkStruct validates that i is a pointer to a struct and returns the
// dereferenced struct value, ready for field inspection.
func checkStruct(i interface{}) (reflect.Value, error) {
	ptr := reflect.ValueOf(i)
	if ptr.Kind() != reflect.Ptr {
		return reflect.Value{}, fmt.Errorf("you must pass a pointer to Config struct")
	}

	target := ptr.Elem()
	if target.Kind() != reflect.Struct {
		return reflect.Value{}, fmt.Errorf("config values can only be loaded into a struct")
	}
	return target, nil
}
|
|
||||||
func vaultUUIDForField(field *reflect.StructField, vaultUUID string, envVaultFound bool) (string, error) {
|
|
||||||
// Check to see if a specific vault has been specified on the field
|
|
||||||
// If the env vault id has not been found and item doesn't have a vault
|
|
||||||
// return an error
|
|
||||||
if vaultUUIDTag := field.Tag.Get(vaultTag); vaultUUIDTag == "" {
|
|
||||||
if !envVaultFound {
|
|
||||||
return "", fmt.Errorf("There is no vault for %q field", field.Name)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return vaultUUIDTag, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return vaultUUID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// setValuesForTag fetches the 1Password item described by parsedItem (by
// title when byTitle is true, otherwise by UUID) and writes its data into
// every collected struct field.
//
// Field dispatch, in order: ItemURL-typed fields get the URL matching their
// opurl tag; an empty opsection+opfield pair means "whole item" for
// Item-typed fields (and is an error otherwise); an opsection with an empty
// opfield fills ItemSection-typed fields; anything else is matched against
// the item's fields by section ID and label and converted via setValue.
func setValuesForTag(client Client, parsedItem *parsedItem, byTitle bool) error {
	var item *onepassword.Item
	var err error
	if byTitle {
		item, err = client.GetItemByTitle(parsedItem.itemTitle, parsedItem.vaultUUID)
	} else {
		item, err = client.GetItem(parsedItem.itemUUID, parsedItem.vaultUUID)
	}
	if err != nil {
		return err
	}

	for i, field := range parsedItem.fields {
		// fields and values are parallel slices (see parsedItem).
		value := parsedItem.values[i]

		// ItemURL fields: resolved from the item's URL list via the opurl tag.
		if field.Type == reflect.TypeOf(onepassword.ItemURL{}) {
			url := &onepassword.ItemURL{
				Primary: urlPrimaryForName(field.Tag.Get(urlTag), item.URLs),
				Label:   urlLabelForName(field.Tag.Get(urlTag), item.URLs),
				URL:     urlURLForName(field.Tag.Get(urlTag), item.URLs),
			}
			value.Set(reflect.ValueOf(*url))
			continue
		}

		// path is "<section>.<field>"; "." means both tags are absent.
		path := fmt.Sprintf("%s.%s", field.Tag.Get(sectionTag), field.Tag.Get(fieldTag))
		if path == "." {
			// No section/field tags: only a whole-Item field is meaningful.
			if field.Type == reflect.TypeOf(onepassword.Item{}) {
				value.Set(reflect.ValueOf(*item))
				continue
			}
			return fmt.Errorf("There is no %q specified for %q", fieldTag, field.Name)
		}

		// "<section>." — a section tag without a field tag: fill an
		// ItemSection-typed field. Other types fall through to the field
		// matching below (and will simply find no match).
		if strings.HasSuffix(path, ".") {
			if field.Type == reflect.TypeOf(onepassword.ItemSection{}) {
				section := &onepassword.ItemSection{
					ID:    sectionIDForName(field.Tag.Get(sectionTag), item.Sections),
					Label: sectionLabelForName(field.Tag.Get(sectionTag), item.Sections),
				}
				value.Set(reflect.ValueOf(*section))
				continue
			}
		}

		sectionID := sectionIDForName(field.Tag.Get(sectionTag), item.Sections)

		// Find the first item field in the wanted section with the wanted
		// label and convert its string value into the struct field.
		for _, f := range item.Fields {
			fieldSectionID := ""
			if f.Section != nil {
				fieldSectionID = f.Section.ID
			}

			if fieldSectionID == sectionID && f.Label == field.Tag.Get(fieldTag) {
				if err := setValue(value, f.Value); err != nil {
					return err
				}
				break
			}
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
func setValue(value *reflect.Value, toSet string) error {
|
|
||||||
switch value.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
value.SetString(toSet)
|
|
||||||
case reflect.Int:
|
|
||||||
v, err := strconv.Atoi(toSet)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
value.SetInt(int64(v))
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("Unsupported type %q. Only string, int64, and onepassword.Item are supported", value.Kind())
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func sectionIDForName(name string, sections []*onepassword.ItemSection) string {
|
|
||||||
if sections == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range sections {
|
|
||||||
if name == strings.ToLower(s.Label) {
|
|
||||||
return s.ID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func sectionLabelForName(name string, sections []*onepassword.ItemSection) string {
|
|
||||||
if sections == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range sections {
|
|
||||||
if name == strings.ToLower(s.Label) {
|
|
||||||
return s.Label
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func urlPrimaryForName(name string, itemURLs []onepassword.ItemURL) bool {
|
|
||||||
if itemURLs == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, url := range itemURLs {
|
|
||||||
if url.Label == strings.ToLower(name) {
|
|
||||||
return url.Primary
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func urlLabelForName(name string, itemURLs []onepassword.ItemURL) string {
|
|
||||||
if itemURLs == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, url := range itemURLs {
|
|
||||||
if url.Label == strings.ToLower(name) {
|
|
||||||
return url.Label
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func urlURLForName(name string, itemURLs []onepassword.ItemURL) string {
|
|
||||||
if itemURLs == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, url := range itemURLs {
|
|
||||||
if url.Label == strings.ToLower(name) {
|
|
||||||
return url.URL
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
|
|
||||||
}
|
|
||||||
104
vendor/github.com/1Password/connect-sdk-go/connect/version.go
generated
vendored
104
vendor/github.com/1Password/connect-sdk-go/connect/version.go
generated
vendored
@@ -1,104 +0,0 @@
|
|||||||
package connect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SDKVersion is the latest Semantic Version of the library
// Do not rename this variable without changing the regex in the Makefile
const SDKVersion = "1.5.0"

// VersionHeaderKey is the response header in which a Connect server reports
// its own version; servers up to v1.2.0 did not send it (see getServerVersion).
const VersionHeaderKey = "1Password-Connect-Version"
|
|
||||||
|
|
||||||
// expectMinimumConnectVersion returns an error if the provided minimum version for Connect is lower than the version
|
|
||||||
// reported in the response from Connect.
|
|
||||||
func expectMinimumConnectVersion(resp *http.Response, minimumVersion version) error {
|
|
||||||
serverVersion, err := getServerVersion(resp)
|
|
||||||
if err != nil {
|
|
||||||
// Return gracefully if server version cannot be determined reliably
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if !serverVersion.IsGreaterOrEqualThan(minimumVersion) {
|
|
||||||
return fmt.Errorf("need at least version %s of Connect for this function, detected version %s. Please update your Connect server", minimumVersion, serverVersion)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getServerVersion(resp *http.Response) (serverVersion, error) {
|
|
||||||
versionHeader := resp.Header.Get(VersionHeaderKey)
|
|
||||||
if versionHeader == "" {
|
|
||||||
// The last version without the version header was v1.2.0
|
|
||||||
return serverVersion{
|
|
||||||
version: version{1, 2, 0},
|
|
||||||
orEarlier: true,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
return parseServerVersion(versionHeader)
|
|
||||||
}
|
|
||||||
|
|
||||||
type version struct {
|
|
||||||
major int
|
|
||||||
minor int
|
|
||||||
patch int
|
|
||||||
}
|
|
||||||
|
|
||||||
// serverVersion describes the version reported by the server.
|
|
||||||
type serverVersion struct {
|
|
||||||
version
|
|
||||||
// orEarlier is true if the version is derived from the lack of a version header from the server.
|
|
||||||
orEarlier bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v version) String() string {
|
|
||||||
return fmt.Sprintf("%d.%d.%d", v.major, v.minor, v.patch)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v serverVersion) String() string {
|
|
||||||
if v.orEarlier {
|
|
||||||
return v.version.String() + " (or earlier)"
|
|
||||||
}
|
|
||||||
return v.version.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsGreaterOrEqualThan returns true if the lefthand-side version is equal to or or a higher version than the provided
|
|
||||||
// minimum according to the semantic versioning rules.
|
|
||||||
func (v version) IsGreaterOrEqualThan(min version) bool {
|
|
||||||
if v.major != min.major {
|
|
||||||
// Different major version
|
|
||||||
return v.major > min.major
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.minor != min.minor {
|
|
||||||
// Same major, but different minor version
|
|
||||||
return v.minor > min.minor
|
|
||||||
}
|
|
||||||
|
|
||||||
// Same major and minor version
|
|
||||||
return v.patch >= min.patch
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseServerVersion(v string) (serverVersion, error) {
|
|
||||||
spl := strings.Split(v, ".")
|
|
||||||
if len(spl) != 3 {
|
|
||||||
return serverVersion{}, errors.New("wrong length")
|
|
||||||
}
|
|
||||||
var res [3]int
|
|
||||||
for i := range res {
|
|
||||||
tmp, err := strconv.Atoi(spl[i])
|
|
||||||
if err != nil {
|
|
||||||
return serverVersion{}, err
|
|
||||||
}
|
|
||||||
res[i] = tmp
|
|
||||||
}
|
|
||||||
return serverVersion{
|
|
||||||
version: version{
|
|
||||||
major: res[0],
|
|
||||||
minor: res[1],
|
|
||||||
patch: res[2],
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
21
vendor/github.com/1Password/connect-sdk-go/onepassword/errors.go
generated
vendored
21
vendor/github.com/1Password/connect-sdk-go/onepassword/errors.go
generated
vendored
@@ -1,21 +0,0 @@
|
|||||||
package onepassword
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// Error is an error returned by the Connect API.
|
|
||||||
type Error struct {
|
|
||||||
StatusCode int `json:"status"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Error) Error() string {
|
|
||||||
return fmt.Sprintf("status %d: %s", e.StatusCode, e.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Error) Is(target error) bool {
|
|
||||||
t, ok := target.(*Error)
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return t.Message == e.Message && t.StatusCode == e.StatusCode
|
|
||||||
}
|
|
||||||
49
vendor/github.com/1Password/connect-sdk-go/onepassword/files.go
generated
vendored
49
vendor/github.com/1Password/connect-sdk-go/onepassword/files.go
generated
vendored
@@ -1,49 +0,0 @@
|
|||||||
package onepassword
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
type File struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Section *ItemSection `json:"section,omitempty"`
|
|
||||||
Size int `json:"size"`
|
|
||||||
ContentPath string `json:"content_path"`
|
|
||||||
content []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *File) UnmarshalJSON(data []byte) error {
|
|
||||||
var jsonFile struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Section *ItemSection `json:"section,omitempty"`
|
|
||||||
Size int `json:"size"`
|
|
||||||
ContentPath string `json:"content_path"`
|
|
||||||
Content []byte `json:"content,omitempty"`
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(data, &jsonFile); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
f.ID = jsonFile.ID
|
|
||||||
f.Name = jsonFile.Name
|
|
||||||
f.Section = jsonFile.Section
|
|
||||||
f.Size = jsonFile.Size
|
|
||||||
f.ContentPath = jsonFile.ContentPath
|
|
||||||
f.content = jsonFile.Content
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Content returns the content of the file if they have been loaded and returns an error if they have not been loaded.
|
|
||||||
// Use `client.GetFileContent(file *File)` instead to make sure the content is fetched automatically if not present.
|
|
||||||
func (f *File) Content() ([]byte, error) {
|
|
||||||
if f.content == nil {
|
|
||||||
return nil, errors.New("file content not loaded")
|
|
||||||
}
|
|
||||||
return f.content, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *File) SetContent(content []byte) {
|
|
||||||
f.content = content
|
|
||||||
}
|
|
||||||
167
vendor/github.com/1Password/connect-sdk-go/onepassword/items.go
generated
vendored
167
vendor/github.com/1Password/connect-sdk-go/onepassword/items.go
generated
vendored
@@ -1,167 +0,0 @@
|
|||||||
package onepassword
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ItemCategory Represents the template of the Item
|
|
||||||
type ItemCategory string
|
|
||||||
|
|
||||||
const (
|
|
||||||
Login ItemCategory = "LOGIN"
|
|
||||||
Password ItemCategory = "PASSWORD"
|
|
||||||
ApiCredential ItemCategory = "API_CREDENTIAL"
|
|
||||||
Server ItemCategory = "SERVER"
|
|
||||||
Database ItemCategory = "DATABASE"
|
|
||||||
CreditCard ItemCategory = "CREDIT_CARD"
|
|
||||||
Membership ItemCategory = "MEMBERSHIP"
|
|
||||||
Passport ItemCategory = "PASSPORT"
|
|
||||||
SoftwareLicense ItemCategory = "SOFTWARE_LICENSE"
|
|
||||||
OutdoorLicense ItemCategory = "OUTDOOR_LICENSE"
|
|
||||||
SecureNote ItemCategory = "SECURE_NOTE"
|
|
||||||
WirelessRouter ItemCategory = "WIRELESS_ROUTER"
|
|
||||||
BankAccount ItemCategory = "BANK_ACCOUNT"
|
|
||||||
DriverLicense ItemCategory = "DRIVER_LICENSE"
|
|
||||||
Identity ItemCategory = "IDENTITY"
|
|
||||||
RewardProgram ItemCategory = "REWARD_PROGRAM"
|
|
||||||
Document ItemCategory = "DOCUMENT"
|
|
||||||
EmailAccount ItemCategory = "EMAIL_ACCOUNT"
|
|
||||||
SocialSecurityNumber ItemCategory = "SOCIAL_SECURITY_NUMBER"
|
|
||||||
MedicalRecord ItemCategory = "MEDICAL_RECORD"
|
|
||||||
SSHKey ItemCategory = "SSH_KEY"
|
|
||||||
Custom ItemCategory = "CUSTOM"
|
|
||||||
)
|
|
||||||
|
|
||||||
// UnmarshalJSON Unmarshall Item Category enum strings to Go string enums
|
|
||||||
func (ic *ItemCategory) UnmarshalJSON(b []byte) error {
|
|
||||||
var s string
|
|
||||||
json.Unmarshal(b, &s)
|
|
||||||
category := ItemCategory(s)
|
|
||||||
switch category {
|
|
||||||
case Login, Password, Server, Database, CreditCard, Membership, Passport, SoftwareLicense,
|
|
||||||
OutdoorLicense, SecureNote, WirelessRouter, BankAccount, DriverLicense, Identity, RewardProgram,
|
|
||||||
Document, EmailAccount, SocialSecurityNumber, ApiCredential, MedicalRecord, SSHKey:
|
|
||||||
*ic = category
|
|
||||||
default:
|
|
||||||
*ic = Custom
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Item represents an item returned to the consumer
|
|
||||||
type Item struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Title string `json:"title"`
|
|
||||||
|
|
||||||
URLs []ItemURL `json:"urls,omitempty"`
|
|
||||||
Favorite bool `json:"favorite,omitempty"`
|
|
||||||
Tags []string `json:"tags,omitempty"`
|
|
||||||
Version int `json:"version,omitempty"`
|
|
||||||
|
|
||||||
Vault ItemVault `json:"vault"`
|
|
||||||
Category ItemCategory `json:"category,omitempty"` // TODO: switch this to `category`
|
|
||||||
|
|
||||||
Sections []*ItemSection `json:"sections,omitempty"`
|
|
||||||
Fields []*ItemField `json:"fields,omitempty"`
|
|
||||||
Files []*File `json:"files,omitempty"`
|
|
||||||
|
|
||||||
LastEditedBy string `json:"lastEditedBy,omitempty"`
|
|
||||||
CreatedAt time.Time `json:"createdAt,omitempty"`
|
|
||||||
UpdatedAt time.Time `json:"updatedAt,omitempty"`
|
|
||||||
|
|
||||||
// Deprecated: Connect does not return trashed items.
|
|
||||||
Trashed bool `json:"trashed,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemVault represents the Vault the Item is found in
|
|
||||||
type ItemVault struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemURL is a simplified item URL
|
|
||||||
type ItemURL struct {
|
|
||||||
Primary bool `json:"primary,omitempty"`
|
|
||||||
Label string `json:"label,omitempty"`
|
|
||||||
URL string `json:"href"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemSection Representation of a Section on an item
|
|
||||||
type ItemSection struct {
|
|
||||||
ID string `json:"id,omitempty"`
|
|
||||||
Label string `json:"label,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GeneratorRecipe Representation of a "recipe" used to generate a field
|
|
||||||
type GeneratorRecipe struct {
|
|
||||||
Length int `json:"length,omitempty"`
|
|
||||||
CharacterSets []string `json:"characterSets,omitempty"`
|
|
||||||
ExcludeCharacters string `json:"excludeCharacters,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemField Representation of a single field on an Item
|
|
||||||
type ItemField struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Section *ItemSection `json:"section,omitempty"`
|
|
||||||
Type string `json:"type"`
|
|
||||||
Purpose string `json:"purpose,omitempty"`
|
|
||||||
Label string `json:"label,omitempty"`
|
|
||||||
Value string `json:"value,omitempty"`
|
|
||||||
Generate bool `json:"generate,omitempty"`
|
|
||||||
Recipe *GeneratorRecipe `json:"recipe,omitempty"`
|
|
||||||
Entropy float64 `json:"entropy,omitempty"`
|
|
||||||
TOTP string `json:"totp,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetValue Retrieve the value of a field on the item by its label. To specify a
|
|
||||||
// field from a specific section pass in <section label>.<field label>. If
|
|
||||||
// no field matching the selector is found return "".
|
|
||||||
func (i *Item) GetValue(field string) string {
|
|
||||||
if i == nil || len(i.Fields) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
sectionFilter := false
|
|
||||||
sectionLabel := ""
|
|
||||||
fieldLabel := field
|
|
||||||
if strings.Contains(field, ".") {
|
|
||||||
parts := strings.Split(field, ".")
|
|
||||||
|
|
||||||
// Test to make sure the . isn't the last character
|
|
||||||
if len(parts) == 2 {
|
|
||||||
sectionFilter = true
|
|
||||||
sectionLabel = parts[0]
|
|
||||||
fieldLabel = parts[1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, f := range i.Fields {
|
|
||||||
if sectionFilter {
|
|
||||||
if f.Section != nil {
|
|
||||||
if sectionLabel != i.SectionLabelForID(f.Section.ID) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fieldLabel == f.Label {
|
|
||||||
return f.Value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Item) SectionLabelForID(id string) string {
|
|
||||||
if i != nil || len(i.Sections) > 0 {
|
|
||||||
for _, s := range i.Sections {
|
|
||||||
if s.ID == id {
|
|
||||||
return s.Label
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
46
vendor/github.com/1Password/connect-sdk-go/onepassword/vaults.go
generated
vendored
46
vendor/github.com/1Password/connect-sdk-go/onepassword/vaults.go
generated
vendored
@@ -1,46 +0,0 @@
|
|||||||
package onepassword
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Vault represents a 1password Vault
|
|
||||||
type Vault struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
Description string `json:"description,omitempty"`
|
|
||||||
|
|
||||||
AttrVersion int `json:"attributeVersion,omitempty"`
|
|
||||||
ContentVersoin int `json:"contentVersion,omitempty"`
|
|
||||||
Items int `json:"items,omitempty"`
|
|
||||||
Type VaultType `json:"type,omitempty"`
|
|
||||||
|
|
||||||
CreatedAt time.Time `json:"createdAt,omitempty"`
|
|
||||||
UpdatedAt time.Time `json:"updatedAt,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// VaultType Representation of what the Vault Type is
|
|
||||||
type VaultType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
PersonalVault VaultType = "PERSONAL"
|
|
||||||
EveryoneVault VaultType = "EVERYONE"
|
|
||||||
TransferVault VaultType = "TRANSFER"
|
|
||||||
UserCreatedVault VaultType = "USER_CREATED"
|
|
||||||
UnknownVault VaultType = "UNKNOWN"
|
|
||||||
)
|
|
||||||
|
|
||||||
// UnmarshalJSON Unmarshall Vault Type enum strings to Go string enums
|
|
||||||
func (vt *VaultType) UnmarshalJSON(b []byte) error {
|
|
||||||
var s string
|
|
||||||
json.Unmarshal(b, &s)
|
|
||||||
vaultType := VaultType(s)
|
|
||||||
switch vaultType {
|
|
||||||
case PersonalVault, EveryoneVault, TransferVault, UserCreatedVault:
|
|
||||||
*vt = vaultType
|
|
||||||
default:
|
|
||||||
*vt = UnknownVault
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
32
vendor/github.com/Azure/go-autorest/.gitignore
generated
vendored
32
vendor/github.com/Azure/go-autorest/.gitignore
generated
vendored
@@ -1,32 +0,0 @@
|
|||||||
# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore)
|
|
||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
.DS_Store
|
|
||||||
.idea/
|
|
||||||
.vscode/
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
*.test
|
|
||||||
*.prof
|
|
||||||
|
|
||||||
# go-autorest specific
|
|
||||||
vendor/
|
|
||||||
autorest/azure/example/example
|
|
||||||
1004
vendor/github.com/Azure/go-autorest/CHANGELOG.md
generated
vendored
1004
vendor/github.com/Azure/go-autorest/CHANGELOG.md
generated
vendored
File diff suppressed because it is too large
Load Diff
23
vendor/github.com/Azure/go-autorest/GNUmakefile
generated
vendored
23
vendor/github.com/Azure/go-autorest/GNUmakefile
generated
vendored
@@ -1,23 +0,0 @@
|
|||||||
DIR?=./autorest/
|
|
||||||
|
|
||||||
default: build
|
|
||||||
|
|
||||||
build: fmt
|
|
||||||
go install $(DIR)
|
|
||||||
|
|
||||||
test:
|
|
||||||
go test $(DIR) || exit 1
|
|
||||||
|
|
||||||
vet:
|
|
||||||
@echo "go vet ."
|
|
||||||
@go vet $(DIR)... ; if [ $$? -eq 1 ]; then \
|
|
||||||
echo ""; \
|
|
||||||
echo "Vet found suspicious constructs. Please check the reported constructs"; \
|
|
||||||
echo "and fix them if necessary before submitting the code for review."; \
|
|
||||||
exit 1; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
fmt:
|
|
||||||
gofmt -w $(DIR)
|
|
||||||
|
|
||||||
.PHONY: build test vet fmt
|
|
||||||
324
vendor/github.com/Azure/go-autorest/Gopkg.lock
generated
vendored
324
vendor/github.com/Azure/go-autorest/Gopkg.lock
generated
vendored
@@ -1,324 +0,0 @@
|
|||||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
|
||||||
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e"
|
|
||||||
name = "contrib.go.opencensus.io/exporter/ocagent"
|
|
||||||
packages = ["."]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea"
|
|
||||||
version = "v0.6.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20"
|
|
||||||
name = "github.com/census-instrumentation/opencensus-proto"
|
|
||||||
packages = [
|
|
||||||
"gen-go/agent/common/v1",
|
|
||||||
"gen-go/agent/metrics/v1",
|
|
||||||
"gen-go/agent/trace/v1",
|
|
||||||
"gen-go/metrics/v1",
|
|
||||||
"gen-go/resource/v1",
|
|
||||||
"gen-go/trace/v1",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "d89fa54de508111353cb0b06403c00569be780d8"
|
|
||||||
version = "v0.2.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
|
|
||||||
name = "github.com/davecgh/go-spew"
|
|
||||||
packages = ["spew"]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
|
|
||||||
version = "v1.1.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55"
|
|
||||||
name = "github.com/dgrijalva/jwt-go"
|
|
||||||
packages = ["."]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
|
||||||
version = "v3.2.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965"
|
|
||||||
name = "github.com/dimchansky/utfbom"
|
|
||||||
packages = ["."]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c"
|
|
||||||
version = "v1.1.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
|
|
||||||
name = "github.com/golang/groupcache"
|
|
||||||
packages = ["lru"]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "611e8accdfc92c4187d399e95ce826046d4c8d73"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa"
|
|
||||||
name = "github.com/golang/protobuf"
|
|
||||||
packages = [
|
|
||||||
"descriptor",
|
|
||||||
"jsonpb",
|
|
||||||
"proto",
|
|
||||||
"protoc-gen-go/descriptor",
|
|
||||||
"ptypes",
|
|
||||||
"ptypes/any",
|
|
||||||
"ptypes/duration",
|
|
||||||
"ptypes/struct",
|
|
||||||
"ptypes/timestamp",
|
|
||||||
"ptypes/wrappers",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
|
|
||||||
version = "v1.3.2"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806"
|
|
||||||
name = "github.com/grpc-ecosystem/grpc-gateway"
|
|
||||||
packages = [
|
|
||||||
"internal",
|
|
||||||
"runtime",
|
|
||||||
"utilities",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009"
|
|
||||||
version = "v1.12.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79"
|
|
||||||
name = "github.com/mitchellh/go-homedir"
|
|
||||||
packages = ["."]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
|
|
||||||
version = "v1.1.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
|
|
||||||
name = "github.com/pmezard/go-difflib"
|
|
||||||
packages = ["difflib"]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
|
||||||
version = "v1.0.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551"
|
|
||||||
name = "github.com/stretchr/testify"
|
|
||||||
packages = [
|
|
||||||
"assert",
|
|
||||||
"require",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
|
|
||||||
version = "v1.4.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71"
|
|
||||||
name = "go.opencensus.io"
|
|
||||||
packages = [
|
|
||||||
".",
|
|
||||||
"internal",
|
|
||||||
"internal/tagencoding",
|
|
||||||
"metric/metricdata",
|
|
||||||
"metric/metricproducer",
|
|
||||||
"plugin/ocgrpc",
|
|
||||||
"plugin/ochttp",
|
|
||||||
"plugin/ochttp/propagation/b3",
|
|
||||||
"plugin/ochttp/propagation/tracecontext",
|
|
||||||
"resource",
|
|
||||||
"stats",
|
|
||||||
"stats/internal",
|
|
||||||
"stats/view",
|
|
||||||
"tag",
|
|
||||||
"trace",
|
|
||||||
"trace/internal",
|
|
||||||
"trace/propagation",
|
|
||||||
"trace/tracestate",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "aad2c527c5defcf89b5afab7f37274304195a6b2"
|
|
||||||
version = "v0.22.2"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae"
|
|
||||||
name = "golang.org/x/crypto"
|
|
||||||
packages = [
|
|
||||||
"pkcs12",
|
|
||||||
"pkcs12/internal/rc2",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "e9b2fee46413994441b28dfca259d911d963dfed"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43"
|
|
||||||
name = "golang.org/x/lint"
|
|
||||||
packages = [
|
|
||||||
".",
|
|
||||||
"golint",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910"
|
|
||||||
name = "golang.org/x/net"
|
|
||||||
packages = [
|
|
||||||
"http/httpguts",
|
|
||||||
"http2",
|
|
||||||
"http2/hpack",
|
|
||||||
"idna",
|
|
||||||
"internal/timeseries",
|
|
||||||
"trace",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "1ddd1de85cb0337b623b740a609d35817d516a8d"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b"
|
|
||||||
name = "golang.org/x/sync"
|
|
||||||
packages = ["semaphore"]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8"
|
|
||||||
name = "golang.org/x/sys"
|
|
||||||
packages = ["unix"]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405"
|
|
||||||
name = "golang.org/x/text"
|
|
||||||
packages = [
|
|
||||||
"collate",
|
|
||||||
"collate/build",
|
|
||||||
"internal/colltab",
|
|
||||||
"internal/gen",
|
|
||||||
"internal/language",
|
|
||||||
"internal/language/compact",
|
|
||||||
"internal/tag",
|
|
||||||
"internal/triegen",
|
|
||||||
"internal/ucd",
|
|
||||||
"language",
|
|
||||||
"secure/bidirule",
|
|
||||||
"transform",
|
|
||||||
"unicode/bidi",
|
|
||||||
"unicode/cldr",
|
|
||||||
"unicode/norm",
|
|
||||||
"unicode/rangetable",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
|
|
||||||
version = "v0.3.2"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7"
|
|
||||||
name = "golang.org/x/tools"
|
|
||||||
packages = [
|
|
||||||
"go/ast/astutil",
|
|
||||||
"go/gcexportdata",
|
|
||||||
"go/internal/gcimporter",
|
|
||||||
"go/types/typeutil",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877"
|
|
||||||
name = "google.golang.org/api"
|
|
||||||
packages = ["support/bundler"]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "8a410c21381766a810817fd6200fce8838ecb277"
|
|
||||||
version = "v0.14.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd"
|
|
||||||
name = "google.golang.org/genproto"
|
|
||||||
packages = [
|
|
||||||
"googleapis/api/httpbody",
|
|
||||||
"googleapis/rpc/status",
|
|
||||||
"protobuf/field_mask",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "51378566eb590fa106d1025ea12835a4416dda84"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301"
|
|
||||||
name = "google.golang.org/grpc"
|
|
||||||
packages = [
|
|
||||||
".",
|
|
||||||
"backoff",
|
|
||||||
"balancer",
|
|
||||||
"balancer/base",
|
|
||||||
"balancer/roundrobin",
|
|
||||||
"binarylog/grpc_binarylog_v1",
|
|
||||||
"codes",
|
|
||||||
"connectivity",
|
|
||||||
"credentials",
|
|
||||||
"credentials/internal",
|
|
||||||
"encoding",
|
|
||||||
"encoding/proto",
|
|
||||||
"grpclog",
|
|
||||||
"internal",
|
|
||||||
"internal/backoff",
|
|
||||||
"internal/balancerload",
|
|
||||||
"internal/binarylog",
|
|
||||||
"internal/buffer",
|
|
||||||
"internal/channelz",
|
|
||||||
"internal/envconfig",
|
|
||||||
"internal/grpcrand",
|
|
||||||
"internal/grpcsync",
|
|
||||||
"internal/resolver/dns",
|
|
||||||
"internal/resolver/passthrough",
|
|
||||||
"internal/syscall",
|
|
||||||
"internal/transport",
|
|
||||||
"keepalive",
|
|
||||||
"metadata",
|
|
||||||
"naming",
|
|
||||||
"peer",
|
|
||||||
"resolver",
|
|
||||||
"serviceconfig",
|
|
||||||
"stats",
|
|
||||||
"status",
|
|
||||||
"tap",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514"
|
|
||||||
version = "v1.25.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737"
|
|
||||||
name = "gopkg.in/yaml.v2"
|
|
||||||
packages = ["."]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce"
|
|
||||||
version = "v2.2.7"
|
|
||||||
|
|
||||||
[solve-meta]
|
|
||||||
analyzer-name = "dep"
|
|
||||||
analyzer-version = 1
|
|
||||||
input-imports = [
|
|
||||||
"contrib.go.opencensus.io/exporter/ocagent",
|
|
||||||
"github.com/dgrijalva/jwt-go",
|
|
||||||
"github.com/dimchansky/utfbom",
|
|
||||||
"github.com/mitchellh/go-homedir",
|
|
||||||
"github.com/stretchr/testify/require",
|
|
||||||
"go.opencensus.io/plugin/ochttp",
|
|
||||||
"go.opencensus.io/plugin/ochttp/propagation/tracecontext",
|
|
||||||
"go.opencensus.io/stats/view",
|
|
||||||
"go.opencensus.io/trace",
|
|
||||||
"golang.org/x/crypto/pkcs12",
|
|
||||||
"golang.org/x/lint/golint",
|
|
||||||
]
|
|
||||||
solver-name = "gps-cdcl"
|
|
||||||
solver-version = 1
|
|
||||||
59
vendor/github.com/Azure/go-autorest/Gopkg.toml
generated
vendored
59
vendor/github.com/Azure/go-autorest/Gopkg.toml
generated
vendored
@@ -1,59 +0,0 @@
|
|||||||
# Gopkg.toml example
|
|
||||||
#
|
|
||||||
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
|
|
||||||
# for detailed Gopkg.toml documentation.
|
|
||||||
#
|
|
||||||
# required = ["github.com/user/thing/cmd/thing"]
|
|
||||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
|
||||||
#
|
|
||||||
# [[constraint]]
|
|
||||||
# name = "github.com/user/project"
|
|
||||||
# version = "1.0.0"
|
|
||||||
#
|
|
||||||
# [[constraint]]
|
|
||||||
# name = "github.com/user/project2"
|
|
||||||
# branch = "dev"
|
|
||||||
# source = "github.com/myfork/project2"
|
|
||||||
#
|
|
||||||
# [[override]]
|
|
||||||
# name = "github.com/x/y"
|
|
||||||
# version = "2.4.0"
|
|
||||||
#
|
|
||||||
# [prune]
|
|
||||||
# non-go = false
|
|
||||||
# go-tests = true
|
|
||||||
# unused-packages = true
|
|
||||||
|
|
||||||
required = ["golang.org/x/lint/golint"]
|
|
||||||
|
|
||||||
[prune]
|
|
||||||
go-tests = true
|
|
||||||
unused-packages = true
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "contrib.go.opencensus.io/exporter/ocagent"
|
|
||||||
version = "0.6.0"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "github.com/dgrijalva/jwt-go"
|
|
||||||
version = "3.2.0"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "github.com/dimchansky/utfbom"
|
|
||||||
version = "1.1.0"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "github.com/mitchellh/go-homedir"
|
|
||||||
version = "1.1.0"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "github.com/stretchr/testify"
|
|
||||||
version = "1.3.0"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "go.opencensus.io"
|
|
||||||
version = "0.22.0"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
branch = "master"
|
|
||||||
name = "golang.org/x/crypto"
|
|
||||||
191
vendor/github.com/Azure/go-autorest/LICENSE
generated
vendored
191
vendor/github.com/Azure/go-autorest/LICENSE
generated
vendored
@@ -1,191 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright 2015 Microsoft Corporation
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
165
vendor/github.com/Azure/go-autorest/README.md
generated
vendored
165
vendor/github.com/Azure/go-autorest/README.md
generated
vendored
@@ -1,165 +0,0 @@
|
|||||||
# go-autorest
|
|
||||||
|
|
||||||
[](https://godoc.org/github.com/Azure/go-autorest/autorest)
|
|
||||||
[](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master)
|
|
||||||
[](https://goreportcard.com/report/Azure/go-autorest)
|
|
||||||
|
|
||||||
Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages.
|
|
||||||
|
|
||||||
An authentication client tested with Azure Active Directory (AAD) is also
|
|
||||||
provided in this repo in the package
|
|
||||||
`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package
|
|
||||||
is maintained only as part of the Azure Go SDK and is not related to other
|
|
||||||
"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD).
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
Package go-autorest implements an HTTP request pipeline suitable for use across
|
|
||||||
multiple goroutines and provides the shared routines used by packages generated
|
|
||||||
by [Autorest](https://github.com/Azure/autorest.go).
|
|
||||||
|
|
||||||
The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
|
|
||||||
and Responding. A typical pattern is:
|
|
||||||
|
|
||||||
```go
|
|
||||||
req, err := Prepare(&http.Request{},
|
|
||||||
token.WithAuthorization())
|
|
||||||
|
|
||||||
resp, err := Send(req,
|
|
||||||
WithLogging(logger),
|
|
||||||
DoErrorIfStatusCode(http.StatusInternalServerError),
|
|
||||||
DoCloseIfError(),
|
|
||||||
DoRetryForAttempts(5, time.Second))
|
|
||||||
|
|
||||||
err = Respond(resp,
|
|
||||||
ByDiscardingBody(),
|
|
||||||
ByClosing())
|
|
||||||
```
|
|
||||||
|
|
||||||
Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
|
|
||||||
and then pass the data along, pass the data first and then modify the result, or wrap themselves
|
|
||||||
around passing the data (such as a logger might do). Decorators run in the order provided. For
|
|
||||||
example, the following:
|
|
||||||
|
|
||||||
```go
|
|
||||||
req, err := Prepare(&http.Request{},
|
|
||||||
WithBaseURL("https://microsoft.com/"),
|
|
||||||
WithPath("a"),
|
|
||||||
WithPath("b"),
|
|
||||||
WithPath("c"))
|
|
||||||
```
|
|
||||||
|
|
||||||
will set the URL to:
|
|
||||||
|
|
||||||
```
|
|
||||||
https://microsoft.com/a/b/c
|
|
||||||
```
|
|
||||||
|
|
||||||
Preparers and Responders may be shared and re-used (assuming the underlying decorators support
|
|
||||||
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
|
|
||||||
shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
|
|
||||||
all bound together by means of input / output channels.
|
|
||||||
|
|
||||||
Decorators hold their passed state within a closure (such as the path components in the example
|
|
||||||
above). Be careful to share Preparers and Responders only in a context where such held state
|
|
||||||
applies. For example, it may not make sense to share a Preparer that applies a query string from a
|
|
||||||
fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
|
|
||||||
struct (e.g., `ByUnmarshallingJson`) is likely incorrect.
|
|
||||||
|
|
||||||
Errors raised by autorest objects and methods will conform to the `autorest.Error` interface.
|
|
||||||
|
|
||||||
See the included examples for more detail. For details on the suggested use of this package by
|
|
||||||
generated clients, see the Client described below.
|
|
||||||
|
|
||||||
## Helpers
|
|
||||||
|
|
||||||
### Handling Swagger Dates
|
|
||||||
|
|
||||||
The Swagger specification (https://swagger.io) that drives AutoRest
|
|
||||||
(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
|
|
||||||
github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct
|
|
||||||
parsing and formatting.
|
|
||||||
|
|
||||||
### Handling Empty Values
|
|
||||||
|
|
||||||
In JSON, missing values have different semantics than empty values. This is especially true for
|
|
||||||
services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains
|
|
||||||
only those values to modify. Missing values are to be left unchanged. Developers, then, require a
|
|
||||||
means to both specify an empty value and to leave the value out of the submitted JSON.
|
|
||||||
|
|
||||||
The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits
|
|
||||||
empty values from the rendered JSON. Since Go defines default values for all base types (such as ""
|
|
||||||
for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package
|
|
||||||
treats default values as meaning empty, omitting them from the rendered JSON. This means that, using
|
|
||||||
the Go base types encoded through the default JSON package, it is not possible to create JSON to
|
|
||||||
clear a value at the server.
|
|
||||||
|
|
||||||
The workaround within the Go community is to use pointers to base types in lieu of base types within
|
|
||||||
structures that map to JSON. For example, instead of a value of type `string`, the workaround uses
|
|
||||||
`*string`. While this enables distinguishing empty values from those to be unchanged, creating
|
|
||||||
pointers to a base type (notably constant, in-line values) requires additional variables. This, for
|
|
||||||
example,
|
|
||||||
|
|
||||||
```go
|
|
||||||
s := struct {
|
|
||||||
S *string
|
|
||||||
}{ S: &"foo" }
|
|
||||||
```
|
|
||||||
fails, while, this
|
|
||||||
|
|
||||||
```go
|
|
||||||
v := "foo"
|
|
||||||
s := struct {
|
|
||||||
S *string
|
|
||||||
}{ S: &v }
|
|
||||||
```
|
|
||||||
succeeds.
|
|
||||||
|
|
||||||
To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
|
|
||||||
Go base types which have Swagger analogs. It also provides a helper that converts between
|
|
||||||
`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
|
|
||||||
associated with a key should be cleared. With the helpers, the previous example becomes
|
|
||||||
|
|
||||||
```go
|
|
||||||
s := struct {
|
|
||||||
S *string
|
|
||||||
}{ S: to.StringPtr("foo") }
|
|
||||||
```
|
|
||||||
|
|
||||||
## Install
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get github.com/Azure/go-autorest/autorest
|
|
||||||
go get github.com/Azure/go-autorest/autorest/azure
|
|
||||||
go get github.com/Azure/go-autorest/autorest/date
|
|
||||||
go get github.com/Azure/go-autorest/autorest/to
|
|
||||||
```
|
|
||||||
|
|
||||||
### Using with Go Modules
|
|
||||||
In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules.
|
|
||||||
|
|
||||||
- autorest/adal
|
|
||||||
- autorest/azure/auth
|
|
||||||
- autorest/azure/cli
|
|
||||||
- autorest/date
|
|
||||||
- autorest/mocks
|
|
||||||
- autorest/to
|
|
||||||
- autorest/validation
|
|
||||||
- autorest
|
|
||||||
- logger
|
|
||||||
- tracing
|
|
||||||
|
|
||||||
Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules.
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
See LICENSE file.
|
|
||||||
|
|
||||||
-----
|
|
||||||
|
|
||||||
This project has adopted the [Microsoft Open Source Code of
|
|
||||||
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
|
|
||||||
see the [Code of Conduct
|
|
||||||
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
|
|
||||||
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
|
|
||||||
questions or comments.
|
|
||||||
191
vendor/github.com/Azure/go-autorest/autorest/LICENSE
generated
vendored
191
vendor/github.com/Azure/go-autorest/autorest/LICENSE
generated
vendored
@@ -1,191 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright 2015 Microsoft Corporation
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
191
vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
generated
vendored
191
vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
generated
vendored
@@ -1,191 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright 2015 Microsoft Corporation
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
294
vendor/github.com/Azure/go-autorest/autorest/adal/README.md
generated
vendored
294
vendor/github.com/Azure/go-autorest/autorest/adal/README.md
generated
vendored
@@ -1,294 +0,0 @@
|
|||||||
# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `adal` to `azidentiy` please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/).
|
|
||||||
|
|
||||||
# Azure Active Directory authentication for Go
|
|
||||||
|
|
||||||
This is a standalone package for authenticating with Azure Active
|
|
||||||
Directory from other Go libraries and applications, in particular the [Azure SDK
|
|
||||||
for Go](https://github.com/Azure/azure-sdk-for-go).
|
|
||||||
|
|
||||||
Note: Despite the package's name it is not related to other "ADAL" libraries
|
|
||||||
maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
|
|
||||||
should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
|
|
||||||
or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
|
|
||||||
trackers.
|
|
||||||
|
|
||||||
## Install
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get -u github.com/Azure/go-autorest/autorest/adal
|
|
||||||
```
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
|
|
||||||
|
|
||||||
### Register an Azure AD Application with secret
|
|
||||||
|
|
||||||
|
|
||||||
1. Register a new application with a `secret` credential
|
|
||||||
|
|
||||||
```
|
|
||||||
az ad app create \
|
|
||||||
--display-name example-app \
|
|
||||||
--homepage https://example-app/home \
|
|
||||||
--identifier-uris https://example-app/app \
|
|
||||||
--password secret
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Create a service principal using the `Application ID` from previous step
|
|
||||||
|
|
||||||
```
|
|
||||||
az ad sp create --id "Application ID"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace `Application ID` with `appId` from step 1.
|
|
||||||
|
|
||||||
### Register an Azure AD Application with certificate
|
|
||||||
|
|
||||||
1. Create a private key
|
|
||||||
|
|
||||||
```
|
|
||||||
openssl genrsa -out "example-app.key" 2048
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Create the certificate
|
|
||||||
|
|
||||||
```
|
|
||||||
openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
|
|
||||||
openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
|
|
||||||
```
|
|
||||||
|
|
||||||
3. Create the PKCS12 version of the certificate containing also the private key
|
|
||||||
|
|
||||||
```
|
|
||||||
openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
4. Register a new application with the certificate content form `example-app.crt`
|
|
||||||
|
|
||||||
```
|
|
||||||
certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
|
|
||||||
|
|
||||||
az ad app create \
|
|
||||||
--display-name example-app \
|
|
||||||
--homepage https://example-app/home \
|
|
||||||
--identifier-uris https://example-app/app \
|
|
||||||
--key-usage Verify --end-date 2018-01-01 \
|
|
||||||
--key-value "${certificateContents}"
|
|
||||||
```
|
|
||||||
|
|
||||||
5. Create a service principal using the `Application ID` from previous step
|
|
||||||
|
|
||||||
```
|
|
||||||
az ad sp create --id "APPLICATION_ID"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace `APPLICATION_ID` with `appId` from step 4.
|
|
||||||
|
|
||||||
|
|
||||||
### Grant the necessary permissions
|
|
||||||
|
|
||||||
Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
|
|
||||||
level. There is a set of [pre-defined roles](https://docs.microsoft.com/azure/active-directory/role-based-access-built-in-roles)
|
|
||||||
which can be assigned to a service principal of an Azure AD application depending of your needs.
|
|
||||||
|
|
||||||
```
|
|
||||||
az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step.
|
|
||||||
* Replace the `ROLE_NAME` with a role name of your choice.
|
|
||||||
|
|
||||||
It is also possible to define custom role definitions.
|
|
||||||
|
|
||||||
```
|
|
||||||
az role definition create --role-definition role-definition.json
|
|
||||||
```
|
|
||||||
|
|
||||||
* Check [custom roles](https://docs.microsoft.com/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file.
|
|
||||||
|
|
||||||
|
|
||||||
### Acquire Access Token
|
|
||||||
|
|
||||||
The common configuration used by all flows:
|
|
||||||
|
|
||||||
```Go
|
|
||||||
const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
|
|
||||||
tenantID := "TENANT_ID"
|
|
||||||
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
|
|
||||||
|
|
||||||
applicationID := "APPLICATION_ID"
|
|
||||||
|
|
||||||
callback := func(token adal.Token) error {
|
|
||||||
// This is called after the token is acquired
|
|
||||||
}
|
|
||||||
|
|
||||||
// The resource for which the token is acquired
|
|
||||||
resource := "https://management.core.windows.net/"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace the `TENANT_ID` with your tenant ID.
|
|
||||||
* Replace the `APPLICATION_ID` with the value from previous section.
|
|
||||||
|
|
||||||
#### Client Credentials
|
|
||||||
|
|
||||||
```Go
|
|
||||||
applicationSecret := "APPLICATION_SECRET"
|
|
||||||
|
|
||||||
spt, err := adal.NewServicePrincipalToken(
|
|
||||||
*oauthConfig,
|
|
||||||
appliationID,
|
|
||||||
applicationSecret,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Acquire a new access token
|
|
||||||
err = spt.Refresh()
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace the `APPLICATION_SECRET` with the `password` value from previous section.
|
|
||||||
|
|
||||||
#### Client Certificate
|
|
||||||
|
|
||||||
```Go
|
|
||||||
certificatePath := "./example-app.pfx"
|
|
||||||
|
|
||||||
certData, err := ioutil.ReadFile(certificatePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the certificate and private key from pfx file
|
|
||||||
certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromCertificate(
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
certificate,
|
|
||||||
rsaPrivateKey,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
// Acquire a new access token
|
|
||||||
err = spt.Refresh()
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
* Update the certificate path to point to the example-app.pfx file which was created in previous section.
|
|
||||||
|
|
||||||
|
|
||||||
#### Device Code
|
|
||||||
|
|
||||||
```Go
|
|
||||||
oauthClient := &http.Client{}
|
|
||||||
|
|
||||||
// Acquire the device code
|
|
||||||
deviceCode, err := adal.InitiateDeviceAuth(
|
|
||||||
oauthClient,
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
resource)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display the authentication message
|
|
||||||
fmt.Println(*deviceCode.Message)
|
|
||||||
|
|
||||||
// Wait here until the user is authenticated
|
|
||||||
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromManualToken(
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
resource,
|
|
||||||
*token,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Username password authenticate
|
|
||||||
|
|
||||||
```Go
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
username,
|
|
||||||
password,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Authorization code authenticate
|
|
||||||
|
|
||||||
``` Go
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
clientSecret,
|
|
||||||
authorizationCode,
|
|
||||||
redirectURI,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
err = spt.Refresh()
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Command Line Tool
|
|
||||||
|
|
||||||
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
|
|
||||||
|
|
||||||
```
|
|
||||||
adal -h
|
|
||||||
|
|
||||||
Usage of ./adal:
|
|
||||||
-applicationId string
|
|
||||||
application id
|
|
||||||
-certificatePath string
|
|
||||||
path to pk12/PFC application certificate
|
|
||||||
-mode string
|
|
||||||
authentication mode (device, secret, cert, refresh) (default "device")
|
|
||||||
-resource string
|
|
||||||
resource for which the token is requested
|
|
||||||
-secret string
|
|
||||||
application secret
|
|
||||||
-tenantId string
|
|
||||||
tenant id
|
|
||||||
-tokenCachePath string
|
|
||||||
location of oath token cache (default "/home/cgc/.adal/accessToken.json")
|
|
||||||
```
|
|
||||||
|
|
||||||
Example acquire a token for `https://management.core.windows.net/` using device code flow:
|
|
||||||
|
|
||||||
```
|
|
||||||
adal -mode device \
|
|
||||||
-applicationId "APPLICATION_ID" \
|
|
||||||
-tenantId "TENANT_ID" \
|
|
||||||
-resource https://management.core.windows.net/
|
|
||||||
|
|
||||||
```
|
|
||||||
151
vendor/github.com/Azure/go-autorest/autorest/adal/config.go
generated
vendored
151
vendor/github.com/Azure/go-autorest/autorest/adal/config.go
generated
vendored
@@ -1,151 +0,0 @@
|
|||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OAuthConfig represents the endpoints needed
|
|
||||||
// in OAuth operations
|
|
||||||
type OAuthConfig struct {
|
|
||||||
AuthorityEndpoint url.URL `json:"authorityEndpoint"`
|
|
||||||
AuthorizeEndpoint url.URL `json:"authorizeEndpoint"`
|
|
||||||
TokenEndpoint url.URL `json:"tokenEndpoint"`
|
|
||||||
DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsZero returns true if the OAuthConfig object is zero-initialized.
|
|
||||||
func (oac OAuthConfig) IsZero() bool {
|
|
||||||
return oac == OAuthConfig{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateStringParam(param, name string) error {
|
|
||||||
if len(param) == 0 {
|
|
||||||
return fmt.Errorf("parameter '" + name + "' cannot be empty")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
|
|
||||||
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
|
|
||||||
apiVer := "1.0"
|
|
||||||
return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
|
|
||||||
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
|
|
||||||
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
|
|
||||||
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
api := ""
|
|
||||||
// it's legal for tenantID to be empty so don't validate it
|
|
||||||
if apiVersion != nil {
|
|
||||||
if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
api = fmt.Sprintf("?api-version=%s", *apiVersion)
|
|
||||||
}
|
|
||||||
u, err := url.Parse(activeDirectoryEndpoint)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
authorityURL, err := u.Parse(tenantID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &OAuthConfig{
|
|
||||||
AuthorityEndpoint: *authorityURL,
|
|
||||||
AuthorizeEndpoint: *authorizeURL,
|
|
||||||
TokenEndpoint: *tokenURL,
|
|
||||||
DeviceCodeEndpoint: *deviceCodeURL,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MultiTenantOAuthConfig provides endpoints for primary and aulixiary tenant IDs.
|
|
||||||
type MultiTenantOAuthConfig interface {
|
|
||||||
PrimaryTenant() *OAuthConfig
|
|
||||||
AuxiliaryTenants() []*OAuthConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// OAuthOptions contains optional OAuthConfig creation arguments.
|
|
||||||
type OAuthOptions struct {
|
|
||||||
APIVersion string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c OAuthOptions) apiVersion() string {
|
|
||||||
if c.APIVersion != "" {
|
|
||||||
return fmt.Sprintf("?api-version=%s", c.APIVersion)
|
|
||||||
}
|
|
||||||
return "1.0"
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration.
|
|
||||||
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
|
|
||||||
func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
|
|
||||||
if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
|
|
||||||
return nil, errors.New("must specify one to three auxiliary tenants")
|
|
||||||
}
|
|
||||||
mtCfg := multiTenantOAuthConfig{
|
|
||||||
cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
|
|
||||||
}
|
|
||||||
apiVer := options.apiVersion()
|
|
||||||
pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
|
|
||||||
}
|
|
||||||
mtCfg.cfgs[0] = pri
|
|
||||||
for i := range auxiliaryTenantIDs {
|
|
||||||
aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
|
|
||||||
}
|
|
||||||
mtCfg.cfgs[i+1] = aux
|
|
||||||
}
|
|
||||||
return mtCfg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type multiTenantOAuthConfig struct {
|
|
||||||
// first config in the slice is the primary tenant
|
|
||||||
cfgs []*OAuthConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
|
|
||||||
return m.cfgs[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
|
|
||||||
return m.cfgs[1:]
|
|
||||||
}
|
|
||||||
273
vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
generated
vendored
273
vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
generated
vendored
@@ -1,273 +0,0 @@
|
|||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
/*
|
|
||||||
This file is largely based on rjw57/oauth2device's code, with the follow differences:
|
|
||||||
* scope -> resource, and only allow a single one
|
|
||||||
* receive "Message" in the DeviceCode struct and show it to users as the prompt
|
|
||||||
* azure-xplat-cli has the following behavior that this emulates:
|
|
||||||
- does not send client_secret during the token exchange
|
|
||||||
- sends resource again in the token exchange request
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// logPrefix tags every error produced by this file so failures can be
	// attributed to the device-token flow.
	logPrefix = "autorest/adal/devicetoken:"
)

var (
	// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
	ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)

	// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
	ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)

	// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
	ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)

	// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
	ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)

	// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
	ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)

	// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
	ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)

	// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
	ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)

	// Message fragments shared by the error-formatting paths in this file.
	errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
	errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
	errTokenSendingFails  = "Error occurred while sending request with device code for a token"
	errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
	errStatusNotOK        = "Error HTTP status != 200"
)
// DeviceCode is the object returned by the device auth endpoint.
// It contains information to instruct the user to complete the auth flow.
type DeviceCode struct {
	DeviceCode      *string `json:"device_code,omitempty"`
	UserCode        *string `json:"user_code,omitempty"`
	VerificationURL *string `json:"verification_url,omitempty"`
	ExpiresIn       *int64  `json:"expires_in,string,omitempty"`
	Interval        *int64  `json:"interval,string,omitempty"`

	// Message is Azure specific: the prompt shown to users (see file header).
	Message *string `json:"message"` // Azure specific

	// The following fields are not part of the wire format: they are stored
	// when initiating the flow and replayed during the token exchange.
	Resource    string // store the following, stored when initiating, used when exchanging
	OAuthConfig OAuthConfig
	ClientID    string
}

// TokenError is the object returned by the token exchange endpoint
// when something is amiss.
type TokenError struct {
	Error            *string `json:"error,omitempty"`
	ErrorCodes       []int   `json:"error_codes,omitempty"`
	ErrorDescription *string `json:"error_description,omitempty"`
	Timestamp        *string `json:"timestamp,omitempty"`
	TraceID          *string `json:"trace_id,omitempty"`
}

// deviceToken is the object returned by the token exchange endpoint.
// It can either look like a Token or an ErrorToken, so put both here
// and check for presence of "Error" to know if we are in error state.
type deviceToken struct {
	Token
	TokenError
}
// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
//
// Deprecated: use InitiateDeviceAuthWithContext() instead.
func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
	// Delegates to the context-aware variant with a background context.
	return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource)
}
// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
	// Per the file header: Azure uses "resource" instead of OAuth2's "scope",
	// and client_secret is deliberately not sent.
	v := url.Values{
		"client_id": []string{clientID},
		"resource":  []string{resource},
	}

	s := v.Encode()
	body := ioutil.NopCloser(strings.NewReader(s))

	req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
	}

	req.ContentLength = int64(len(s))
	req.Header.Set(contentType, mimeTypeFormPost)
	resp, err := sender.Do(req.WithContext(ctx))
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
	}
	defer resp.Body.Close()

	rb, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
	}

	// A 200 with an effectively empty body cannot yield a usable device code.
	if len(strings.Trim(string(rb), " ")) == 0 {
		return nil, ErrDeviceCodeEmpty
	}

	var code DeviceCode
	err = json.Unmarshal(rb, &code)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
	}

	// Stash the request parameters on the DeviceCode so the token-exchange
	// calls (CheckForUserCompletion*) can replay them later.
	code.ClientID = clientID
	code.Resource = resource
	code.OAuthConfig = oauthConfig

	return &code, nil
}
// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
// to see if the device flow has: been completed, timed out, or otherwise failed.
//
// Deprecated: use CheckForUserCompletionWithContext() instead.
func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
	// Delegates to the context-aware variant with a background context.
	return CheckForUserCompletionWithContext(context.Background(), sender, code)
}
// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint
// to see if the device flow has: been completed, timed out, or otherwise failed.
func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
	// Replay the parameters captured during InitiateDeviceAuth*; per the file
	// header, client_secret is intentionally omitted and resource is re-sent.
	v := url.Values{
		"client_id":  []string{code.ClientID},
		"code":       []string{*code.DeviceCode},
		"grant_type": []string{OAuthGrantTypeDeviceCode},
		"resource":   []string{code.Resource},
	}

	s := v.Encode()
	body := ioutil.NopCloser(strings.NewReader(s))

	req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
	}

	req.ContentLength = int64(len(s))
	req.Header.Set(contentType, mimeTypeFormPost)
	resp, err := sender.Do(req.WithContext(ctx))
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
	}
	defer resp.Body.Close()

	rb, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
	}

	// NOTE: a non-200 status with a non-empty body is deliberately NOT fatal
	// here: the endpoint reports in-progress states (e.g. authorization_pending)
	// as JSON error payloads, which are decoded and classified below.
	if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
	}
	if len(strings.Trim(string(rb), " ")) == 0 {
		return nil, ErrOAuthTokenEmpty
	}

	var token deviceToken
	err = json.Unmarshal(rb, &token)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
	}

	// deviceToken embeds both Token and TokenError; the presence of Error
	// distinguishes an OAuth error response from a granted token.
	if token.Error == nil {
		return &token.Token, nil
	}

	// Map the well-known device-flow error codes onto sentinel errors so
	// callers (e.g. WaitForUserCompletion*) can poll on them.
	switch *token.Error {
	case "authorization_pending":
		return nil, ErrDeviceAuthorizationPending
	case "slow_down":
		return nil, ErrDeviceSlowDown
	case "access_denied":
		return nil, ErrDeviceAccessDenied
	case "code_expired":
		return nil, ErrDeviceCodeExpired
	default:
		// return a more meaningful error message if available
		if token.ErrorDescription != nil {
			return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription)
		}
		return nil, ErrDeviceGeneric
	}
}
// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
//
// Deprecated: use WaitForUserCompletionWithContext() instead.
func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
	// Delegates to the context-aware variant with a background context.
	return WaitForUserCompletionWithContext(context.Background(), sender, code)
}
// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error
|
|
||||||
// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
|
|
||||||
func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
|
|
||||||
intervalDuration := time.Duration(*code.Interval) * time.Second
|
|
||||||
waitDuration := intervalDuration
|
|
||||||
|
|
||||||
for {
|
|
||||||
token, err := CheckForUserCompletionWithContext(ctx, sender, code)
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
return token, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch err {
|
|
||||||
case ErrDeviceSlowDown:
|
|
||||||
waitDuration += waitDuration
|
|
||||||
case ErrDeviceAuthorizationPending:
|
|
||||||
// noop
|
|
||||||
default: // everything else is "fatal" to us
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if waitDuration > (intervalDuration * 3) {
|
|
||||||
return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-time.After(waitDuration):
|
|
||||||
// noop
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil, ctx.Err()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
25
vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
generated
vendored
25
vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
generated
vendored
@@ -1,25 +0,0 @@
|
|||||||
//go:build modhack
|
|
||||||
// +build modhack
|
|
||||||
|
|
||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// This file, and the github.com/Azure/go-autorest import, won't actually become part of
|
|
||||||
// the resultant binary.
|
|
||||||
|
|
||||||
// Necessary for safely adding multi-module repo.
|
|
||||||
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
|
|
||||||
import _ "github.com/Azure/go-autorest"
|
|
||||||
135
vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
generated
vendored
135
vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
generated
vendored
@@ -1,135 +0,0 @@
|
|||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/x509"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/pkcs12"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// ErrMissingCertificate is returned when no local certificate is found in the provided PFX data.
	ErrMissingCertificate = errors.New("adal: certificate missing")

	// ErrMissingPrivateKey is returned when no private key is found in the provided PFX data.
	ErrMissingPrivateKey = errors.New("adal: private key missing")
)
// LoadToken restores a Token object from a file located at 'path'.
|
|
||||||
func LoadToken(path string) (*Token, error) {
|
|
||||||
file, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
var token Token
|
|
||||||
|
|
||||||
dec := json.NewDecoder(file)
|
|
||||||
if err = dec.Decode(&token); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
|
|
||||||
}
|
|
||||||
return &token, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveToken persists an oauth token at the given location on disk.
|
|
||||||
// It moves the new file into place so it can safely be used to replace an existing file
|
|
||||||
// that maybe accessed by multiple processes.
|
|
||||||
func SaveToken(path string, mode os.FileMode, token Token) error {
|
|
||||||
dir := filepath.Dir(path)
|
|
||||||
err := os.MkdirAll(dir, os.ModePerm)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
newFile, err := ioutil.TempFile(dir, "token")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create the temp file to write the token: %v", err)
|
|
||||||
}
|
|
||||||
tempPath := newFile.Name()
|
|
||||||
|
|
||||||
if err := json.NewEncoder(newFile).Encode(token); err != nil {
|
|
||||||
return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
|
|
||||||
}
|
|
||||||
if err := newFile.Close(); err != nil {
|
|
||||||
return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Atomic replace to avoid multi-writer file corruptions
|
|
||||||
if err := os.Rename(tempPath, path); err != nil {
|
|
||||||
return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
|
|
||||||
}
|
|
||||||
if err := os.Chmod(path, mode); err != nil {
|
|
||||||
return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
// The PFX data must contain a private key along with a certificate whose public key matches that of the
// private key or an error is returned.
// If the private key is not password protected pass the empty string for password.
func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
	// Convert the PFX container into its constituent PEM blocks.
	blocks, err := pkcs12.ToPEM(pfxData, password)
	if err != nil {
		return nil, nil, err
	}
	// first extract the private key
	// NOTE(review): only PKCS#1-encoded "PRIVATE KEY" blocks are handled here;
	// the first such block wins.
	var priv *rsa.PrivateKey
	for _, block := range blocks {
		if block.Type == "PRIVATE KEY" {
			priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
			if err != nil {
				return nil, nil, err
			}
			break
		}
	}
	if priv == nil {
		return nil, nil, ErrMissingPrivateKey
	}
	// now find the certificate with the matching public key of our private key
	var cert *x509.Certificate
	for _, block := range blocks {
		if block.Type == "CERTIFICATE" {
			pcert, err := x509.ParseCertificate(block.Bytes)
			if err != nil {
				return nil, nil, err
			}
			certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
			if !ok {
				// not an RSA certificate; keep looking
				continue
			}
			// An RSA public key matches when both the exponent and modulus agree.
			if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
				// found a match
				cert = pcert
				break
			}
		}
	}
	if cert == nil {
		return nil, nil, ErrMissingCertificate
	}
	return cert, priv, nil
}
101
vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
generated
vendored
101
vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
generated
vendored
@@ -1,101 +0,0 @@
|
|||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/http/cookiejar"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/tracing"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// HTTP header name and MIME type used when posting form-encoded bodies.
	contentType      = "Content-Type"
	mimeTypeFormPost = "application/x-www-form-urlencoded"
)

// DO NOT ACCESS THIS DIRECTLY. go through sender()
var defaultSender Sender

// defaultSenderInit guards the lazy, one-time construction of defaultSender.
var defaultSenderInit = &sync.Once{}

// Sender is the interface that wraps the Do method to send HTTP requests.
//
// The standard http.Client conforms to this interface.
type Sender interface {
	Do(*http.Request) (*http.Response, error)
}
// SenderFunc is a method that implements the Sender interface.
type SenderFunc func(*http.Request) (*http.Response, error)

// Do implements the Sender interface on SenderFunc.
func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
	return sf(r)
}

// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
func CreateSender(decorators ...SendDecorator) Sender {
	// sender() lazily builds the shared default client.
	return DecorateSender(sender(), decorators...)
}
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
|
|
||||||
// the Sender. Decorators are applied in the order received, but their affect upon the request
|
|
||||||
// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
|
|
||||||
// post-decorator (pass the http.Request along and react to the results in http.Response).
|
|
||||||
func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
|
|
||||||
for _, decorate := range decorators {
|
|
||||||
s = decorate(s)
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// sender returns the lazily-initialized, package-wide default Sender: an
// http.Client with a cookie jar and a TLS 1.2+ transport, optionally wrapped
// in a tracing transport.
func sender() Sender {
	// note that we can't init defaultSender in init() since it will
	// execute before calling code has had a chance to enable tracing
	defaultSenderInit.Do(func() {
		// copied from http.DefaultTransport with a TLS minimum version.
		transport := &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).DialContext,
			ForceAttemptHTTP2:     true,
			MaxIdleConns:          100,
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
			TLSClientConfig: &tls.Config{
				MinVersion: tls.VersionTLS12,
			},
		}
		var roundTripper http.RoundTripper = transport
		// Wrap the transport only when tracing has been enabled by the caller.
		if tracing.IsEnabled() {
			roundTripper = tracing.NewTransport(transport)
		}
		// cookiejar.New with a nil options never returns an error.
		j, _ := cookiejar.New(nil)
		defaultSender = &http.Client{Jar: j, Transport: roundTripper}
	})
	return defaultSender
}
1396
vendor/github.com/Azure/go-autorest/autorest/adal/token.go
generated
vendored
1396
vendor/github.com/Azure/go-autorest/autorest/adal/token.go
generated
vendored
File diff suppressed because it is too large
Load Diff
76
vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go
generated
vendored
76
vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go
generated
vendored
@@ -1,76 +0,0 @@
|
|||||||
//go:build go1.13
|
|
||||||
// +build go1.13
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package adal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// getMSIEndpoint probes the MSI endpoint with the configured api-version,
// bounding the attempt to two seconds. Errors from building the request are
// intentionally ignored; a failure surfaces through sender.Do.
func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
	tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()
	// http.NewRequestWithContext() was added in Go 1.13
	req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil)
	q := req.URL.Query()
	q.Add("api-version", msiAPIVersion)
	req.URL.RawQuery = q.Encode()
	return sender.Do(req)
}
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
|
|
||||||
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
|
|
||||||
func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
|
|
||||||
if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
|
|
||||||
return fmt.Errorf("failed to refresh primary token: %w", err)
|
|
||||||
}
|
|
||||||
for _, aux := range mt.AuxiliaryTokens {
|
|
||||||
if err := aux.EnsureFreshWithContext(ctx); err != nil {
|
|
||||||
return fmt.Errorf("failed to refresh auxiliary token: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RefreshWithContext obtains a fresh token for the Service Principal.
|
|
||||||
func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
|
|
||||||
if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
|
|
||||||
return fmt.Errorf("failed to refresh primary token: %w", err)
|
|
||||||
}
|
|
||||||
for _, aux := range mt.AuxiliaryTokens {
|
|
||||||
if err := aux.RefreshWithContext(ctx); err != nil {
|
|
||||||
return fmt.Errorf("failed to refresh auxiliary token: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RefreshExchangeWithContext refreshes the token, but for a different resource.
|
|
||||||
func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
|
|
||||||
if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
|
|
||||||
return fmt.Errorf("failed to refresh primary token: %w", err)
|
|
||||||
}
|
|
||||||
for _, aux := range mt.AuxiliaryTokens {
|
|
||||||
if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
|
|
||||||
return fmt.Errorf("failed to refresh auxiliary token: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
75
vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go
generated
vendored
75
vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go
generated
vendored
@@ -1,75 +0,0 @@
|
|||||||
//go:build !go1.13
|
|
||||||
// +build !go1.13
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package adal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// getMSIEndpoint probes the MSI endpoint with the configured api-version,
// bounding the attempt to two seconds. This pre-1.13 variant attaches the
// context via WithContext since http.NewRequestWithContext is unavailable.
func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
	tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()
	// Request-construction errors are intentionally ignored; a bad request
	// surfaces through sender.Do.
	req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil)
	req = req.WithContext(tempCtx)
	q := req.URL.Query()
	q.Add("api-version", msiAPIVersion)
	req.URL.RawQuery = q.Encode()
	return sender.Do(req)
}
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
|
|
||||||
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
|
|
||||||
func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
|
|
||||||
if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, aux := range mt.AuxiliaryTokens {
|
|
||||||
if err := aux.EnsureFreshWithContext(ctx); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RefreshWithContext obtains a fresh token for the Service Principal.
|
|
||||||
func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
|
|
||||||
if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, aux := range mt.AuxiliaryTokens {
|
|
||||||
if err := aux.RefreshWithContext(ctx); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RefreshExchangeWithContext refreshes the token, but for a different resource.
|
|
||||||
func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
|
|
||||||
if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, aux := range mt.AuxiliaryTokens {
|
|
||||||
if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
45
vendor/github.com/Azure/go-autorest/autorest/adal/version.go
generated
vendored
45
vendor/github.com/Azure/go-autorest/autorest/adal/version.go
generated
vendored
@@ -1,45 +0,0 @@
|
|||||||
package adal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// number is the semantic version of the adal module, baked into the user
// agent string below.
const number = "v1.0.0"

var (
	// ua is the package-level user agent string; read via UserAgent and
	// extended at runtime via AddToUserAgent.
	ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s",
		runtime.Version(),
		runtime.GOARCH,
		runtime.GOOS,
		number,
	)
)
// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version.
func UserAgent() string {
	return ua
}
// AddToUserAgent adds an extension to the current user agent
|
|
||||||
func AddToUserAgent(extension string) error {
|
|
||||||
if extension != "" {
|
|
||||||
ua = fmt.Sprintf("%s %s", ua, extension)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua)
|
|
||||||
}
|
|
||||||
353
vendor/github.com/Azure/go-autorest/autorest/authorization.go
generated
vendored
353
vendor/github.com/Azure/go-autorest/autorest/authorization.go
generated
vendored
@@ -1,353 +0,0 @@
|
|||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest/adal"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
bearerChallengeHeader = "Www-Authenticate"
|
|
||||||
bearer = "Bearer"
|
|
||||||
tenantID = "tenantID"
|
|
||||||
apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key"
|
|
||||||
bingAPISdkHeader = "X-BingApis-SDK-Client"
|
|
||||||
golangBingAPISdkHeaderValue = "Go-SDK"
|
|
||||||
authorization = "Authorization"
|
|
||||||
basic = "Basic"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Authorizer is the interface that provides a PrepareDecorator used to supply request
|
|
||||||
// authorization. Most often, the Authorizer decorator runs last so it has access to the full
|
|
||||||
// state of the formed HTTP request.
|
|
||||||
type Authorizer interface {
|
|
||||||
WithAuthorization() PrepareDecorator
|
|
||||||
}
|
|
||||||
|
|
||||||
// NullAuthorizer implements a default, "do nothing" Authorizer.
|
|
||||||
type NullAuthorizer struct{}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that does nothing.
|
|
||||||
func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return WithNothing()
|
|
||||||
}
|
|
||||||
|
|
||||||
// APIKeyAuthorizer implements API Key authorization.
|
|
||||||
type APIKeyAuthorizer struct {
|
|
||||||
headers map[string]interface{}
|
|
||||||
queryParameters map[string]interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers.
|
|
||||||
func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
|
|
||||||
return NewAPIKeyAuthorizer(headers, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters.
|
|
||||||
func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
|
|
||||||
return NewAPIKeyAuthorizer(nil, queryParameters)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers.
|
|
||||||
func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
|
|
||||||
return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters.
|
|
||||||
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
|
|
||||||
type CognitiveServicesAuthorizer struct {
|
|
||||||
subscriptionKey string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCognitiveServicesAuthorizer is
|
|
||||||
func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
|
|
||||||
return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization is
|
|
||||||
func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
headers := make(map[string]interface{})
|
|
||||||
headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
|
|
||||||
headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
|
|
||||||
|
|
||||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
|
||||||
}
|
|
||||||
|
|
||||||
// BearerAuthorizer implements the bearer authorization
|
|
||||||
type BearerAuthorizer struct {
|
|
||||||
tokenProvider adal.OAuthTokenProvider
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider
|
|
||||||
func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
|
|
||||||
return &BearerAuthorizer{tokenProvider: tp}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
|
||||||
// value is "Bearer " followed by the token.
|
|
||||||
//
|
|
||||||
// By default, the token will be automatically refreshed through the Refresher interface.
|
|
||||||
func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
// the ordering is important here, prefer RefresherWithContext if available
|
|
||||||
if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok {
|
|
||||||
err = refresher.EnsureFreshWithContext(r.Context())
|
|
||||||
} else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok {
|
|
||||||
err = refresher.EnsureFresh()
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
var resp *http.Response
|
|
||||||
if tokError, ok := err.(adal.TokenRefreshError); ok {
|
|
||||||
resp = tokError.Response()
|
|
||||||
}
|
|
||||||
return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
|
|
||||||
"Failed to refresh the Token for request to %s", r.URL)
|
|
||||||
}
|
|
||||||
return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST.
|
|
||||||
func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider {
|
|
||||||
return ba.tokenProvider
|
|
||||||
}
|
|
||||||
|
|
||||||
// BearerAuthorizerCallbackFunc is the authentication callback signature.
|
|
||||||
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
|
|
||||||
|
|
||||||
// BearerAuthorizerCallback implements bearer authorization via a callback.
|
|
||||||
type BearerAuthorizerCallback struct {
|
|
||||||
sender Sender
|
|
||||||
callback BearerAuthorizerCallbackFunc
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
|
|
||||||
// is invoked when the HTTP request is submitted.
|
|
||||||
func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
|
|
||||||
if s == nil {
|
|
||||||
s = sender(tls.RenegotiateNever)
|
|
||||||
}
|
|
||||||
return &BearerAuthorizerCallback{sender: s, callback: callback}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
|
|
||||||
// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
|
|
||||||
//
|
|
||||||
// By default, the token will be automatically refreshed through the Refresher interface.
|
|
||||||
func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
// make a copy of the request and remove the body as it's not
|
|
||||||
// required and avoids us having to create a copy of it.
|
|
||||||
rCopy := *r
|
|
||||||
removeRequestBody(&rCopy)
|
|
||||||
|
|
||||||
resp, err := bacb.sender.Do(&rCopy)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
DrainResponseBody(resp)
|
|
||||||
if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) {
|
|
||||||
bc, err := newBearerChallenge(resp.Header)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
if bacb.callback != nil {
|
|
||||||
ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
return Prepare(r, ba.WithAuthorization())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns true if the HTTP response contains a bearer challenge
|
|
||||||
func hasBearerChallenge(header http.Header) bool {
|
|
||||||
authHeader := header.Get(bearerChallengeHeader)
|
|
||||||
if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
type bearerChallenge struct {
|
|
||||||
values map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) {
|
|
||||||
challenge := strings.TrimSpace(header.Get(bearerChallengeHeader))
|
|
||||||
trimmedChallenge := challenge[len(bearer)+1:]
|
|
||||||
|
|
||||||
// challenge is a set of key=value pairs that are comma delimited
|
|
||||||
pairs := strings.Split(trimmedChallenge, ",")
|
|
||||||
if len(pairs) < 1 {
|
|
||||||
err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
|
|
||||||
return bc, err
|
|
||||||
}
|
|
||||||
|
|
||||||
bc.values = make(map[string]string)
|
|
||||||
for i := range pairs {
|
|
||||||
trimmedPair := strings.TrimSpace(pairs[i])
|
|
||||||
pair := strings.Split(trimmedPair, "=")
|
|
||||||
if len(pair) == 2 {
|
|
||||||
// remove the enclosing quotes
|
|
||||||
key := strings.Trim(pair[0], "\"")
|
|
||||||
value := strings.Trim(pair[1], "\"")
|
|
||||||
|
|
||||||
switch key {
|
|
||||||
case "authorization", "authorization_uri":
|
|
||||||
// strip the tenant ID from the authorization URL
|
|
||||||
asURL, err := url.Parse(value)
|
|
||||||
if err != nil {
|
|
||||||
return bc, err
|
|
||||||
}
|
|
||||||
bc.values[tenantID] = asURL.Path[1:]
|
|
||||||
default:
|
|
||||||
bc.values[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return bc, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
|
|
||||||
type EventGridKeyAuthorizer struct {
|
|
||||||
topicKey string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
|
|
||||||
// with the specified topic key.
|
|
||||||
func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
|
|
||||||
return EventGridKeyAuthorizer{topicKey: topicKey}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
|
|
||||||
func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
headers := map[string]interface{}{
|
|
||||||
"aeg-sas-key": egta.topicKey,
|
|
||||||
}
|
|
||||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
|
||||||
}
|
|
||||||
|
|
||||||
// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
|
|
||||||
// with the value "Basic <TOKEN>" where <TOKEN> is a base64-encoded username:password tuple.
|
|
||||||
type BasicAuthorizer struct {
|
|
||||||
userName string
|
|
||||||
password string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
|
|
||||||
func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
|
|
||||||
return &BasicAuthorizer{
|
|
||||||
userName: userName,
|
|
||||||
password: password,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
|
||||||
// value is "Basic " followed by the base64-encoded username:password tuple.
|
|
||||||
func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
headers := make(map[string]interface{})
|
|
||||||
headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
|
|
||||||
|
|
||||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
|
||||||
}
|
|
||||||
|
|
||||||
// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
|
|
||||||
type MultiTenantServicePrincipalTokenAuthorizer interface {
|
|
||||||
WithAuthorization() PrepareDecorator
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider
|
|
||||||
func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
|
|
||||||
return NewMultiTenantBearerAuthorizer(tp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MultiTenantBearerAuthorizer implements bearer authorization across multiple tenants.
|
|
||||||
type MultiTenantBearerAuthorizer struct {
|
|
||||||
tp adal.MultitenantOAuthTokenProvider
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMultiTenantBearerAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider.
|
|
||||||
func NewMultiTenantBearerAuthorizer(tp adal.MultitenantOAuthTokenProvider) *MultiTenantBearerAuthorizer {
|
|
||||||
return &MultiTenantBearerAuthorizer{tp: tp}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
|
|
||||||
// primary token along with the auxiliary authorization header using the auxiliary tokens.
|
|
||||||
//
|
|
||||||
// By default, the token will be automatically refreshed through the Refresher interface.
|
|
||||||
func (mt *MultiTenantBearerAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
|
|
||||||
err = refresher.EnsureFreshWithContext(r.Context())
|
|
||||||
if err != nil {
|
|
||||||
var resp *http.Response
|
|
||||||
if tokError, ok := err.(adal.TokenRefreshError); ok {
|
|
||||||
resp = tokError.Response()
|
|
||||||
}
|
|
||||||
return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
|
|
||||||
"Failed to refresh one or more Tokens for request to %s", r.URL)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
auxTokens := mt.tp.AuxiliaryOAuthTokens()
|
|
||||||
for i := range auxTokens {
|
|
||||||
auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
|
|
||||||
}
|
|
||||||
return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", ")))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TokenProvider returns the underlying MultitenantOAuthTokenProvider for this authorizer.
|
|
||||||
func (mt *MultiTenantBearerAuthorizer) TokenProvider() adal.MultitenantOAuthTokenProvider {
|
|
||||||
return mt.tp
|
|
||||||
}
|
|
||||||
66
vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
generated
vendored
66
vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
generated
vendored
@@ -1,66 +0,0 @@
|
|||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SASTokenAuthorizer implements an authorization for SAS Token Authentication
|
|
||||||
// this can be used for interaction with Blob Storage Endpoints
|
|
||||||
type SASTokenAuthorizer struct {
|
|
||||||
sasToken string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials
|
|
||||||
func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) {
|
|
||||||
if strings.TrimSpace(sasToken) == "" {
|
|
||||||
return nil, fmt.Errorf("sasToken cannot be empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
token := sasToken
|
|
||||||
if strings.HasPrefix(sasToken, "?") {
|
|
||||||
token = strings.TrimPrefix(sasToken, "?")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &SASTokenAuthorizer{
|
|
||||||
sasToken: token,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the
|
|
||||||
// URI's query parameters. This can be used for the Blob, Queue, and File Services.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature
|
|
||||||
func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.URL.RawQuery == "" {
|
|
||||||
r.URL.RawQuery = sas.sasToken
|
|
||||||
} else if !strings.Contains(r.URL.RawQuery, sas.sasToken) {
|
|
||||||
r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
return Prepare(r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
307
vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
generated
vendored
307
vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
generated
vendored
@@ -1,307 +0,0 @@
|
|||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SharedKeyType defines the enumeration for the various shared key types.
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types.
|
|
||||||
type SharedKeyType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// SharedKey is used to authorize against blobs, files and queues services.
|
|
||||||
SharedKey SharedKeyType = "sharedKey"
|
|
||||||
|
|
||||||
// SharedKeyForTable is used to authorize against the table service.
|
|
||||||
SharedKeyForTable SharedKeyType = "sharedKeyTable"
|
|
||||||
|
|
||||||
// SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for
|
|
||||||
// backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead.
|
|
||||||
SharedKeyLite SharedKeyType = "sharedKeyLite"
|
|
||||||
|
|
||||||
// SharedKeyLiteForTable is used to authorize against the table service. It's provided for
|
|
||||||
// backwards compatibility with older table API versions. Prefer SharedKeyForTable instead.
|
|
||||||
SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
headerAccept = "Accept"
|
|
||||||
headerAcceptCharset = "Accept-Charset"
|
|
||||||
headerContentEncoding = "Content-Encoding"
|
|
||||||
headerContentLength = "Content-Length"
|
|
||||||
headerContentMD5 = "Content-MD5"
|
|
||||||
headerContentLanguage = "Content-Language"
|
|
||||||
headerIfModifiedSince = "If-Modified-Since"
|
|
||||||
headerIfMatch = "If-Match"
|
|
||||||
headerIfNoneMatch = "If-None-Match"
|
|
||||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
|
||||||
headerDate = "Date"
|
|
||||||
headerXMSDate = "X-Ms-Date"
|
|
||||||
headerXMSVersion = "x-ms-version"
|
|
||||||
headerRange = "Range"
|
|
||||||
)
|
|
||||||
|
|
||||||
const storageEmulatorAccountName = "devstoreaccount1"
|
|
||||||
|
|
||||||
// SharedKeyAuthorizer implements an authorization for Shared Key
|
|
||||||
// this can be used for interaction with Blob, File and Queue Storage Endpoints
|
|
||||||
type SharedKeyAuthorizer struct {
|
|
||||||
accountName string
|
|
||||||
accountKey []byte
|
|
||||||
keyType SharedKeyType
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type.
|
|
||||||
func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) {
|
|
||||||
key, err := base64.StdEncoding.DecodeString(accountKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("malformed storage account key: %v", err)
|
|
||||||
}
|
|
||||||
return &SharedKeyAuthorizer{
|
|
||||||
accountName: accountName,
|
|
||||||
accountKey: key,
|
|
||||||
keyType: keyType,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
|
||||||
// value is "<SharedKeyType> " followed by the computed key.
|
|
||||||
// This can be used for the Blob, Queue, and File Services
|
|
||||||
//
|
|
||||||
// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key
|
|
||||||
// You may use Shared Key authorization to authorize a request made against the
|
|
||||||
// 2009-09-19 version and later of the Blob and Queue services,
|
|
||||||
// and version 2014-02-14 and later of the File services.
|
|
||||||
func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
return Prepare(r, WithHeader(headerAuthorization, sk))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) {
|
|
||||||
canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Header == nil {
|
|
||||||
req.Header = http.Header{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensure date is set
|
|
||||||
if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" {
|
|
||||||
date := time.Now().UTC().Format(http.TimeFormat)
|
|
||||||
req.Header.Set(headerXMSDate, date)
|
|
||||||
}
|
|
||||||
canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return createAuthorizationHeader(accName, accKey, canString, keyType), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) {
|
|
||||||
errMsg := "buildCanonicalizedResource error: %s"
|
|
||||||
u, err := url.Parse(uri)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf(errMsg, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
cr := bytes.NewBufferString("")
|
|
||||||
if accountName != storageEmulatorAccountName {
|
|
||||||
cr.WriteString("/")
|
|
||||||
cr.WriteString(getCanonicalizedAccountName(accountName))
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(u.Path) > 0 {
|
|
||||||
// Any portion of the CanonicalizedResource string that is derived from
|
|
||||||
// the resource's URI should be encoded exactly as it is in the URI.
|
|
||||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
|
||||||
cr.WriteString(u.EscapedPath())
|
|
||||||
} else {
|
|
||||||
// a slash is required to indicate the root path
|
|
||||||
cr.WriteString("/")
|
|
||||||
}
|
|
||||||
|
|
||||||
params, err := url.ParseQuery(u.RawQuery)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf(errMsg, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
|
|
||||||
if keyType == SharedKey {
|
|
||||||
if len(params) > 0 {
|
|
||||||
cr.WriteString("\n")
|
|
||||||
|
|
||||||
keys := []string{}
|
|
||||||
for key := range params {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
|
|
||||||
completeParams := []string{}
|
|
||||||
for _, key := range keys {
|
|
||||||
if len(params[key]) > 1 {
|
|
||||||
sort.Strings(params[key])
|
|
||||||
}
|
|
||||||
|
|
||||||
completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
|
|
||||||
}
|
|
||||||
cr.WriteString(strings.Join(completeParams, "\n"))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// search for "comp" parameter, if exists then add it to canonicalizedresource
|
|
||||||
if v, ok := params["comp"]; ok {
|
|
||||||
cr.WriteString("?comp=" + v[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(cr.Bytes()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getCanonicalizedAccountName(accountName string) string {
|
|
||||||
// since we may be trying to access a secondary storage account, we need to
|
|
||||||
// remove the -secondary part of the storage name
|
|
||||||
return strings.TrimSuffix(accountName, "-secondary")
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) {
|
|
||||||
contentLength := headers.Get(headerContentLength)
|
|
||||||
if contentLength == "0" {
|
|
||||||
contentLength = ""
|
|
||||||
}
|
|
||||||
date := headers.Get(headerDate)
|
|
||||||
if v := headers.Get(headerXMSDate); v != "" {
|
|
||||||
if keyType == SharedKey || keyType == SharedKeyLite {
|
|
||||||
date = ""
|
|
||||||
} else {
|
|
||||||
date = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var canString string
|
|
||||||
switch keyType {
|
|
||||||
case SharedKey:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
verb,
|
|
||||||
headers.Get(headerContentEncoding),
|
|
||||||
headers.Get(headerContentLanguage),
|
|
||||||
contentLength,
|
|
||||||
headers.Get(headerContentMD5),
|
|
||||||
headers.Get(headerContentType),
|
|
||||||
date,
|
|
||||||
headers.Get(headerIfModifiedSince),
|
|
||||||
headers.Get(headerIfMatch),
|
|
||||||
headers.Get(headerIfNoneMatch),
|
|
||||||
headers.Get(headerIfUnmodifiedSince),
|
|
||||||
headers.Get(headerRange),
|
|
||||||
buildCanonicalizedHeader(headers),
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
case SharedKeyForTable:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
verb,
|
|
||||||
headers.Get(headerContentMD5),
|
|
||||||
headers.Get(headerContentType),
|
|
||||||
date,
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
case SharedKeyLite:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
verb,
|
|
||||||
headers.Get(headerContentMD5),
|
|
||||||
headers.Get(headerContentType),
|
|
||||||
date,
|
|
||||||
buildCanonicalizedHeader(headers),
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
case SharedKeyLiteForTable:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
date,
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
default:
|
|
||||||
return "", fmt.Errorf("key type '%s' is not supported", keyType)
|
|
||||||
}
|
|
||||||
return canString, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildCanonicalizedHeader(headers http.Header) string {
|
|
||||||
cm := make(map[string]string)
|
|
||||||
|
|
||||||
for k := range headers {
|
|
||||||
headerName := strings.TrimSpace(strings.ToLower(k))
|
|
||||||
if strings.HasPrefix(headerName, "x-ms-") {
|
|
||||||
cm[headerName] = headers.Get(k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cm) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
keys := []string{}
|
|
||||||
for key := range cm {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Strings(keys)
|
|
||||||
|
|
||||||
ch := bytes.NewBufferString("")
|
|
||||||
|
|
||||||
for _, key := range keys {
|
|
||||||
ch.WriteString(key)
|
|
||||||
ch.WriteRune(':')
|
|
||||||
ch.WriteString(cm[key])
|
|
||||||
ch.WriteRune('\n')
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.TrimSuffix(string(ch.Bytes()), "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string {
|
|
||||||
h := hmac.New(sha256.New, accountKey)
|
|
||||||
h.Write([]byte(canonicalizedString))
|
|
||||||
signature := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
|
||||||
var key string
|
|
||||||
switch keyType {
|
|
||||||
case SharedKey, SharedKeyForTable:
|
|
||||||
key = "SharedKey"
|
|
||||||
case SharedKeyLite, SharedKeyLiteForTable:
|
|
||||||
key = "SharedKeyLite"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature)
|
|
||||||
}
|
|
||||||
150
vendor/github.com/Azure/go-autorest/autorest/autorest.go
generated
vendored
150
vendor/github.com/Azure/go-autorest/autorest/autorest.go
generated
vendored
@@ -1,150 +0,0 @@
|
|||||||
/*
|
|
||||||
Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
|
|
||||||
and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
|
|
||||||
generated Go code.
|
|
||||||
|
|
||||||
The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
|
|
||||||
and Responding. A typical pattern is:
|
|
||||||
|
|
||||||
req, err := Prepare(&http.Request{},
|
|
||||||
token.WithAuthorization())
|
|
||||||
|
|
||||||
resp, err := Send(req,
|
|
||||||
WithLogging(logger),
|
|
||||||
DoErrorIfStatusCode(http.StatusInternalServerError),
|
|
||||||
DoCloseIfError(),
|
|
||||||
DoRetryForAttempts(5, time.Second))
|
|
||||||
|
|
||||||
err = Respond(resp,
|
|
||||||
ByDiscardingBody(),
|
|
||||||
ByClosing())
|
|
||||||
|
|
||||||
Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
|
|
||||||
and then pass the data along, pass the data first and then modify the result, or wrap themselves
|
|
||||||
around passing the data (such as a logger might do). Decorators run in the order provided. For
|
|
||||||
example, the following:
|
|
||||||
|
|
||||||
req, err := Prepare(&http.Request{},
|
|
||||||
WithBaseURL("https://microsoft.com/"),
|
|
||||||
WithPath("a"),
|
|
||||||
WithPath("b"),
|
|
||||||
WithPath("c"))
|
|
||||||
|
|
||||||
will set the URL to:
|
|
||||||
|
|
||||||
https://microsoft.com/a/b/c
|
|
||||||
|
|
||||||
Preparers and Responders may be shared and re-used (assuming the underlying decorators support
|
|
||||||
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
|
|
||||||
shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
|
|
||||||
all bound together by means of input / output channels.
|
|
||||||
|
|
||||||
Decorators hold their passed state within a closure (such as the path components in the example
|
|
||||||
above). Be careful to share Preparers and Responders only in a context where such held state
|
|
||||||
applies. For example, it may not make sense to share a Preparer that applies a query string from a
|
|
||||||
fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
|
|
||||||
struct (e.g., ByUnmarshallingJson) is likely incorrect.
|
|
||||||
|
|
||||||
Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
|
|
||||||
(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
|
|
||||||
github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure
|
|
||||||
correct parsing and formatting.
|
|
||||||
|
|
||||||
Errors raised by autorest objects and methods will conform to the autorest.Error interface.
|
|
||||||
|
|
||||||
See the included examples for more detail. For details on the suggested use of this package by
|
|
||||||
generated clients, see the Client described below.
|
|
||||||
*/
|
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// HeaderLocation specifies the HTTP Location header.
|
|
||||||
HeaderLocation = "Location"
|
|
||||||
|
|
||||||
// HeaderRetryAfter specifies the HTTP Retry-After header.
|
|
||||||
HeaderRetryAfter = "Retry-After"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
|
|
||||||
// and false otherwise.
|
|
||||||
func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
|
|
||||||
if resp == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return containsInt(codes, resp.StatusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLocation retrieves the URL from the Location header of the passed response.
|
|
||||||
func GetLocation(resp *http.Response) string {
|
|
||||||
return resp.Header.Get(HeaderLocation)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If
|
|
||||||
// the header is absent or is malformed, it will return the supplied default delay time.Duration.
|
|
||||||
func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration {
|
|
||||||
retry := resp.Header.Get(HeaderRetryAfter)
|
|
||||||
if retry == "" {
|
|
||||||
return defaultDelay
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := time.ParseDuration(retry + "s")
|
|
||||||
if err != nil {
|
|
||||||
return defaultDelay
|
|
||||||
}
|
|
||||||
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPollingRequest allocates and returns a new http.Request to poll for the passed response.
|
|
||||||
func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) {
|
|
||||||
location := GetLocation(resp)
|
|
||||||
if location == "" {
|
|
||||||
return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling")
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := Prepare(&http.Request{Cancel: cancel},
|
|
||||||
AsGet(),
|
|
||||||
WithBaseURL(location))
|
|
||||||
if err != nil {
|
|
||||||
return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location)
|
|
||||||
}
|
|
||||||
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response.
|
|
||||||
func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) {
|
|
||||||
location := GetLocation(resp)
|
|
||||||
if location == "" {
|
|
||||||
return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling")
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := Prepare((&http.Request{}).WithContext(ctx),
|
|
||||||
AsGet(),
|
|
||||||
WithBaseURL(location))
|
|
||||||
if err != nil {
|
|
||||||
return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location)
|
|
||||||
}
|
|
||||||
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
995
vendor/github.com/Azure/go-autorest/autorest/azure/async.go
generated
vendored
995
vendor/github.com/Azure/go-autorest/autorest/azure/async.go
generated
vendored
@@ -1,995 +0,0 @@
|
|||||||
package azure
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest"
|
|
||||||
"github.com/Azure/go-autorest/logger"
|
|
||||||
"github.com/Azure/go-autorest/tracing"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
headerAsyncOperation = "Azure-AsyncOperation"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
operationInProgress string = "InProgress"
|
|
||||||
operationCanceled string = "Canceled"
|
|
||||||
operationFailed string = "Failed"
|
|
||||||
operationSucceeded string = "Succeeded"
|
|
||||||
)
|
|
||||||
|
|
||||||
var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK}
|
|
||||||
|
|
||||||
// FutureAPI contains the set of methods on the Future type.
|
|
||||||
type FutureAPI interface {
|
|
||||||
// Response returns the last HTTP response.
|
|
||||||
Response() *http.Response
|
|
||||||
|
|
||||||
// Status returns the last status message of the operation.
|
|
||||||
Status() string
|
|
||||||
|
|
||||||
// PollingMethod returns the method used to monitor the status of the asynchronous operation.
|
|
||||||
PollingMethod() PollingMethodType
|
|
||||||
|
|
||||||
// DoneWithContext queries the service to see if the operation has completed.
|
|
||||||
DoneWithContext(context.Context, autorest.Sender) (bool, error)
|
|
||||||
|
|
||||||
// GetPollingDelay returns a duration the application should wait before checking
|
|
||||||
// the status of the asynchronous request and true; this value is returned from
|
|
||||||
// the service via the Retry-After response header. If the header wasn't returned
|
|
||||||
// then the function returns the zero-value time.Duration and false.
|
|
||||||
GetPollingDelay() (time.Duration, bool)
|
|
||||||
|
|
||||||
// WaitForCompletionRef will return when one of the following conditions is met: the long
|
|
||||||
// running operation has completed, the provided context is cancelled, or the client's
|
|
||||||
// polling duration has been exceeded. It will retry failed polling attempts based on
|
|
||||||
// the retry value defined in the client up to the maximum retry attempts.
|
|
||||||
// If no deadline is specified in the context then the client.PollingDuration will be
|
|
||||||
// used to determine if a default deadline should be used.
|
|
||||||
// If PollingDuration is greater than zero the value will be used as the context's timeout.
|
|
||||||
// If PollingDuration is zero then no default deadline will be used.
|
|
||||||
WaitForCompletionRef(context.Context, autorest.Client) error
|
|
||||||
|
|
||||||
// MarshalJSON implements the json.Marshaler interface.
|
|
||||||
MarshalJSON() ([]byte, error)
|
|
||||||
|
|
||||||
// MarshalJSON implements the json.Unmarshaler interface.
|
|
||||||
UnmarshalJSON([]byte) error
|
|
||||||
|
|
||||||
// PollingURL returns the URL used for retrieving the status of the long-running operation.
|
|
||||||
PollingURL() string
|
|
||||||
|
|
||||||
// GetResult should be called once polling has completed successfully.
|
|
||||||
// It makes the final GET call to retrieve the resultant payload.
|
|
||||||
GetResult(autorest.Sender) (*http.Response, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ FutureAPI = (*Future)(nil)
|
|
||||||
|
|
||||||
// Future provides a mechanism to access the status and results of an asynchronous request.
|
|
||||||
// Since futures are stateful they should be passed by value to avoid race conditions.
|
|
||||||
type Future struct {
|
|
||||||
pt pollingTracker
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFutureFromResponse returns a new Future object initialized
|
|
||||||
// with the initial response from an asynchronous operation.
|
|
||||||
func NewFutureFromResponse(resp *http.Response) (Future, error) {
|
|
||||||
pt, err := createPollingTracker(resp)
|
|
||||||
return Future{pt: pt}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Response returns the last HTTP response.
|
|
||||||
func (f Future) Response() *http.Response {
|
|
||||||
if f.pt == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return f.pt.latestResponse()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Status returns the last status message of the operation.
|
|
||||||
func (f Future) Status() string {
|
|
||||||
if f.pt == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return f.pt.pollingStatus()
|
|
||||||
}
|
|
||||||
|
|
||||||
// PollingMethod returns the method used to monitor the status of the asynchronous operation.
|
|
||||||
func (f Future) PollingMethod() PollingMethodType {
|
|
||||||
if f.pt == nil {
|
|
||||||
return PollingUnknown
|
|
||||||
}
|
|
||||||
return f.pt.pollingMethod()
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoneWithContext queries the service to see if the operation has completed.
|
|
||||||
func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) {
|
|
||||||
ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext")
|
|
||||||
defer func() {
|
|
||||||
sc := -1
|
|
||||||
resp := f.Response()
|
|
||||||
if resp != nil {
|
|
||||||
sc = resp.StatusCode
|
|
||||||
}
|
|
||||||
tracing.EndSpan(ctx, sc, err)
|
|
||||||
}()
|
|
||||||
|
|
||||||
if f.pt == nil {
|
|
||||||
return false, autorest.NewError("Future", "Done", "future is not initialized")
|
|
||||||
}
|
|
||||||
if f.pt.hasTerminated() {
|
|
||||||
return true, f.pt.pollingError()
|
|
||||||
}
|
|
||||||
if err := f.pt.pollForStatus(ctx, sender); err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
if err := f.pt.checkForErrors(); err != nil {
|
|
||||||
return f.pt.hasTerminated(), err
|
|
||||||
}
|
|
||||||
if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
if err := f.pt.initPollingMethod(); err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
if err := f.pt.updatePollingMethod(); err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
return f.pt.hasTerminated(), f.pt.pollingError()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPollingDelay returns a duration the application should wait before checking
|
|
||||||
// the status of the asynchronous request and true; this value is returned from
|
|
||||||
// the service via the Retry-After response header. If the header wasn't returned
|
|
||||||
// then the function returns the zero-value time.Duration and false.
|
|
||||||
func (f Future) GetPollingDelay() (time.Duration, bool) {
|
|
||||||
if f.pt == nil {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
resp := f.pt.latestResponse()
|
|
||||||
if resp == nil {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
retry := resp.Header.Get(autorest.HeaderRetryAfter)
|
|
||||||
if retry == "" {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := time.ParseDuration(retry + "s")
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return d, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaitForCompletionRef will return when one of the following conditions is met: the long
|
|
||||||
// running operation has completed, the provided context is cancelled, or the client's
|
|
||||||
// polling duration has been exceeded. It will retry failed polling attempts based on
|
|
||||||
// the retry value defined in the client up to the maximum retry attempts.
|
|
||||||
// If no deadline is specified in the context then the client.PollingDuration will be
|
|
||||||
// used to determine if a default deadline should be used.
|
|
||||||
// If PollingDuration is greater than zero the value will be used as the context's timeout.
|
|
||||||
// If PollingDuration is zero then no default deadline will be used.
|
|
||||||
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
|
|
||||||
ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
|
|
||||||
defer func() {
|
|
||||||
sc := -1
|
|
||||||
resp := f.Response()
|
|
||||||
if resp != nil {
|
|
||||||
sc = resp.StatusCode
|
|
||||||
}
|
|
||||||
tracing.EndSpan(ctx, sc, err)
|
|
||||||
}()
|
|
||||||
cancelCtx := ctx
|
|
||||||
// if the provided context already has a deadline don't override it
|
|
||||||
_, hasDeadline := ctx.Deadline()
|
|
||||||
if d := client.PollingDuration; !hasDeadline && d != 0 {
|
|
||||||
var cancel context.CancelFunc
|
|
||||||
cancelCtx, cancel = context.WithTimeout(ctx, d)
|
|
||||||
defer cancel()
|
|
||||||
}
|
|
||||||
// if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll
|
|
||||||
if delay, ok := f.GetPollingDelay(); ok {
|
|
||||||
logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: initial polling delay")
|
|
||||||
if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed {
|
|
||||||
err = cancelCtx.Err()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
done, err := f.DoneWithContext(ctx, client)
|
|
||||||
for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
|
|
||||||
if attempts >= client.RetryAttempts {
|
|
||||||
return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
|
|
||||||
}
|
|
||||||
// we want delayAttempt to be zero in the non-error case so
|
|
||||||
// that DelayForBackoff doesn't perform exponential back-off
|
|
||||||
var delayAttempt int
|
|
||||||
var delay time.Duration
|
|
||||||
if err == nil {
|
|
||||||
// check for Retry-After delay, if not present use the client's polling delay
|
|
||||||
var ok bool
|
|
||||||
delay, ok = f.GetPollingDelay()
|
|
||||||
if !ok {
|
|
||||||
logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: Using client polling delay")
|
|
||||||
delay = client.PollingDelay
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// there was an error polling for status so perform exponential
|
|
||||||
// back-off based on the number of attempts using the client's retry
|
|
||||||
// duration. update attempts after delayAttempt to avoid off-by-one.
|
|
||||||
logger.Instance.Writef(logger.LogError, "WaitForCompletionRef: %s\n", err)
|
|
||||||
delayAttempt = attempts
|
|
||||||
delay = client.RetryDuration
|
|
||||||
attempts++
|
|
||||||
}
|
|
||||||
// wait until the delay elapses or the context is cancelled
|
|
||||||
delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
|
|
||||||
if !delayElapsed {
|
|
||||||
return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON implements the json.Marshaler interface.
|
|
||||||
func (f Future) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(f.pt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
|
||||||
func (f *Future) UnmarshalJSON(data []byte) error {
|
|
||||||
// unmarshal into JSON object to determine the tracker type
|
|
||||||
obj := map[string]interface{}{}
|
|
||||||
err := json.Unmarshal(data, &obj)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if obj["method"] == nil {
|
|
||||||
return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property")
|
|
||||||
}
|
|
||||||
method := obj["method"].(string)
|
|
||||||
switch strings.ToUpper(method) {
|
|
||||||
case http.MethodDelete:
|
|
||||||
f.pt = &pollingTrackerDelete{}
|
|
||||||
case http.MethodPatch:
|
|
||||||
f.pt = &pollingTrackerPatch{}
|
|
||||||
case http.MethodPost:
|
|
||||||
f.pt = &pollingTrackerPost{}
|
|
||||||
case http.MethodPut:
|
|
||||||
f.pt = &pollingTrackerPut{}
|
|
||||||
default:
|
|
||||||
return autorest.NewError("Future", "UnmarshalJSON", "unsupoorted method '%s'", method)
|
|
||||||
}
|
|
||||||
// now unmarshal into the tracker
|
|
||||||
return json.Unmarshal(data, &f.pt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PollingURL returns the URL used for retrieving the status of the long-running operation.
|
|
||||||
func (f Future) PollingURL() string {
|
|
||||||
if f.pt == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return f.pt.pollingURL()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetResult should be called once polling has completed successfully.
|
|
||||||
// It makes the final GET call to retrieve the resultant payload.
|
|
||||||
func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
|
|
||||||
if f.pt.finalGetURL() == "" {
|
|
||||||
// we can end up in this situation if the async operation returns a 200
|
|
||||||
// with no polling URLs. in that case return the response which should
|
|
||||||
// contain the JSON payload (only do this for successful terminal cases).
|
|
||||||
if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() {
|
|
||||||
return lr, nil
|
|
||||||
}
|
|
||||||
return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result")
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
resp, err := sender.Do(req)
|
|
||||||
if err == nil && resp.Body != nil {
|
|
||||||
// copy the body and close it so callers don't have to
|
|
||||||
defer resp.Body.Close()
|
|
||||||
b, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
resp.Body = ioutil.NopCloser(bytes.NewReader(b))
|
|
||||||
}
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
type pollingTracker interface {
|
|
||||||
// these methods can differ per tracker
|
|
||||||
|
|
||||||
// checks the response headers and status code to determine the polling mechanism
|
|
||||||
updatePollingMethod() error
|
|
||||||
|
|
||||||
// checks the response for tracker-specific error conditions
|
|
||||||
checkForErrors() error
|
|
||||||
|
|
||||||
// returns true if provisioning state should be checked
|
|
||||||
provisioningStateApplicable() bool
|
|
||||||
|
|
||||||
// methods common to all trackers
|
|
||||||
|
|
||||||
// initializes a tracker's polling URL and method, called for each iteration.
|
|
||||||
// these values can be overridden by each polling tracker as required.
|
|
||||||
initPollingMethod() error
|
|
||||||
|
|
||||||
// initializes the tracker's internal state, call this when the tracker is created
|
|
||||||
initializeState() error
|
|
||||||
|
|
||||||
// makes an HTTP request to check the status of the LRO
|
|
||||||
pollForStatus(ctx context.Context, sender autorest.Sender) error
|
|
||||||
|
|
||||||
// updates internal tracker state, call this after each call to pollForStatus
|
|
||||||
updatePollingState(provStateApl bool) error
|
|
||||||
|
|
||||||
// returns the error response from the service, can be nil
|
|
||||||
pollingError() error
|
|
||||||
|
|
||||||
// returns the polling method being used
|
|
||||||
pollingMethod() PollingMethodType
|
|
||||||
|
|
||||||
// returns the state of the LRO as returned from the service
|
|
||||||
pollingStatus() string
|
|
||||||
|
|
||||||
// returns the URL used for polling status
|
|
||||||
pollingURL() string
|
|
||||||
|
|
||||||
// returns the URL used for the final GET to retrieve the resource
|
|
||||||
finalGetURL() string
|
|
||||||
|
|
||||||
// returns true if the LRO is in a terminal state
|
|
||||||
hasTerminated() bool
|
|
||||||
|
|
||||||
// returns true if the LRO is in a failed terminal state
|
|
||||||
hasFailed() bool
|
|
||||||
|
|
||||||
// returns true if the LRO is in a successful terminal state
|
|
||||||
hasSucceeded() bool
|
|
||||||
|
|
||||||
// returns the cached HTTP response after a call to pollForStatus(), can be nil
|
|
||||||
latestResponse() *http.Response
|
|
||||||
}
|
|
||||||
|
|
||||||
type pollingTrackerBase struct {
|
|
||||||
// resp is the last response, either from the submission of the LRO or from polling
|
|
||||||
resp *http.Response
|
|
||||||
|
|
||||||
// method is the HTTP verb, this is needed for deserialization
|
|
||||||
Method string `json:"method"`
|
|
||||||
|
|
||||||
// rawBody is the raw JSON response body
|
|
||||||
rawBody map[string]interface{}
|
|
||||||
|
|
||||||
// denotes if polling is using async-operation or location header
|
|
||||||
Pm PollingMethodType `json:"pollingMethod"`
|
|
||||||
|
|
||||||
// the URL to poll for status
|
|
||||||
URI string `json:"pollingURI"`
|
|
||||||
|
|
||||||
// the state of the LRO as returned from the service
|
|
||||||
State string `json:"lroState"`
|
|
||||||
|
|
||||||
// the URL to GET for the final result
|
|
||||||
FinalGetURI string `json:"resultURI"`
|
|
||||||
|
|
||||||
// used to hold an error object returned from the service
|
|
||||||
Err *ServiceError `json:"error,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt *pollingTrackerBase) initializeState() error {
|
|
||||||
// determine the initial polling state based on response body and/or HTTP status
|
|
||||||
// code. this is applicable to the initial LRO response, not polling responses!
|
|
||||||
pt.Method = pt.resp.Request.Method
|
|
||||||
if err := pt.updateRawBody(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch pt.resp.StatusCode {
|
|
||||||
case http.StatusOK:
|
|
||||||
if ps := pt.getProvisioningState(); ps != nil {
|
|
||||||
pt.State = *ps
|
|
||||||
if pt.hasFailed() {
|
|
||||||
pt.updateErrorFromResponse()
|
|
||||||
return pt.pollingError()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
pt.State = operationSucceeded
|
|
||||||
}
|
|
||||||
case http.StatusCreated:
|
|
||||||
if ps := pt.getProvisioningState(); ps != nil {
|
|
||||||
pt.State = *ps
|
|
||||||
} else {
|
|
||||||
pt.State = operationInProgress
|
|
||||||
}
|
|
||||||
case http.StatusAccepted:
|
|
||||||
pt.State = operationInProgress
|
|
||||||
case http.StatusNoContent:
|
|
||||||
pt.State = operationSucceeded
|
|
||||||
default:
|
|
||||||
pt.State = operationFailed
|
|
||||||
pt.updateErrorFromResponse()
|
|
||||||
return pt.pollingError()
|
|
||||||
}
|
|
||||||
return pt.initPollingMethod()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerBase) getProvisioningState() *string {
|
|
||||||
if pt.rawBody != nil && pt.rawBody["properties"] != nil {
|
|
||||||
p := pt.rawBody["properties"].(map[string]interface{})
|
|
||||||
if ps := p["provisioningState"]; ps != nil {
|
|
||||||
s := ps.(string)
|
|
||||||
return &s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt *pollingTrackerBase) updateRawBody() error {
|
|
||||||
pt.rawBody = map[string]interface{}{}
|
|
||||||
if pt.resp.ContentLength != 0 {
|
|
||||||
defer pt.resp.Body.Close()
|
|
||||||
b, err := ioutil.ReadAll(pt.resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
|
|
||||||
}
|
|
||||||
// put the body back so it's available to other callers
|
|
||||||
pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
|
|
||||||
// observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
|
|
||||||
if len(b) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err = json.Unmarshal(b, &pt.rawBody); err != nil {
|
|
||||||
return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error {
|
|
||||||
req, err := http.NewRequest(http.MethodGet, pt.URI, nil)
|
|
||||||
if err != nil {
|
|
||||||
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request")
|
|
||||||
}
|
|
||||||
|
|
||||||
req = req.WithContext(ctx)
|
|
||||||
preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...)
|
|
||||||
req, err = preparer.Prepare(req)
|
|
||||||
if err != nil {
|
|
||||||
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request")
|
|
||||||
}
|
|
||||||
pt.resp, err = sender.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")
|
|
||||||
}
|
|
||||||
if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) {
|
|
||||||
// reset the service error on success case
|
|
||||||
pt.Err = nil
|
|
||||||
err = pt.updateRawBody()
|
|
||||||
} else {
|
|
||||||
// check response body for error content
|
|
||||||
pt.updateErrorFromResponse()
|
|
||||||
err = pt.pollingError()
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// attempts to unmarshal a ServiceError type from the response body.
|
|
||||||
// if that fails then make a best attempt at creating something meaningful.
|
|
||||||
// NOTE: this assumes that the async operation has failed.
|
|
||||||
func (pt *pollingTrackerBase) updateErrorFromResponse() {
|
|
||||||
var err error
|
|
||||||
if pt.resp.ContentLength != 0 {
|
|
||||||
type respErr struct {
|
|
||||||
ServiceError *ServiceError `json:"error"`
|
|
||||||
}
|
|
||||||
re := respErr{}
|
|
||||||
defer pt.resp.Body.Close()
|
|
||||||
var b []byte
|
|
||||||
if b, err = ioutil.ReadAll(pt.resp.Body); err != nil {
|
|
||||||
goto Default
|
|
||||||
}
|
|
||||||
// put the body back so it's available to other callers
|
|
||||||
pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
|
|
||||||
if len(b) == 0 {
|
|
||||||
goto Default
|
|
||||||
}
|
|
||||||
if err = json.Unmarshal(b, &re); err != nil {
|
|
||||||
goto Default
|
|
||||||
}
|
|
||||||
// unmarshalling the error didn't yield anything, try unwrapped error
|
|
||||||
if re.ServiceError == nil {
|
|
||||||
err = json.Unmarshal(b, &re.ServiceError)
|
|
||||||
if err != nil {
|
|
||||||
goto Default
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// the unmarshaller will ensure re.ServiceError is non-nil
|
|
||||||
// even if there was no content unmarshalled so check the code.
|
|
||||||
if re.ServiceError.Code != "" {
|
|
||||||
pt.Err = re.ServiceError
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Default:
|
|
||||||
se := &ServiceError{
|
|
||||||
Code: pt.pollingStatus(),
|
|
||||||
Message: "The async operation failed.",
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
se.InnerError = make(map[string]interface{})
|
|
||||||
se.InnerError["unmarshalError"] = err.Error()
|
|
||||||
}
|
|
||||||
// stick the response body into the error object in hopes
|
|
||||||
// it contains something useful to help diagnose the failure.
|
|
||||||
if len(pt.rawBody) > 0 {
|
|
||||||
se.AdditionalInfo = []map[string]interface{}{
|
|
||||||
pt.rawBody,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pt.Err = se
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error {
|
|
||||||
if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil {
|
|
||||||
pt.State = pt.rawBody["status"].(string)
|
|
||||||
} else {
|
|
||||||
if pt.resp.StatusCode == http.StatusAccepted {
|
|
||||||
pt.State = operationInProgress
|
|
||||||
} else if provStateApl {
|
|
||||||
if ps := pt.getProvisioningState(); ps != nil {
|
|
||||||
pt.State = *ps
|
|
||||||
} else {
|
|
||||||
pt.State = operationSucceeded
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// if the operation has failed update the error state
|
|
||||||
if pt.hasFailed() {
|
|
||||||
pt.updateErrorFromResponse()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerBase) pollingError() error {
|
|
||||||
if pt.Err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return pt.Err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerBase) pollingMethod() PollingMethodType {
|
|
||||||
return pt.Pm
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerBase) pollingStatus() string {
|
|
||||||
return pt.State
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerBase) pollingURL() string {
|
|
||||||
return pt.URI
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerBase) finalGetURL() string {
|
|
||||||
return pt.FinalGetURI
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerBase) hasTerminated() bool {
|
|
||||||
return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerBase) hasFailed() bool {
|
|
||||||
return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerBase) hasSucceeded() bool {
|
|
||||||
return strings.EqualFold(pt.State, operationSucceeded)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerBase) latestResponse() *http.Response {
|
|
||||||
return pt.resp
|
|
||||||
}
|
|
||||||
|
|
||||||
// error checking common to all trackers
|
|
||||||
func (pt pollingTrackerBase) baseCheckForErrors() error {
|
|
||||||
// for Azure-AsyncOperations the response body cannot be nil or empty
|
|
||||||
if pt.Pm == PollingAsyncOperation {
|
|
||||||
if pt.resp.Body == nil || pt.resp.ContentLength == 0 {
|
|
||||||
return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil")
|
|
||||||
}
|
|
||||||
if pt.rawBody["status"] == nil {
|
|
||||||
return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// default initialization of polling URL/method. each verb tracker will update this as required.
|
|
||||||
func (pt *pollingTrackerBase) initPollingMethod() error {
|
|
||||||
if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
|
|
||||||
return err
|
|
||||||
} else if ao != "" {
|
|
||||||
pt.URI = ao
|
|
||||||
pt.Pm = PollingAsyncOperation
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
|
|
||||||
return err
|
|
||||||
} else if lh != "" {
|
|
||||||
pt.URI = lh
|
|
||||||
pt.Pm = PollingLocation
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// it's ok if we didn't find a polling header, this will be handled elsewhere
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DELETE
|
|
||||||
|
|
||||||
type pollingTrackerDelete struct {
|
|
||||||
pollingTrackerBase
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt *pollingTrackerDelete) updatePollingMethod() error {
|
|
||||||
// for 201 the Location header is required
|
|
||||||
if pt.resp.StatusCode == http.StatusCreated {
|
|
||||||
if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
|
|
||||||
return err
|
|
||||||
} else if lh == "" {
|
|
||||||
return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response")
|
|
||||||
} else {
|
|
||||||
pt.URI = lh
|
|
||||||
}
|
|
||||||
pt.Pm = PollingLocation
|
|
||||||
pt.FinalGetURI = pt.URI
|
|
||||||
}
|
|
||||||
// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
|
|
||||||
if pt.resp.StatusCode == http.StatusAccepted {
|
|
||||||
ao, err := getURLFromAsyncOpHeader(pt.resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if ao != "" {
|
|
||||||
pt.URI = ao
|
|
||||||
pt.Pm = PollingAsyncOperation
|
|
||||||
}
|
|
||||||
// if the Location header is invalid and we already have a polling URL
|
|
||||||
// then we don't care if the Location header URL is malformed.
|
|
||||||
if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
|
|
||||||
return err
|
|
||||||
} else if lh != "" {
|
|
||||||
if ao == "" {
|
|
||||||
pt.URI = lh
|
|
||||||
pt.Pm = PollingLocation
|
|
||||||
}
|
|
||||||
// when both headers are returned we use the value in the Location header for the final GET
|
|
||||||
pt.FinalGetURI = lh
|
|
||||||
}
|
|
||||||
// make sure a polling URL was found
|
|
||||||
if pt.URI == "" {
|
|
||||||
return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerDelete) checkForErrors() error {
|
|
||||||
return pt.baseCheckForErrors()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerDelete) provisioningStateApplicable() bool {
|
|
||||||
return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent
|
|
||||||
}
|
|
||||||
|
|
||||||
// PATCH
|
|
||||||
|
|
||||||
// pollingTrackerPatch tracks LRO polling state for PATCH requests.
type pollingTrackerPatch struct {
	pollingTrackerBase
}

// updatePollingMethod determines the polling URL/mechanism from the initial
// PATCH response. The original request URL is the default for both polling
// and the final GET; 201/202 responses may override the polling URL via the
// Azure-AsyncOperation and Location headers.
func (pt *pollingTrackerPatch) updatePollingMethod() error {
	// by default we can use the original URL for polling and final GET
	if pt.URI == "" {
		pt.URI = pt.resp.Request.URL.String()
	}
	if pt.FinalGetURI == "" {
		pt.FinalGetURI = pt.resp.Request.URL.String()
	}
	if pt.Pm == PollingUnknown {
		pt.Pm = PollingRequestURI
	}
	// for 201 it's permissible for no headers to be returned
	if pt.resp.StatusCode == http.StatusCreated {
		if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
			return err
		} else if ao != "" {
			pt.URI = ao
			pt.Pm = PollingAsyncOperation
		}
	}
	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
	// note the absence of the "final GET" mechanism for PATCH
	if pt.resp.StatusCode == http.StatusAccepted {
		ao, err := getURLFromAsyncOpHeader(pt.resp)
		if err != nil {
			return err
		} else if ao != "" {
			pt.URI = ao
			pt.Pm = PollingAsyncOperation
		}
		// no Azure-AsyncOperation header: the Location header becomes mandatory
		if ao == "" {
			if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
				return err
			} else if lh == "" {
				return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
			} else {
				pt.URI = lh
				pt.Pm = PollingLocation
			}
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerPatch) checkForErrors() error {
|
|
||||||
return pt.baseCheckForErrors()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerPatch) provisioningStateApplicable() bool {
|
|
||||||
return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
|
|
||||||
}
|
|
||||||
|
|
||||||
// POST
|
|
||||||
|
|
||||||
// pollingTrackerPost tracks LRO polling state for POST requests.
type pollingTrackerPost struct {
	pollingTrackerBase
}

// updatePollingMethod determines the polling URL/mechanism from the initial
// POST response: a 201 requires the Location header (also used for the final
// GET); a 202 prefers the Azure-AsyncOperation header, falling back to Location.
func (pt *pollingTrackerPost) updatePollingMethod() error {
	// 201 requires Location header
	if pt.resp.StatusCode == http.StatusCreated {
		if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
			return err
		} else if lh == "" {
			return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response")
		} else {
			pt.URI = lh
			pt.FinalGetURI = lh
			pt.Pm = PollingLocation
		}
	}
	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
	if pt.resp.StatusCode == http.StatusAccepted {
		ao, err := getURLFromAsyncOpHeader(pt.resp)
		if err != nil {
			return err
		} else if ao != "" {
			pt.URI = ao
			pt.Pm = PollingAsyncOperation
		}
		// if the Location header is invalid and we already have a polling URL
		// then we don't care if the Location header URL is malformed.
		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
			return err
		} else if lh != "" {
			if ao == "" {
				pt.URI = lh
				pt.Pm = PollingLocation
			}
			// when both headers are returned we use the value in the Location header for the final GET
			pt.FinalGetURI = lh
		}
		// make sure a polling URL was found
		if pt.URI == "" {
			return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerPost) checkForErrors() error {
|
|
||||||
return pt.baseCheckForErrors()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerPost) provisioningStateApplicable() bool {
|
|
||||||
return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent
|
|
||||||
}
|
|
||||||
|
|
||||||
// PUT
|
|
||||||
|
|
||||||
// pollingTrackerPut tracks LRO polling state for PUT requests.
type pollingTrackerPut struct {
	pollingTrackerBase
}

// updatePollingMethod determines the polling URL/mechanism from the initial
// PUT response. The original request URL is the default for both polling and
// the final GET; 201 and 202 responses may override the polling URL via the
// Azure-AsyncOperation and Location headers.
func (pt *pollingTrackerPut) updatePollingMethod() error {
	// by default we can use the original URL for polling and final GET
	if pt.URI == "" {
		pt.URI = pt.resp.Request.URL.String()
	}
	if pt.FinalGetURI == "" {
		pt.FinalGetURI = pt.resp.Request.URL.String()
	}
	if pt.Pm == PollingUnknown {
		pt.Pm = PollingRequestURI
	}
	// for 201 it's permissible for no headers to be returned
	if pt.resp.StatusCode == http.StatusCreated {
		if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
			return err
		} else if ao != "" {
			pt.URI = ao
			pt.Pm = PollingAsyncOperation
		}
	}
	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
	if pt.resp.StatusCode == http.StatusAccepted {
		ao, err := getURLFromAsyncOpHeader(pt.resp)
		if err != nil {
			return err
		} else if ao != "" {
			pt.URI = ao
			pt.Pm = PollingAsyncOperation
		}
		// if the Location header is invalid and we already have a polling URL
		// then we don't care if the Location header URL is malformed.
		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
			return err
		} else if lh != "" {
			if ao == "" {
				pt.URI = lh
				pt.Pm = PollingLocation
			}
		}
		// make sure a polling URL was found
		if pt.URI == "" {
			return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerPut) checkForErrors() error {
|
|
||||||
err := pt.baseCheckForErrors()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// if there are no LRO headers then the body cannot be empty
|
|
||||||
ao, err := getURLFromAsyncOpHeader(pt.resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
lh, err := getURLFromLocationHeader(pt.resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if ao == "" && lh == "" && len(pt.rawBody) == 0 {
|
|
||||||
return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pt pollingTrackerPut) provisioningStateApplicable() bool {
|
|
||||||
return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
|
|
||||||
}
|
|
||||||
|
|
||||||
// creates a polling tracker based on the verb of the original request
|
|
||||||
func createPollingTracker(resp *http.Response) (pollingTracker, error) {
|
|
||||||
var pt pollingTracker
|
|
||||||
switch strings.ToUpper(resp.Request.Method) {
|
|
||||||
case http.MethodDelete:
|
|
||||||
pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}}
|
|
||||||
case http.MethodPatch:
|
|
||||||
pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}}
|
|
||||||
case http.MethodPost:
|
|
||||||
pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}}
|
|
||||||
case http.MethodPut:
|
|
||||||
pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}}
|
|
||||||
default:
|
|
||||||
return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method)
|
|
||||||
}
|
|
||||||
if err := pt.initializeState(); err != nil {
|
|
||||||
return pt, err
|
|
||||||
}
|
|
||||||
// this initializes the polling header values, we do this during creation in case the
|
|
||||||
// initial response send us invalid values; this way the API call will return a non-nil
|
|
||||||
// error (not doing this means the error shows up in Future.Done)
|
|
||||||
return pt, pt.updatePollingMethod()
|
|
||||||
}
|
|
||||||
|
|
||||||
// gets the polling URL from the Azure-AsyncOperation header.
|
|
||||||
// ensures the URL is well-formed and absolute.
|
|
||||||
func getURLFromAsyncOpHeader(resp *http.Response) (string, error) {
|
|
||||||
s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
|
|
||||||
if s == "" {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
if !isValidURL(s) {
|
|
||||||
return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s)
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// gets the polling URL from the Location header.
|
|
||||||
// ensures the URL is well-formed and absolute.
|
|
||||||
func getURLFromLocationHeader(resp *http.Response) (string, error) {
|
|
||||||
s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation))
|
|
||||||
if s == "" {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
if !isValidURL(s) {
|
|
||||||
return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s)
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// isValidURL verifies that s parses as a URL and is absolute.
func isValidURL(s string) bool {
	parsed, err := url.Parse(s)
	if err != nil {
		return false
	}
	return parsed.IsAbs()
}
|
|
||||||
|
|
||||||
// PollingMethodType defines a type used for enumerating polling mechanisms.
type PollingMethodType string

const (
	// PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
	PollingAsyncOperation PollingMethodType = "AsyncOperation"

	// PollingLocation indicates the polling method uses the Location header.
	PollingLocation PollingMethodType = "Location"

	// PollingRequestURI indicates the polling method uses the original request URI.
	PollingRequestURI PollingMethodType = "RequestURI"

	// PollingUnknown indicates an unknown polling method and is the default value.
	PollingUnknown PollingMethodType = ""
)
|
|
||||||
|
|
||||||
// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
type AsyncOpIncompleteError struct {
	// FutureType is the name of the type composed of a azure.Future.
	FutureType string
}

// Error returns an error message that includes the originating future type name.
func (aoie AsyncOpIncompleteError) Error() string {
	return fmt.Sprintf("%s: asynchronous operation has not completed", aoie.FutureType)
}

// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError for the given future type name.
func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
	return AsyncOpIncompleteError{FutureType: futureType}
}
|
|
||||||
388
vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
generated
vendored
388
vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
generated
vendored
@@ -1,388 +0,0 @@
|
|||||||
// Package azure provides Azure-specific implementations used with AutoRest.
|
|
||||||
// See the included examples for more detail.
|
|
||||||
package azure
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// HeaderClientID is the Azure extension header to set a user-specified request ID.
	HeaderClientID = "x-ms-client-request-id"

	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
	// should be included in the response.
	HeaderReturnClientID = "x-ms-return-client-request-id"

	// HeaderContentType is the type of the content in the HTTP response.
	HeaderContentType = "Content-Type"

	// HeaderRequestID is the Azure extension header of the service generated request ID returned
	// in the response.
	HeaderRequestID = "x-ms-request-id"
)
|
|
||||||
|
|
||||||
// ServiceError encapsulates the error response from an Azure service.
// It adheres to the OData v4 specification for error responses.
type ServiceError struct {
	Code           string                   `json:"code"`
	Message        string                   `json:"message"`
	Target         *string                  `json:"target"`
	Details        []map[string]interface{} `json:"details"`
	InnerError     map[string]interface{}   `json:"innererror"`
	AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
}

// Error returns a human-readable representation of the service error,
// including any populated optional fields. A field that fails to marshal to
// JSON is rendered with Go's default formatting instead.
func (se ServiceError) Error() string {
	result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)

	if se.Target != nil {
		result += fmt.Sprintf(" Target=%q", *se.Target)
	}

	if se.Details != nil {
		// fixed: previously the JSON form was appended even when marshaling
		// failed, producing a duplicate empty "Details=" segment
		if d, err := json.Marshal(se.Details); err != nil {
			result += fmt.Sprintf(" Details=%v", se.Details)
		} else {
			result += fmt.Sprintf(" Details=%s", d)
		}
	}

	if se.InnerError != nil {
		if d, err := json.Marshal(se.InnerError); err != nil {
			result += fmt.Sprintf(" InnerError=%v", se.InnerError)
		} else {
			result += fmt.Sprintf(" InnerError=%s", d)
		}
	}

	if se.AdditionalInfo != nil {
		if d, err := json.Marshal(se.AdditionalInfo); err != nil {
			result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
		} else {
			result += fmt.Sprintf(" AdditionalInfo=%s", d)
		}
	}

	return result
}
|
|
||||||
|
|
||||||
// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
// It tolerates the shape discrepancies observed across real Azure services and
// normalizes them into the strongly-typed ServiceError fields.
func (se *ServiceError) UnmarshalJSON(b []byte) error {
	// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091

	// serviceErrorInternal mirrors ServiceError but loosens the types of the
	// fields where services have been seen to deviate from the OData v4 spec.
	type serviceErrorInternal struct {
		Code           string                   `json:"code"`
		Message        string                   `json:"message"`
		Target         *string                  `json:"target,omitempty"`
		AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"`
		// not all services conform to the OData v4 spec.
		// the following fields are where we've seen discrepancies

		// spec calls for []map[string]interface{} but have seen map[string]interface{}
		Details interface{} `json:"details,omitempty"`

		// spec calls for map[string]interface{} but have seen []map[string]interface{} and string
		InnerError interface{} `json:"innererror,omitempty"`
	}

	sei := serviceErrorInternal{}
	if err := json.Unmarshal(b, &sei); err != nil {
		return err
	}

	// copy the fields we know to be correct
	se.AdditionalInfo = sei.AdditionalInfo
	se.Code = sei.Code
	se.Message = sei.Message
	se.Target = sei.Target

	// converts an []interface{} to []map[string]interface{}; reports false
	// when the value is not an array or any element is not an object
	arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) {
		arrayOf, ok := v.([]interface{})
		if !ok {
			return nil, false
		}
		final := []map[string]interface{}{}
		for _, item := range arrayOf {
			as, ok := item.(map[string]interface{})
			if !ok {
				return nil, false
			}
			final = append(final, as)
		}
		return final, true
	}

	// convert the remaining fields, falling back to raw JSON if necessary

	if c, ok := arrayOfObjs(sei.Details); ok {
		se.Details = c
	} else if c, ok := sei.Details.(map[string]interface{}); ok {
		// single object: wrap it in a one-element slice
		se.Details = []map[string]interface{}{c}
	} else if sei.Details != nil {
		// stuff into Details
		se.Details = []map[string]interface{}{
			{"raw": sei.Details},
		}
	}

	if c, ok := sei.InnerError.(map[string]interface{}); ok {
		se.InnerError = c
	} else if c, ok := arrayOfObjs(sei.InnerError); ok {
		// if there's only one error extract it
		if len(c) == 1 {
			se.InnerError = c[0]
		} else {
			// multiple errors, stuff them into the value
			se.InnerError = map[string]interface{}{
				"multi": c,
			}
		}
	} else if c, ok := sei.InnerError.(string); ok {
		se.InnerError = map[string]interface{}{"error": c}
	} else if sei.InnerError != nil {
		// stuff into InnerError
		se.InnerError = map[string]interface{}{
			"raw": sei.InnerError,
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// RequestError describes an error response returned by Azure service.
type RequestError struct {
	autorest.DetailedError

	// The error returned by the Azure service.
	ServiceError *ServiceError `json:"error" xml:"Error"`

	// The request id (from the x-ms-request-id-header) of the request.
	RequestID string
}
|
|
||||||
|
|
||||||
// Error returns a human-friendly error message from service error.
|
|
||||||
func (e RequestError) Error() string {
|
|
||||||
return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
|
|
||||||
e.StatusCode, e.ServiceError)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
|
|
||||||
func IsAzureError(e error) bool {
|
|
||||||
_, ok := e.(*RequestError)
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resource contains details about an Azure resource.
type Resource struct {
	SubscriptionID string
	ResourceGroup  string
	Provider       string
	ResourceType   string
	ResourceName   string
}

// String returns the resource in azure resource ID form:
// /subscriptions/{sub}/resourceGroups/{rg}/providers/{provider}/{type}/{name}.
func (r Resource) String() string {
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName)
}

// ParseResourceID parses a resource ID into a ResourceDetails struct.
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/template-functions-resource?tabs=json#resourceid.
func ParseResourceID(resourceID string) (Resource, error) {
	const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)`
	groups := regexp.MustCompile(resourceIDPatternText).FindStringSubmatch(resourceID)
	if len(groups) == 0 {
		return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID)
	}
	// the trailing capture may contain nested segments; the resource name is the last one
	segments := strings.Split(groups[5], "/")
	return Resource{
		SubscriptionID: groups[1],
		ResourceGroup:  groups[2],
		Provider:       groups[3],
		ResourceType:   groups[4],
		ResourceName:   segments[len(segments)-1],
	}, nil
}
|
|
||||||
|
|
||||||
// NewErrorWithError creates a new Error conforming object from the
|
|
||||||
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
|
|
||||||
// if resp is nil), message, and original error. message is treated as a format
|
|
||||||
// string to which the optional args apply.
|
|
||||||
func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
|
|
||||||
if v, ok := original.(*RequestError); ok {
|
|
||||||
return *v
|
|
||||||
}
|
|
||||||
|
|
||||||
statusCode := autorest.UndefinedStatusCode
|
|
||||||
if resp != nil {
|
|
||||||
statusCode = resp.StatusCode
|
|
||||||
}
|
|
||||||
return RequestError{
|
|
||||||
DetailedError: autorest.DetailedError{
|
|
||||||
Original: original,
|
|
||||||
PackageType: packageType,
|
|
||||||
Method: method,
|
|
||||||
StatusCode: statusCode,
|
|
||||||
Message: fmt.Sprintf(message, args...),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
|
|
||||||
// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
|
|
||||||
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
|
|
||||||
// header to true such that UUID accompanies the http.Response.
|
|
||||||
func WithReturningClientID(uuid string) autorest.PrepareDecorator {
|
|
||||||
preparer := autorest.CreatePreparer(
|
|
||||||
WithClientID(uuid),
|
|
||||||
WithReturnClientID(true))
|
|
||||||
|
|
||||||
return func(p autorest.Preparer) autorest.Preparer {
|
|
||||||
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
return preparer.Prepare(r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-client-request-id whose value is passed, undecorated UUID (e.g.,
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
func WithClientID(uuid string) autorest.PrepareDecorator {
	return autorest.WithHeader(HeaderClientID, uuid)
}

// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-return-client-request-id whose boolean value indicates if the value of the
// x-ms-client-request-id header should be included in the http.Response.
func WithReturnClientID(b bool) autorest.PrepareDecorator {
	return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
}
|
|
||||||
|
|
||||||
// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
// http.Request sent to the service (and returned in the http.Response).
func ExtractClientID(resp *http.Response) string {
	return autorest.ExtractHeaderValue(HeaderClientID, resp)
}

// ExtractRequestID extracts the Azure server generated request identifier from the
// x-ms-request-id header.
func ExtractRequestID(resp *http.Response) string {
	return autorest.ExtractHeaderValue(HeaderRequestID, resp)
}
|
|
||||||
|
|
||||||
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
// azure.RequestError by reading the response body unless the response HTTP status code
// is among the set passed.
//
// If there is a chance service may return responses other than the Azure error
// format and the response cannot be parsed into an error, a decoding error will
// be returned containing the response body. In any case, the Responder will
// return an error if the status code is not satisfied.
//
// If this Responder returns an error, the response body will be replaced with
// an in-memory reader, which needs no further closing.
func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
	return func(r autorest.Responder) autorest.Responder {
		return autorest.ResponderFunc(func(resp *http.Response) error {
			err := r.Respond(resp)
			if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
				var e RequestError
				defer resp.Body.Close()

				// pick the decoder from the Content-Type; JSON is the default
				encodedAs := autorest.EncodedAsJSON
				if strings.Contains(resp.Header.Get("Content-Type"), "xml") {
					encodedAs = autorest.EncodedAsXML
				}

				// Copy and replace the Body in case it does not contain an error object.
				// This will leave the Body available to the caller.
				b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e)
				resp.Body = ioutil.NopCloser(&b)
				if decodeErr != nil {
					return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, decodeErr)
				}
				if e.ServiceError == nil {
					// Check if error is unwrapped ServiceError
					decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
					if err := decoder.Decode(&e.ServiceError); err != nil {
						return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, err)
					}

					// for example, should the API return the literal value `null` as the response
					if e.ServiceError == nil {
						e.ServiceError = &ServiceError{
							Code:    "Unknown",
							Message: "Unknown service error",
							Details: []map[string]interface{}{
								{
									"HttpResponse.Body": b.String(),
								},
							},
						}
					}
				}

				if e.ServiceError != nil && e.ServiceError.Message == "" {
					// if we're here it means the returned error wasn't OData v4 compliant.
					// try to unmarshal the body in hopes of getting something.
					rawBody := map[string]interface{}{}
					decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
					if err := decoder.Decode(&rawBody); err != nil {
						return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, err)
					}

					e.ServiceError = &ServiceError{
						Code:    "Unknown",
						Message: "Unknown service error",
					}
					if len(rawBody) > 0 {
						e.ServiceError.Details = []map[string]interface{}{rawBody}
					}
				}
				// attach response metadata so callers can inspect the failure
				e.Response = resp
				e.RequestID = ExtractRequestID(resp)
				if e.StatusCode == nil {
					e.StatusCode = resp.StatusCode
				}
				err = &e
			}
			return err
		})
	}
}
|
|
||||||
331
vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
generated
vendored
331
vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
generated
vendored
@@ -1,331 +0,0 @@
|
|||||||
package azure
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// EnvironmentFilepathName captures the name of the environment variable containing the path to the file
|
|
||||||
// to be used while populating the Azure Environment.
|
|
||||||
EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
|
|
||||||
|
|
||||||
// NotAvailable is used for endpoints and resource IDs that are not available for a given cloud.
|
|
||||||
NotAvailable = "N/A"
|
|
||||||
)
|
|
||||||
|
|
||||||
var environments = map[string]Environment{
|
|
||||||
"AZURECHINACLOUD": ChinaCloud,
|
|
||||||
"AZUREGERMANCLOUD": GermanCloud,
|
|
||||||
"AZURECLOUD": PublicCloud,
|
|
||||||
"AZUREPUBLICCLOUD": PublicCloud,
|
|
||||||
"AZUREUSGOVERNMENT": USGovernmentCloud,
|
|
||||||
"AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, //TODO: deprecate
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResourceIdentifier contains a set of Azure resource IDs.
|
|
||||||
type ResourceIdentifier struct {
|
|
||||||
Graph string `json:"graph"`
|
|
||||||
KeyVault string `json:"keyVault"`
|
|
||||||
Datalake string `json:"datalake"`
|
|
||||||
Batch string `json:"batch"`
|
|
||||||
OperationalInsights string `json:"operationalInsights"`
|
|
||||||
OSSRDBMS string `json:"ossRDBMS"`
|
|
||||||
Storage string `json:"storage"`
|
|
||||||
Synapse string `json:"synapse"`
|
|
||||||
ServiceBus string `json:"serviceBus"`
|
|
||||||
SQLDatabase string `json:"sqlDatabase"`
|
|
||||||
CosmosDB string `json:"cosmosDB"`
|
|
||||||
ManagedHSM string `json:"managedHSM"`
|
|
||||||
MicrosoftGraph string `json:"microsoftGraph"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Environment represents a set of endpoints for each of Azure's Clouds.
|
|
||||||
type Environment struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
ManagementPortalURL string `json:"managementPortalURL"`
|
|
||||||
PublishSettingsURL string `json:"publishSettingsURL"`
|
|
||||||
ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
|
|
||||||
ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
|
|
||||||
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
|
|
||||||
GalleryEndpoint string `json:"galleryEndpoint"`
|
|
||||||
KeyVaultEndpoint string `json:"keyVaultEndpoint"`
|
|
||||||
ManagedHSMEndpoint string `json:"managedHSMEndpoint"`
|
|
||||||
GraphEndpoint string `json:"graphEndpoint"`
|
|
||||||
ServiceBusEndpoint string `json:"serviceBusEndpoint"`
|
|
||||||
BatchManagementEndpoint string `json:"batchManagementEndpoint"`
|
|
||||||
MicrosoftGraphEndpoint string `json:"microsoftGraphEndpoint"`
|
|
||||||
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
|
|
||||||
CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
|
|
||||||
MariaDBDNSSuffix string `json:"mariaDBDNSSuffix"`
|
|
||||||
MySQLDatabaseDNSSuffix string `json:"mySqlDatabaseDNSSuffix"`
|
|
||||||
PostgresqlDatabaseDNSSuffix string `json:"postgresqlDatabaseDNSSuffix"`
|
|
||||||
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
|
|
||||||
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
|
|
||||||
KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
|
|
||||||
ManagedHSMDNSSuffix string `json:"managedHSMDNSSuffix"`
|
|
||||||
ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
|
|
||||||
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
|
|
||||||
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
|
|
||||||
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
|
|
||||||
TokenAudience string `json:"tokenAudience"`
|
|
||||||
APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"`
|
|
||||||
SynapseEndpointSuffix string `json:"synapseEndpointSuffix"`
|
|
||||||
DatalakeSuffix string `json:"datalakeSuffix"`
|
|
||||||
ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// PublicCloud is the default public Azure cloud environment
|
|
||||||
PublicCloud = Environment{
|
|
||||||
Name: "AzurePublicCloud",
|
|
||||||
ManagementPortalURL: "https://manage.windowsazure.com/",
|
|
||||||
PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index",
|
|
||||||
ServiceManagementEndpoint: "https://management.core.windows.net/",
|
|
||||||
ResourceManagerEndpoint: "https://management.azure.com/",
|
|
||||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
|
|
||||||
GalleryEndpoint: "https://gallery.azure.com/",
|
|
||||||
KeyVaultEndpoint: "https://vault.azure.net/",
|
|
||||||
ManagedHSMEndpoint: "https://managedhsm.azure.net/",
|
|
||||||
GraphEndpoint: "https://graph.windows.net/",
|
|
||||||
ServiceBusEndpoint: "https://servicebus.windows.net/",
|
|
||||||
BatchManagementEndpoint: "https://batch.core.windows.net/",
|
|
||||||
MicrosoftGraphEndpoint: "https://graph.microsoft.com/",
|
|
||||||
StorageEndpointSuffix: "core.windows.net",
|
|
||||||
CosmosDBDNSSuffix: "documents.azure.com",
|
|
||||||
MariaDBDNSSuffix: "mariadb.database.azure.com",
|
|
||||||
MySQLDatabaseDNSSuffix: "mysql.database.azure.com",
|
|
||||||
PostgresqlDatabaseDNSSuffix: "postgres.database.azure.com",
|
|
||||||
SQLDatabaseDNSSuffix: "database.windows.net",
|
|
||||||
TrafficManagerDNSSuffix: "trafficmanager.net",
|
|
||||||
KeyVaultDNSSuffix: "vault.azure.net",
|
|
||||||
ManagedHSMDNSSuffix: "managedhsm.azure.net",
|
|
||||||
ServiceBusEndpointSuffix: "servicebus.windows.net",
|
|
||||||
ServiceManagementVMDNSSuffix: "cloudapp.net",
|
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
|
|
||||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
|
||||||
TokenAudience: "https://management.azure.com/",
|
|
||||||
APIManagementHostNameSuffix: "azure-api.net",
|
|
||||||
SynapseEndpointSuffix: "dev.azuresynapse.net",
|
|
||||||
DatalakeSuffix: "azuredatalakestore.net",
|
|
||||||
ResourceIdentifiers: ResourceIdentifier{
|
|
||||||
Graph: "https://graph.windows.net/",
|
|
||||||
KeyVault: "https://vault.azure.net",
|
|
||||||
Datalake: "https://datalake.azure.net/",
|
|
||||||
Batch: "https://batch.core.windows.net/",
|
|
||||||
OperationalInsights: "https://api.loganalytics.io",
|
|
||||||
OSSRDBMS: "https://ossrdbms-aad.database.windows.net",
|
|
||||||
Storage: "https://storage.azure.com/",
|
|
||||||
Synapse: "https://dev.azuresynapse.net",
|
|
||||||
ServiceBus: "https://servicebus.azure.net/",
|
|
||||||
SQLDatabase: "https://database.windows.net/",
|
|
||||||
CosmosDB: "https://cosmos.azure.com",
|
|
||||||
ManagedHSM: "https://managedhsm.azure.net",
|
|
||||||
MicrosoftGraph: "https://graph.microsoft.com/",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// USGovernmentCloud is the cloud environment for the US Government
|
|
||||||
USGovernmentCloud = Environment{
|
|
||||||
Name: "AzureUSGovernmentCloud",
|
|
||||||
ManagementPortalURL: "https://manage.windowsazure.us/",
|
|
||||||
PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index",
|
|
||||||
ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/",
|
|
||||||
ResourceManagerEndpoint: "https://management.usgovcloudapi.net/",
|
|
||||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.us/",
|
|
||||||
GalleryEndpoint: "https://gallery.usgovcloudapi.net/",
|
|
||||||
KeyVaultEndpoint: "https://vault.usgovcloudapi.net/",
|
|
||||||
ManagedHSMEndpoint: NotAvailable,
|
|
||||||
GraphEndpoint: "https://graph.windows.net/",
|
|
||||||
ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/",
|
|
||||||
BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/",
|
|
||||||
MicrosoftGraphEndpoint: "https://graph.microsoft.us/",
|
|
||||||
StorageEndpointSuffix: "core.usgovcloudapi.net",
|
|
||||||
CosmosDBDNSSuffix: "documents.azure.us",
|
|
||||||
MariaDBDNSSuffix: "mariadb.database.usgovcloudapi.net",
|
|
||||||
MySQLDatabaseDNSSuffix: "mysql.database.usgovcloudapi.net",
|
|
||||||
PostgresqlDatabaseDNSSuffix: "postgres.database.usgovcloudapi.net",
|
|
||||||
SQLDatabaseDNSSuffix: "database.usgovcloudapi.net",
|
|
||||||
TrafficManagerDNSSuffix: "usgovtrafficmanager.net",
|
|
||||||
KeyVaultDNSSuffix: "vault.usgovcloudapi.net",
|
|
||||||
ManagedHSMDNSSuffix: NotAvailable,
|
|
||||||
ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net",
|
|
||||||
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
|
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net",
|
|
||||||
ContainerRegistryDNSSuffix: "azurecr.us",
|
|
||||||
TokenAudience: "https://management.usgovcloudapi.net/",
|
|
||||||
APIManagementHostNameSuffix: "azure-api.us",
|
|
||||||
SynapseEndpointSuffix: "dev.azuresynapse.usgovcloudapi.net",
|
|
||||||
DatalakeSuffix: NotAvailable,
|
|
||||||
ResourceIdentifiers: ResourceIdentifier{
|
|
||||||
Graph: "https://graph.windows.net/",
|
|
||||||
KeyVault: "https://vault.usgovcloudapi.net",
|
|
||||||
Datalake: NotAvailable,
|
|
||||||
Batch: "https://batch.core.usgovcloudapi.net/",
|
|
||||||
OperationalInsights: "https://api.loganalytics.us",
|
|
||||||
OSSRDBMS: "https://ossrdbms-aad.database.usgovcloudapi.net",
|
|
||||||
Storage: "https://storage.azure.com/",
|
|
||||||
Synapse: "https://dev.azuresynapse.usgovcloudapi.net",
|
|
||||||
ServiceBus: "https://servicebus.azure.net/",
|
|
||||||
SQLDatabase: "https://database.usgovcloudapi.net/",
|
|
||||||
CosmosDB: "https://cosmos.azure.com",
|
|
||||||
ManagedHSM: NotAvailable,
|
|
||||||
MicrosoftGraph: "https://graph.microsoft.us/",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChinaCloud is the cloud environment operated in China
|
|
||||||
ChinaCloud = Environment{
|
|
||||||
Name: "AzureChinaCloud",
|
|
||||||
ManagementPortalURL: "https://manage.chinacloudapi.com/",
|
|
||||||
PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index",
|
|
||||||
ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/",
|
|
||||||
ResourceManagerEndpoint: "https://management.chinacloudapi.cn/",
|
|
||||||
ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/",
|
|
||||||
GalleryEndpoint: "https://gallery.chinacloudapi.cn/",
|
|
||||||
KeyVaultEndpoint: "https://vault.azure.cn/",
|
|
||||||
ManagedHSMEndpoint: NotAvailable,
|
|
||||||
GraphEndpoint: "https://graph.chinacloudapi.cn/",
|
|
||||||
ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/",
|
|
||||||
BatchManagementEndpoint: "https://batch.chinacloudapi.cn/",
|
|
||||||
MicrosoftGraphEndpoint: "https://microsoftgraph.chinacloudapi.cn/",
|
|
||||||
StorageEndpointSuffix: "core.chinacloudapi.cn",
|
|
||||||
CosmosDBDNSSuffix: "documents.azure.cn",
|
|
||||||
MariaDBDNSSuffix: "mariadb.database.chinacloudapi.cn",
|
|
||||||
MySQLDatabaseDNSSuffix: "mysql.database.chinacloudapi.cn",
|
|
||||||
PostgresqlDatabaseDNSSuffix: "postgres.database.chinacloudapi.cn",
|
|
||||||
SQLDatabaseDNSSuffix: "database.chinacloudapi.cn",
|
|
||||||
TrafficManagerDNSSuffix: "trafficmanager.cn",
|
|
||||||
KeyVaultDNSSuffix: "vault.azure.cn",
|
|
||||||
ManagedHSMDNSSuffix: NotAvailable,
|
|
||||||
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn",
|
|
||||||
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
|
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn",
|
|
||||||
ContainerRegistryDNSSuffix: "azurecr.cn",
|
|
||||||
TokenAudience: "https://management.chinacloudapi.cn/",
|
|
||||||
APIManagementHostNameSuffix: "azure-api.cn",
|
|
||||||
SynapseEndpointSuffix: "dev.azuresynapse.azure.cn",
|
|
||||||
DatalakeSuffix: NotAvailable,
|
|
||||||
ResourceIdentifiers: ResourceIdentifier{
|
|
||||||
Graph: "https://graph.chinacloudapi.cn/",
|
|
||||||
KeyVault: "https://vault.azure.cn",
|
|
||||||
Datalake: NotAvailable,
|
|
||||||
Batch: "https://batch.chinacloudapi.cn/",
|
|
||||||
OperationalInsights: NotAvailable,
|
|
||||||
OSSRDBMS: "https://ossrdbms-aad.database.chinacloudapi.cn",
|
|
||||||
Storage: "https://storage.azure.com/",
|
|
||||||
Synapse: "https://dev.azuresynapse.net",
|
|
||||||
ServiceBus: "https://servicebus.azure.net/",
|
|
||||||
SQLDatabase: "https://database.chinacloudapi.cn/",
|
|
||||||
CosmosDB: "https://cosmos.azure.com",
|
|
||||||
ManagedHSM: NotAvailable,
|
|
||||||
MicrosoftGraph: "https://microsoftgraph.chinacloudapi.cn",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// GermanCloud is the cloud environment operated in Germany
|
|
||||||
GermanCloud = Environment{
|
|
||||||
Name: "AzureGermanCloud",
|
|
||||||
ManagementPortalURL: "http://portal.microsoftazure.de/",
|
|
||||||
PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index",
|
|
||||||
ServiceManagementEndpoint: "https://management.core.cloudapi.de/",
|
|
||||||
ResourceManagerEndpoint: "https://management.microsoftazure.de/",
|
|
||||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.de/",
|
|
||||||
GalleryEndpoint: "https://gallery.cloudapi.de/",
|
|
||||||
KeyVaultEndpoint: "https://vault.microsoftazure.de/",
|
|
||||||
ManagedHSMEndpoint: NotAvailable,
|
|
||||||
GraphEndpoint: "https://graph.cloudapi.de/",
|
|
||||||
ServiceBusEndpoint: "https://servicebus.cloudapi.de/",
|
|
||||||
BatchManagementEndpoint: "https://batch.cloudapi.de/",
|
|
||||||
MicrosoftGraphEndpoint: NotAvailable,
|
|
||||||
StorageEndpointSuffix: "core.cloudapi.de",
|
|
||||||
CosmosDBDNSSuffix: "documents.microsoftazure.de",
|
|
||||||
MariaDBDNSSuffix: "mariadb.database.cloudapi.de",
|
|
||||||
MySQLDatabaseDNSSuffix: "mysql.database.cloudapi.de",
|
|
||||||
PostgresqlDatabaseDNSSuffix: "postgres.database.cloudapi.de",
|
|
||||||
SQLDatabaseDNSSuffix: "database.cloudapi.de",
|
|
||||||
TrafficManagerDNSSuffix: "azuretrafficmanager.de",
|
|
||||||
KeyVaultDNSSuffix: "vault.microsoftazure.de",
|
|
||||||
ManagedHSMDNSSuffix: NotAvailable,
|
|
||||||
ServiceBusEndpointSuffix: "servicebus.cloudapi.de",
|
|
||||||
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
|
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
|
|
||||||
ContainerRegistryDNSSuffix: NotAvailable,
|
|
||||||
TokenAudience: "https://management.microsoftazure.de/",
|
|
||||||
APIManagementHostNameSuffix: NotAvailable,
|
|
||||||
SynapseEndpointSuffix: NotAvailable,
|
|
||||||
DatalakeSuffix: NotAvailable,
|
|
||||||
ResourceIdentifiers: ResourceIdentifier{
|
|
||||||
Graph: "https://graph.cloudapi.de/",
|
|
||||||
KeyVault: "https://vault.microsoftazure.de",
|
|
||||||
Datalake: NotAvailable,
|
|
||||||
Batch: "https://batch.cloudapi.de/",
|
|
||||||
OperationalInsights: NotAvailable,
|
|
||||||
OSSRDBMS: "https://ossrdbms-aad.database.cloudapi.de",
|
|
||||||
Storage: "https://storage.azure.com/",
|
|
||||||
Synapse: NotAvailable,
|
|
||||||
ServiceBus: "https://servicebus.azure.net/",
|
|
||||||
SQLDatabase: "https://database.cloudapi.de/",
|
|
||||||
CosmosDB: "https://cosmos.azure.com",
|
|
||||||
ManagedHSM: NotAvailable,
|
|
||||||
MicrosoftGraph: NotAvailable,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// EnvironmentFromName returns an Environment based on the common name specified.
|
|
||||||
func EnvironmentFromName(name string) (Environment, error) {
|
|
||||||
// IMPORTANT
|
|
||||||
// As per @radhikagupta5:
|
|
||||||
// This is technical debt, fundamentally here because Kubernetes is not currently accepting
|
|
||||||
// contributions to the providers. Once that is an option, the provider should be updated to
|
|
||||||
// directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation
|
|
||||||
// from this method based on the name that is provided to us.
|
|
||||||
if strings.EqualFold(name, "AZURESTACKCLOUD") {
|
|
||||||
return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName))
|
|
||||||
}
|
|
||||||
|
|
||||||
name = strings.ToUpper(name)
|
|
||||||
env, ok := environments[name]
|
|
||||||
if !ok {
|
|
||||||
return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
return env, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnvironmentFromFile loads an Environment from a configuration file available on disk.
|
|
||||||
// This function is particularly useful in the Hybrid Cloud model, where one must define their own
|
|
||||||
// endpoints.
|
|
||||||
func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
|
|
||||||
fileContents, err := ioutil.ReadFile(location)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = json.Unmarshal(fileContents, &unmarshaled)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetEnvironment updates the environment map with the specified values.
|
|
||||||
func SetEnvironment(name string, env Environment) {
|
|
||||||
environments[strings.ToUpper(name)] = env
|
|
||||||
}
|
|
||||||
245
vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
generated
vendored
245
vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
generated
vendored
@@ -1,245 +0,0 @@
|
|||||||
package azure
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
type audience []string
|
|
||||||
|
|
||||||
type authentication struct {
|
|
||||||
LoginEndpoint string `json:"loginEndpoint"`
|
|
||||||
Audiences audience `json:"audiences"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type environmentMetadataInfo struct {
|
|
||||||
GalleryEndpoint string `json:"galleryEndpoint"`
|
|
||||||
GraphEndpoint string `json:"graphEndpoint"`
|
|
||||||
PortalEndpoint string `json:"portalEndpoint"`
|
|
||||||
Authentication authentication `json:"authentication"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnvironmentProperty represent property names that clients can override
|
|
||||||
type EnvironmentProperty string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// EnvironmentName ...
|
|
||||||
EnvironmentName EnvironmentProperty = "name"
|
|
||||||
// EnvironmentManagementPortalURL ..
|
|
||||||
EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL"
|
|
||||||
// EnvironmentPublishSettingsURL ...
|
|
||||||
EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL"
|
|
||||||
// EnvironmentServiceManagementEndpoint ...
|
|
||||||
EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint"
|
|
||||||
// EnvironmentResourceManagerEndpoint ...
|
|
||||||
EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint"
|
|
||||||
// EnvironmentActiveDirectoryEndpoint ...
|
|
||||||
EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint"
|
|
||||||
// EnvironmentGalleryEndpoint ...
|
|
||||||
EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint"
|
|
||||||
// EnvironmentKeyVaultEndpoint ...
|
|
||||||
EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint"
|
|
||||||
// EnvironmentGraphEndpoint ...
|
|
||||||
EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint"
|
|
||||||
// EnvironmentServiceBusEndpoint ...
|
|
||||||
EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint"
|
|
||||||
// EnvironmentBatchManagementEndpoint ...
|
|
||||||
EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint"
|
|
||||||
// EnvironmentStorageEndpointSuffix ...
|
|
||||||
EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix"
|
|
||||||
// EnvironmentSQLDatabaseDNSSuffix ...
|
|
||||||
EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix"
|
|
||||||
// EnvironmentTrafficManagerDNSSuffix ...
|
|
||||||
EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix"
|
|
||||||
// EnvironmentKeyVaultDNSSuffix ...
|
|
||||||
EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix"
|
|
||||||
// EnvironmentServiceBusEndpointSuffix ...
|
|
||||||
EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix"
|
|
||||||
// EnvironmentServiceManagementVMDNSSuffix ...
|
|
||||||
EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix"
|
|
||||||
// EnvironmentResourceManagerVMDNSSuffix ...
|
|
||||||
EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix"
|
|
||||||
// EnvironmentContainerRegistryDNSSuffix ...
|
|
||||||
EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix"
|
|
||||||
// EnvironmentTokenAudience ...
|
|
||||||
EnvironmentTokenAudience EnvironmentProperty = "tokenAudience"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OverrideProperty represents property name and value that clients can override
|
|
||||||
type OverrideProperty struct {
|
|
||||||
Key EnvironmentProperty
|
|
||||||
Value string
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnvironmentFromURL loads an Environment from a URL
|
|
||||||
// This function is particularly useful in the Hybrid Cloud model, where one may define their own
|
|
||||||
// endpoints.
|
|
||||||
func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) {
|
|
||||||
var metadataEnvProperties environmentMetadataInfo
|
|
||||||
|
|
||||||
if resourceManagerEndpoint == "" {
|
|
||||||
return environment, fmt.Errorf("Metadata resource manager endpoint is empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil {
|
|
||||||
return environment, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Give priority to user's override values
|
|
||||||
overrideProperties(&environment, properties)
|
|
||||||
|
|
||||||
if environment.Name == "" {
|
|
||||||
environment.Name = "HybridEnvironment"
|
|
||||||
}
|
|
||||||
stampDNSSuffix := environment.StorageEndpointSuffix
|
|
||||||
if stampDNSSuffix == "" {
|
|
||||||
stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/")
|
|
||||||
environment.StorageEndpointSuffix = stampDNSSuffix
|
|
||||||
}
|
|
||||||
if environment.KeyVaultDNSSuffix == "" {
|
|
||||||
environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix)
|
|
||||||
}
|
|
||||||
if environment.KeyVaultEndpoint == "" {
|
|
||||||
environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix)
|
|
||||||
}
|
|
||||||
if environment.TokenAudience == "" {
|
|
||||||
environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0]
|
|
||||||
}
|
|
||||||
if environment.ActiveDirectoryEndpoint == "" {
|
|
||||||
environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint
|
|
||||||
}
|
|
||||||
if environment.ResourceManagerEndpoint == "" {
|
|
||||||
environment.ResourceManagerEndpoint = resourceManagerEndpoint
|
|
||||||
}
|
|
||||||
if environment.GalleryEndpoint == "" {
|
|
||||||
environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint
|
|
||||||
}
|
|
||||||
if environment.GraphEndpoint == "" {
|
|
||||||
environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint
|
|
||||||
}
|
|
||||||
|
|
||||||
return environment, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func overrideProperties(environment *Environment, properties []OverrideProperty) {
|
|
||||||
for _, property := range properties {
|
|
||||||
switch property.Key {
|
|
||||||
case EnvironmentName:
|
|
||||||
{
|
|
||||||
environment.Name = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentManagementPortalURL:
|
|
||||||
{
|
|
||||||
environment.ManagementPortalURL = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentPublishSettingsURL:
|
|
||||||
{
|
|
||||||
environment.PublishSettingsURL = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentServiceManagementEndpoint:
|
|
||||||
{
|
|
||||||
environment.ServiceManagementEndpoint = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentResourceManagerEndpoint:
|
|
||||||
{
|
|
||||||
environment.ResourceManagerEndpoint = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentActiveDirectoryEndpoint:
|
|
||||||
{
|
|
||||||
environment.ActiveDirectoryEndpoint = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentGalleryEndpoint:
|
|
||||||
{
|
|
||||||
environment.GalleryEndpoint = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentKeyVaultEndpoint:
|
|
||||||
{
|
|
||||||
environment.KeyVaultEndpoint = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentGraphEndpoint:
|
|
||||||
{
|
|
||||||
environment.GraphEndpoint = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentServiceBusEndpoint:
|
|
||||||
{
|
|
||||||
environment.ServiceBusEndpoint = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentBatchManagementEndpoint:
|
|
||||||
{
|
|
||||||
environment.BatchManagementEndpoint = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentStorageEndpointSuffix:
|
|
||||||
{
|
|
||||||
environment.StorageEndpointSuffix = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentSQLDatabaseDNSSuffix:
|
|
||||||
{
|
|
||||||
environment.SQLDatabaseDNSSuffix = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentTrafficManagerDNSSuffix:
|
|
||||||
{
|
|
||||||
environment.TrafficManagerDNSSuffix = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentKeyVaultDNSSuffix:
|
|
||||||
{
|
|
||||||
environment.KeyVaultDNSSuffix = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentServiceBusEndpointSuffix:
|
|
||||||
{
|
|
||||||
environment.ServiceBusEndpointSuffix = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentServiceManagementVMDNSSuffix:
|
|
||||||
{
|
|
||||||
environment.ServiceManagementVMDNSSuffix = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentResourceManagerVMDNSSuffix:
|
|
||||||
{
|
|
||||||
environment.ResourceManagerVMDNSSuffix = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentContainerRegistryDNSSuffix:
|
|
||||||
{
|
|
||||||
environment.ContainerRegistryDNSSuffix = property.Value
|
|
||||||
}
|
|
||||||
case EnvironmentTokenAudience:
|
|
||||||
{
|
|
||||||
environment.TokenAudience = property.Value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) {
|
|
||||||
client := autorest.NewClientWithUserAgent("")
|
|
||||||
managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0")
|
|
||||||
req, _ := http.NewRequest("GET", managementEndpoint, nil)
|
|
||||||
response, err := client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return environment, err
|
|
||||||
}
|
|
||||||
defer response.Body.Close()
|
|
||||||
jsonResponse, err := ioutil.ReadAll(response.Body)
|
|
||||||
if err != nil {
|
|
||||||
return environment, err
|
|
||||||
}
|
|
||||||
err = json.Unmarshal(jsonResponse, &environment)
|
|
||||||
return environment, err
|
|
||||||
}
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user