Mirror of https://github.com/1Password/onepassword-operator.git
Synced 2025-10-22 15:38:06 +00:00

Compare commits: 63e3f29be9 ... feature/mi
7 commits
Commits in this compare:
209bc7cd17
493b311564
e39cff881d
1a085562e4
21111fec90
69cc7cedb0
b30c6130f7
4  .dockerignore  Normal file
@@ -0,0 +1,4 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore build and test binaries.
bin/
testbin/
87  .gitignore  vendored
@@ -1,80 +1,25 @@
-# Temporary Build Files
-build/_output
-build/_test
-# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
-### Emacs ###
-# -*- mode: gitignore; -*-
-*~
-\#*\#
-/.emacs.desktop
-/.emacs.desktop.lock
-*.elc
-auto-save-list
-tramp
-.\#*
-# Org-mode
-.org-id-locations
-*_archive
-# flymake-mode
-*_flymake.*
-# eshell files
-/eshell/history
-/eshell/lastdir
-# elpa packages
-/elpa/
-# reftex files
-*.rel
-# AUCTeX auto folder
-/auto/
-# cask packages
-.cask/
-dist/
-# Flycheck
-flycheck_*.el
-# server auth directory
-/server/
-# projectiles files
-.projectile
-projectile-bookmarks.eld
-# directory configuration
-.dir-locals.el
-# saveplace
-places
-# url cache
-url/cache/
-# cedet
-ede-projects.el
-# smex
-smex-items
-# company-statistics
-company-statistics-cache.el
-# anaconda-mode
-anaconda-mode/
-### Go ###
 # Binaries for programs and plugins
 *.exe
 *.exe~
 *.dll
 *.so
 *.dylib
+bin
+testbin/*
-# Test binary, build with 'go test -c'
+
+# Test binary, build with `go test -c`
 *.test
 
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
-### Vim ###
-# swap
-.sw[a-p]
-.*.sw[a-p]
-# session
-Session.vim
-# temporary
-.netrwhist
-# auto-generated tag files
-tags
-### VisualStudioCode ###
-.vscode/*
-.history
-.DS_Store
-op-ss-client/
-.idea/
-# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
+
+# Kubernetes Generated files - skip generated files, except for vendored files
+!vendor/**/zz_generated.*
+
+# editor and IDE paraphernalia
+.idea
+*.swp
+*.swo
+*~
24  Dockerfile
@@ -1,31 +1,27 @@
 # Build the manager binary
-FROM golang:1.13 as builder
+FROM golang:1.17 as builder
 
 WORKDIR /workspace
 # Copy the Go Modules manifests
 COPY go.mod go.mod
 COPY go.sum go.sum
+# cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN go mod download
 
 # Copy the go source
-COPY cmd/manager/main.go main.go
-COPY pkg/ pkg/
-COPY version/ version/
-COPY vendor/ vendor/
+COPY main.go main.go
+COPY api/ api/
+COPY controllers/ controllers/
 
 # Build
-ARG operator_version=dev
-RUN CGO_ENABLED=0 \
-    GO111MODULE=on \
-    go build \
-    -ldflags "-X \"github.com/1Password/onepassword-operator/version.Version=$operator_version\"" \
-    -mod vendor \
-    -a -o manager main.go
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go
 
 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
 FROM gcr.io/distroless/static:nonroot
 WORKDIR /
 COPY --from=builder /workspace/manager .
-USER nonroot:nonroot
-COPY deploy/connect/ deploy/connect/
+USER 65532:65532
 
 ENTRYPOINT ["/manager"]
295  Makefile
@@ -1,68 +1,233 @@
-export MAIN_BRANCH ?= main
-
-.DEFAULT_GOAL := help
-.PHONY: test build build/binary build/local clean test/coverage release/prepare release/tag .check_bump_type .check_git_clean help
-
-GIT_BRANCH := $(shell git symbolic-ref --short HEAD)
-WORKTREE_CLEAN := $(shell git status --porcelain 1>/dev/null 2>&1; echo $$?)
-SCRIPTS_DIR := $(CURDIR)/scripts
-
-versionFile = $(CURDIR)/.VERSION
-curVersion := $(shell cat $(versionFile) | sed 's/^v//')
-
-OPERATOR_NAME := onepassword-connect-operator
-DOCKER_IMG_TAG ?= $(OPERATOR_NAME):v$(curVersion)
-
-test: ## Run test suite
-	go test ./...
-
-test/coverage: ## Run test suite with coverage report
-	go test -v ./... -cover
-
-build: ## Build operator Docker image
-	@docker build -f Dockerfile --build-arg operator_version=$(curVersion) -t $(DOCKER_IMG_TAG) .
-	@echo "Successfully built and tagged image."
-	@echo "Tag: $(DOCKER_IMG_TAG)"
-
-build/local: ## Build local version of the operator Docker image
-	@docker build -f Dockerfile -t local/$(DOCKER_IMG_TAG) .
-
-build/binary: clean ## Build operator binary
-	@mkdir -p dist
-	@go build -mod vendor -a -o manager ./cmd/manager/main.go
-	@mv manager ./dist
-
-clean:
-	rm -rf ./dist
-
-help: ## Prints this help message
-	@grep -E '^[\/a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
-
-## Release functions =====================
-
-release/prepare: .check_git_clean ## Updates changelog and creates release branch (call with 'release/prepare version=<new_version_number>')
-	@test $(version) || (echo "[ERROR] version argument not set."; exit 1)
-	@git fetch --quiet origin $(MAIN_BRANCH)
-	@echo $(version) | tr -d '\n' | tee $(versionFile) &>/dev/null
-	@NEW_VERSION=$(version) $(SCRIPTS_DIR)/prepare-release.sh
-
-release/tag: .check_git_clean ## Creates git tag
-	@git pull --ff-only
-	@echo "Applying tag 'v$(curVersion)' to HEAD..."
-	@git tag --sign "v$(curVersion)" -m "Release v$(curVersion)"
-	@echo "[OK] Success!"
-	@echo "Remember to call 'git push --tags' to persist the tag."
-
-## Helper functions =====================
-
-.check_git_clean:
-ifneq ($(GIT_BRANCH), $(MAIN_BRANCH))
-	@echo "[ERROR] Please checkout default branch '$(MAIN_BRANCH)' and re-run this command."; exit 1;
-endif
-ifneq ($(WORKTREE_CLEAN), 0)
-	@echo "[ERROR] Uncommitted changes found in worktree. Address them and try again."; exit 1;
-endif
+# VERSION defines the project version for the bundle.
+# Update this value when you upgrade the version of your project.
+# To re-generate a bundle for another specific version without changing the standard setup, you can:
+# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
+# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
+VERSION ?= 0.0.1
+
+# CHANNELS define the bundle channels used in the bundle.
+# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
+# To re-generate a bundle for other specific channels without changing the standard setup, you can:
+# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable)
+# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable")
+ifneq ($(origin CHANNELS), undefined)
+BUNDLE_CHANNELS := --channels=$(CHANNELS)
+endif
+
+# DEFAULT_CHANNEL defines the default channel used in the bundle.
+# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable")
+# To re-generate a bundle for any other default channel without changing the default setup, you can:
+# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable)
+# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable")
+ifneq ($(origin DEFAULT_CHANNEL), undefined)
+BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
+endif
+BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
+
+# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images.
+# This variable is used to construct full image tags for bundle and catalog images.
+#
+# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
+# onepassword.com/onepassword-operator-new-bundle:$VERSION and onepassword.com/onepassword-operator-new-catalog:$VERSION.
+IMAGE_TAG_BASE ?= onepassword.com/onepassword-operator-new
+
+# BUNDLE_IMG defines the image:tag used for the bundle.
+# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
+BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
+
+# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command
+BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
+
+# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests
+# You can enable this value if you would like to use SHA Based Digests
+# To enable set flag to true
+USE_IMAGE_DIGESTS ?= false
+ifeq ($(USE_IMAGE_DIGESTS), true)
+	BUNDLE_GEN_FLAGS += --use-image-digests
+endif
+
+# Image URL to use all building/pushing image targets
+IMG ?= controller:latest
+# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
+ENVTEST_K8S_VERSION = 1.23
+
+# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
+ifeq (,$(shell go env GOBIN))
+GOBIN=$(shell go env GOPATH)/bin
+else
+GOBIN=$(shell go env GOBIN)
+endif
+
+# Setting SHELL to bash allows bash commands to be executed by recipes.
+# This is a requirement for 'setup-envtest.sh' in the test target.
+# Options are set to exit when a recipe line exits non-zero or a piped command fails.
+SHELL = /usr/bin/env bash -o pipefail
+.SHELLFLAGS = -ec
+
+.PHONY: all
+all: build
+
+##@ General
+
+# The help target prints out all targets with their descriptions organized
+# beneath their categories. The categories are represented by '##@' and the
+# target descriptions by '##'. The awk commands is responsible for reading the
+# entire set of makefiles included in this invocation, looking for lines of the
+# file as xyz: ## something, and then pretty-format the target and help. Then,
+# if there's a line with ##@ something, that gets pretty-printed as a category.
+# More info on the usage of ANSI control characters for terminal formatting:
+# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
+# More info on the awk command:
+# http://linuxcommand.org/lc3_adv_awk.php
+
+.PHONY: help
+help: ## Display this help.
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+##@ Development
+
+.PHONY: manifests
+manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
+	$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
+
+.PHONY: generate
+generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
+	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
+
+.PHONY: fmt
+fmt: ## Run go fmt against code.
+	go fmt ./...
+
+.PHONY: vet
+vet: ## Run go vet against code.
+	go vet ./...
+
+.PHONY: test
+test: manifests generate fmt vet envtest ## Run tests.
+	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
+
+##@ Build
+
+.PHONY: build
+build: generate fmt vet ## Build manager binary.
+	go build -o bin/manager main.go
+
+.PHONY: run
+run: manifests generate fmt vet ## Run a controller from your host.
+	go run ./main.go
+
+.PHONY: docker-build
+docker-build: test ## Build docker image with the manager.
+	docker build -t ${IMG} .
+
+.PHONY: docker-push
+docker-push: ## Push docker image with the manager.
+	docker push ${IMG}
+
+##@ Deployment
+
+ifndef ignore-not-found
+  ignore-not-found = false
+endif
+
+.PHONY: install
+install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
+	$(KUSTOMIZE) build config/crd | kubectl apply -f -
+
+.PHONY: uninstall
+uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+	$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
+
+.PHONY: deploy
+deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
+	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
+	$(KUSTOMIZE) build config/default | kubectl apply -f -
+
+.PHONY: undeploy
+undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+	$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
+
+CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
+.PHONY: controller-gen
+controller-gen: ## Download controller-gen locally if necessary.
+	$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0)
+
+KUSTOMIZE = $(shell pwd)/bin/kustomize
+.PHONY: kustomize
+kustomize: ## Download kustomize locally if necessary.
+	$(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7)
+
+ENVTEST = $(shell pwd)/bin/setup-envtest
+.PHONY: envtest
+envtest: ## Download envtest-setup locally if necessary.
+	$(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest)
+
+# go-get-tool will 'go get' any package $2 and install it to $1.
+PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
+define go-get-tool
+@[ -f $(1) ] || { \
+set -e ;\
+TMP_DIR=$$(mktemp -d) ;\
+cd $$TMP_DIR ;\
+go mod init tmp ;\
+echo "Downloading $(2)" ;\
+GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\
+rm -rf $$TMP_DIR ;\
+}
+endef
+
+.PHONY: bundle
+bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
+	operator-sdk generate kustomize manifests -q
+	cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
+	$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle $(BUNDLE_GEN_FLAGS)
+	operator-sdk bundle validate ./bundle
+
+.PHONY: bundle-build
+bundle-build: ## Build the bundle image.
+	docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
+
+.PHONY: bundle-push
+bundle-push: ## Push the bundle image.
+	$(MAKE) docker-push IMG=$(BUNDLE_IMG)
+
+.PHONY: opm
+OPM = ./bin/opm
+opm: ## Download opm locally if necessary.
+ifeq (,$(wildcard $(OPM)))
+ifeq (,$(shell which opm 2>/dev/null))
+	@{ \
+	set -e ;\
+	mkdir -p $(dir $(OPM)) ;\
+	OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
+	curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.19.1/$${OS}-$${ARCH}-opm ;\
+	chmod +x $(OPM) ;\
+	}
+else
+OPM = $(shell which opm)
+endif
+endif
+
+# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
+# These images MUST exist in a registry and be pull-able.
+BUNDLE_IMGS ?= $(BUNDLE_IMG)
+
+# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
+CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
+
+# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
+ifneq ($(origin CATALOG_BASE_IMG), undefined)
+FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
+endif
+
+# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
+# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
+# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
+.PHONY: catalog-build
+catalog-build: opm ## Build a catalog image.
+	$(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
+
+# Push the catalog image.
+.PHONY: catalog-push
+catalog-push: ## Push a catalog image.
+	$(MAKE) docker-push IMG=$(CATALOG_IMG)
19  PROJECT  Normal file
@@ -0,0 +1,19 @@
domain: onepassword.com
layout:
- go.kubebuilder.io/v3
plugins:
  manifests.sdk.operatorframework.io/v2: {}
  scorecard.sdk.operatorframework.io/v2: {}
projectName: onepassword-operator-new
repo: github.com/1Password/onepassword-operator
resources:
- api:
    crdVersion: v1
    namespaced: true
  controller: true
  domain: onepassword.com
  group: onepassword
  kind: OnePasswordItem
  path: github.com/1Password/onepassword-operator/api/v1
  version: v1
version: "3"
@@ -1,3 +1,5 @@
+// TODO: Update README.md
+
 # 1Password Connect Kubernetes Operator
 
 The 1Password Connect Kubernetes Operator provides the ability to integrate Kubernetes with 1Password. This Operator manages `OnePasswordItem` Custom Resource Definitions (CRDs) that define the location of an Item stored in 1Password. The `OnePasswordItem` CRD, when created, will be used to compose a Kubernetes Secret containing the contents of the specified item.
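For context, a minimal sketch of what an `OnePasswordItem` custom resource could look like under the new `onepassword.onepassword.com/v1` API group added in this compare. Only `apiVersion`, `kind`, and the `itemPath` field are taken from the diffs here; the metadata name and the concrete path value are hypothetical placeholders.

    # Illustrative sketch only; the path value is a placeholder.
    apiVersion: onepassword.onepassword.com/v1
    kind: OnePasswordItem
    metadata:
      name: example-secret
    spec:
      itemPath: "vaults/<vault>/items/<item>"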
36  api/v1/groupversion_info.go  Normal file
@@ -0,0 +1,36 @@
/*
Copyright 2022.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1 contains API Schema definitions for the onepassword v1 API group
//+kubebuilder:object:generate=true
//+groupName=onepassword.onepassword.com
package v1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "onepassword.onepassword.com", Version: "v1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
@@ -1,28 +1,47 @@
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 package v1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
 // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
 
 // OnePasswordItemSpec defines the desired state of OnePasswordItem
 type OnePasswordItemSpec struct {
+	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+
+	// Foo is an example field of OnePasswordItem. Edit onepassworditem_types.go to remove/update
 	ItemPath string `json:"itemPath,omitempty"`
 }
 
 // OnePasswordItemStatus defines the observed state of OnePasswordItem
 type OnePasswordItemStatus struct {
 	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
-	// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
-	// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
+	// Important: Run "make" to regenerate code after modifying this file
 }
 
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
 
 // OnePasswordItem is the Schema for the onepassworditems API
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:path=onepassworditems,scope=Namespaced
 type OnePasswordItem struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -32,7 +51,7 @@ type OnePasswordItem struct {
 	Status OnePasswordItemStatus `json:"status,omitempty"`
 }
 
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//+kubebuilder:object:root=true
 
 // OnePasswordItemList contains a list of OnePasswordItem
 type OnePasswordItemList struct {
@@ -1,6 +1,23 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
-// Code generated by operator-sdk. DO NOT EDIT.
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
 
 package v1
 
@@ -15,7 +32,6 @@ func (in *OnePasswordItem) DeepCopyInto(out *OnePasswordItem) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	out.Spec = in.Spec
 	out.Status = in.Status
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItem.
@@ -48,7 +64,6 @@ func (in *OnePasswordItemList) DeepCopyInto(out *OnePasswordItemList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItemList.
@@ -72,7 +87,6 @@ func (in *OnePasswordItemList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *OnePasswordItemSpec) DeepCopyInto(out *OnePasswordItemSpec) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItemSpec.
@@ -88,7 +102,6 @@ func (in *OnePasswordItemSpec) DeepCopy() *OnePasswordItemSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *OnePasswordItemStatus) DeepCopyInto(out *OnePasswordItemStatus) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnePasswordItemStatus.
@@ -1,15 +0,0 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:latest

ENV OPERATOR=/usr/local/bin/onepassword-connect-operator \
    USER_UID=1001 \
    USER_NAME=onepassword-connect-operator

# install operator binary
COPY build/_output/bin/op-kubernetes-connect-operator ${OPERATOR}

COPY build/bin /usr/local/bin
RUN /usr/local/bin/user_setup

ENTRYPOINT ["/usr/local/bin/entrypoint"]

USER ${USER_UID}
@@ -1,3 +0,0 @@
#!/bin/sh -e

exec ${OPERATOR} $@
@@ -1,11 +0,0 @@
#!/bin/sh
set -x

# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
echo "${USER_NAME}:x:${USER_UID}:0:${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd
mkdir -p "${HOME}"
chown "${USER_UID}:0" "${HOME}"
chmod ug+rwx "${HOME}"

# no need for this script to remain in the image after running
rm "$0"
@@ -1,305 +0,0 @@
package main

import (
	"context"
	"errors"
	"flag"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/1Password/onepassword-operator/pkg/controller"
	op "github.com/1Password/onepassword-operator/pkg/onepassword"

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	_ "k8s.io/client-go/plugin/pkg/client/auth"
	"k8s.io/client-go/rest"

	"github.com/1Password/onepassword-operator/pkg/apis"
	"github.com/1Password/onepassword-operator/version"

	"github.com/1Password/connect-sdk-go/connect"

	"github.com/operator-framework/operator-sdk/pkg/k8sutil"
	kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics"
	"github.com/operator-framework/operator-sdk/pkg/leader"
	"github.com/operator-framework/operator-sdk/pkg/log/zap"
	"github.com/operator-framework/operator-sdk/pkg/metrics"
	sdkVersion "github.com/operator-framework/operator-sdk/version"
	"github.com/spf13/pflag"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

const envPollingIntervalVariable = "POLLING_INTERVAL"
const manageConnect = "MANAGE_CONNECT"
const restartDeploymentsEnvVariable = "AUTO_RESTART"
const defaultPollingInterval = 600

// Change below variables to serve metrics on different host or port.
var (
	metricsHost               = "0.0.0.0"
	metricsPort         int32 = 8383
	operatorMetricsPort int32 = 8686
)
var log = logf.Log.WithName("cmd")

func printVersion() {
	log.Info(fmt.Sprintf("Operator Version: %s", version.Version))
	log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
	log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
	log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
}

func main() {
	// Add the zap logger flag set to the CLI. The flag set must
	// be added before calling pflag.Parse().
	pflag.CommandLine.AddFlagSet(zap.FlagSet())

	// Add flags registered by imported packages (e.g. glog and
	// controller-runtime)
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)

	pflag.Parse()

	// Use a zap logr.Logger implementation. If none of the zap
	// flags are configured (or if the zap flag set is not being
	// used), this defaults to a production zap logger.
	//
	// The logger instantiated here can be changed to any logger
	// implementing the logr.Logger interface. This logger will
	// be propagated through the whole operator, generating
	// uniform and structured logs.
	logf.SetLogger(zap.Logger())

	printVersion()

	namespace := os.Getenv(k8sutil.WatchNamespaceEnvVar)

	deploymentNamespace, err := k8sutil.GetOperatorNamespace()
	if err != nil {
		log.Error(err, "Failed to get namespace")
		os.Exit(1)
	}

	// Get a config to talk to the apiserver
	cfg, err := config.GetConfig()
	if err != nil {
		log.Error(err, "")
		os.Exit(1)
	}

	ctx := context.Background()
	// Become the leader before proceeding
	err = leader.Become(ctx, "onepassword-connect-operator-lock")
	if err != nil {
		log.Error(err, "")
		os.Exit(1)
	}

	// Set default manager options
	options := manager.Options{
		Namespace:          namespace,
		MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
	}

	// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
	// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
	// Also note that you may face performance issues when using this with a high number of namespaces.
	if strings.Contains(namespace, ",") {
		options.Namespace = ""
		options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
	}

	// Create a new manager to provide shared dependencies and start components
	mgr, err := manager.New(cfg, options)
	if err != nil {
		log.Error(err, "")
		os.Exit(1)
	}

	log.Info("Registering Components.")

	// Setup Scheme for all resources
	if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
		log.Error(err, "")
		os.Exit(1)
	}

	//Setup 1PasswordConnect
	if shouldManageConnect() {
		log.Info("Automated Connect Management Enabled")
		go func() {
			connectStarted := false
			for connectStarted == false {
				err := op.SetupConnect(mgr.GetClient(), deploymentNamespace)
				// Cache Not Started is an acceptable error. Retry until cache is started.
				if err != nil && !errors.Is(err, &cache.ErrCacheNotStarted{}) {
					log.Error(err, "")
					os.Exit(1)
				}
				if err == nil {
					connectStarted = true
				}
			}
		}()
	} else {
		log.Info("Automated Connect Management Disabled")
	}

	// Setup One Password Client
	opConnectClient, err := connect.NewClientFromEnvironment()

	if err := controller.AddToManager(mgr, opConnectClient); err != nil {
		log.Error(err, "")
		os.Exit(1)
	}

	// Add the Metrics Service
	addMetrics(ctx, cfg)

	// Setup update secrets task
	updatedSecretsPoller := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
	done := make(chan bool)
	ticker := time.NewTicker(getPollingIntervalForUpdatingSecrets())
	go func() {
		for {
			select {
			case <-done:
				ticker.Stop()
				return
			case <-ticker.C:
				err := updatedSecretsPoller.UpdateKubernetesSecretsTask()
				if err != nil {
					log.Error(err, "error running update kubernetes secret task")
				}
			}
		}
	}()

	// Start the Cmd
	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		log.Error(err, "Manager exited non-zero")
		done <- true
		os.Exit(1)
	}
}

// addMetrics will create the Services and Service Monitors to allow the operator export the metrics by using
// the Prometheus operator
func addMetrics(ctx context.Context, cfg *rest.Config) {
	// Get the namespace the operator is currently deployed in.
	operatorNs, err := k8sutil.GetOperatorNamespace()
	if err != nil {
		if errors.Is(err, k8sutil.ErrRunLocal) {
			log.Info("Skipping CR metrics server creation; not running in a cluster.")
			return
		}
	}

	if err := serveCRMetrics(cfg, operatorNs); err != nil {
		log.Info("Could not generate and serve custom resource metrics", "error", err.Error())
	}

	// Add to the below struct any other metrics ports you want to expose.
	servicePorts := []v1.ServicePort{
		{Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}},
		{Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}},
	}

	// Create Service object to expose the metrics port(s).
	service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts)
	if err != nil {
		log.Info("Could not create metrics Service", "error", err.Error())
	}

	// CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources
	// necessary to configure Prometheus to scrape metrics from this operator.
	services := []*v1.Service{service}

	// The ServiceMonitor is created in the same namespace where the operator is deployed
	_, err = metrics.CreateServiceMonitors(cfg, operatorNs, services)
	if err != nil {
		log.Info("Could not create ServiceMonitor object", "error", err.Error())
		// If this operator is deployed to a cluster without the prometheus-operator running, it will return
		// ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation.
		if err == metrics.ErrServiceMonitorNotPresent {
			log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error())
		}
	}
}

// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types.
// It serves those metrics on "http://metricsHost:operatorMetricsPort".
func serveCRMetrics(cfg *rest.Config, operatorNs string) error {
	// The function below returns a list of filtered operator/CR specific GVKs. For more control, override the GVK list below
	// with your own custom logic. Note that if you are adding third party API schemas, probably you will need to
	// customize this implementation to avoid permissions issues.
	filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme)
	if err != nil {
		return err
	}

	// The metrics will be generated from the namespaces which are returned here.
	// NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error.
	ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs)
	if err != nil {
		return err
	}

	// Generate and serve custom resource specific metrics.
	err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort)
	if err != nil {
		return err
	}
	return nil
}

func getPollingIntervalForUpdatingSecrets() time.Duration {
	timeInSecondsString, found := os.LookupEnv(envPollingIntervalVariable)
	if found {
		timeInSeconds, err := strconv.Atoi(timeInSecondsString)
		if err == nil {
			return time.Duration(timeInSeconds) * time.Second
		}
		log.Info("Invalid value set for polling interval. Must be a valid integer.")
	}

	log.Info(fmt.Sprintf("Using default polling interval of %v seconds", defaultPollingInterval))
	return time.Duration(defaultPollingInterval) * time.Second
}

func shouldManageConnect() bool {
	shouldManageConnect, found := os.LookupEnv(manageConnect)
	if found {
		shouldManageConnectBool, err := strconv.ParseBool(strings.ToLower(shouldManageConnect))
		if err != nil {
			log.Error(err, "")
			os.Exit(1)
		}
		return shouldManageConnectBool
	}
	return false
}

func shouldAutoRestartDeployments() bool {
	shouldAutoRestartDeployments, found := os.LookupEnv(restartDeploymentsEnvVariable)
	if found {
		shouldAutoRestartDeploymentsBool, err := strconv.ParseBool(strings.ToLower(shouldAutoRestartDeployments))
		if err != nil {
			log.Error(err, "")
			os.Exit(1)
		}
		return shouldAutoRestartDeploymentsBool
	}
	return false
}
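For context, the removed main.go above configured itself through environment variables: the watch namespace read via k8sutil.WatchNamespaceEnvVar, plus POLLING_INTERVAL (seconds, default 600), MANAGE_CONNECT, and AUTO_RESTART. A minimal sketch of how those variables might be set on the operator container; the container name, namespace value, and the WATCH_NAMESPACE variable name are assumptions, and the values shown are placeholders.

    # Illustrative sketch only; values are placeholders, variable names come from the removed main.go.
    containers:
      - name: onepassword-connect-operator
        env:
          - name: WATCH_NAMESPACE
            value: "default"   # a comma-separated list switches the manager to multi-namespace mode
          - name: POLLING_INTERVAL
            value: "600"       # seconds between secret-update checks (default 600)
          - name: MANAGE_CONNECT
            value: "true"      # let the operator set up 1Password Connect itself
          - name: AUTO_RESTART
            value: "false"     # restart deployments when their backing secrets change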
21  config/crd/kustomization.yaml  Normal file
@@ -0,0 +1,21 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/onepassword.onepassword.com_onepassworditems.yaml
#+kubebuilder:scaffold:crdkustomizeresource

patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_onepassworditems.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch

# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_onepassworditems.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch

# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml
19  config/crd/kustomizeconfig.yaml  Normal file
@@ -0,0 +1,19 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
  version: v1
  fieldSpecs:
  - kind: CustomResourceDefinition
    version: v1
    group: apiextensions.k8s.io
    path: spec/conversion/webhook/clientConfig/service/name

namespace:
- kind: CustomResourceDefinition
  version: v1
  group: apiextensions.k8s.io
  path: spec/conversion/webhook/clientConfig/service/namespace
  create: false

varReference:
- path: metadata/annotations
7  config/crd/patches/cainjection_in_onepassworditems.yaml  Normal file
@@ -0,0 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
  name: onepassworditems.onepassword.onepassword.com
16  config/crd/patches/webhook_in_onepassworditems.yaml  Normal file
@@ -0,0 +1,16 @@
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: onepassworditems.onepassword.onepassword.com
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
74  config/default/kustomization.yaml  Normal file
@@ -0,0 +1,74 @@
# Adds namespace to all resources.
namespace: onepassword-operator-new-system

# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: onepassword-operator-new-

# Labels to add to all resources and selectors.
#commonLabels:
#  someName: someValue

bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus

patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml

# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
#- manager_config_patch.yaml

# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml

# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml

# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
#  objref:
#    kind: Certificate
#    group: cert-manager.io
#    version: v1
#    name: serving-cert # this name should match the one in certificate.yaml
#  fieldref:
#    fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
#  objref:
#    kind: Certificate
#    group: cert-manager.io
#    version: v1
#    name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
#  objref:
#    kind: Service
#    version: v1
#    name: webhook-service
#  fieldref:
#    fieldpath: metadata.namespace
#- name: SERVICE_NAME
#  objref:
#    kind: Service
#    version: v1
#    name: webhook-service
34  config/default/manager_auth_proxy_patch.yaml  Normal file
@@ -0,0 +1,34 @@
# This patch inject a sidecar container which is a HTTP proxy for the
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
spec:
  template:
    spec:
      containers:
      - name: kube-rbac-proxy
        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
        args:
        - "--secure-listen-address=0.0.0.0:8443"
        - "--upstream=http://127.0.0.1:8080/"
        - "--logtostderr=true"
        - "--v=0"
        ports:
        - containerPort: 8443
          protocol: TCP
          name: https
        resources:
          limits:
            cpu: 500m
            memory: 128Mi
          requests:
            cpu: 5m
            memory: 64Mi
      - name: manager
        args:
        - "--health-probe-bind-address=:8081"
        - "--metrics-bind-address=127.0.0.1:8080"
        - "--leader-elect"
20  config/default/manager_config_patch.yaml  Normal file
@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
spec:
  template:
    spec:
      containers:
      - name: manager
        args:
        - "--config=controller_manager_config.yaml"
        volumeMounts:
        - name: manager-config
          mountPath: /controller_manager_config.yaml
          subPath: controller_manager_config.yaml
      volumes:
      - name: manager-config
        configMap:
          name: manager-config
11  config/manager/controller_manager_config.yaml  Normal file
@@ -0,0 +1,11 @@
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
kind: ControllerManagerConfig
health:
  healthProbeBindAddress: :8081
metrics:
  bindAddress: 127.0.0.1:8080
webhook:
  port: 9443
leaderElection:
  leaderElect: true
  resourceName: c26807fd.onepassword.com
10  config/manager/kustomization.yaml  Normal file
@@ -0,0 +1,10 @@
resources:
- manager.yaml

generatorOptions:
  disableNameSuffixHash: true

configMapGenerator:
- name: manager-config
  files:
  - controller_manager_config.yaml
60  config/manager/manager.yaml  Normal file
@@ -0,0 +1,60 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    control-plane: controller-manager
  name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
  labels:
    control-plane: controller-manager
spec:
  selector:
    matchLabels:
      control-plane: controller-manager
  replicas: 1
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/default-container: manager
      labels:
        control-plane: controller-manager
    spec:
      securityContext:
        runAsNonRoot: true
      containers:
      - command:
        - /manager
        args:
        - --leader-elect
        image: controller:latest
        name: manager
        securityContext:
          allowPrivilegeEscalation: false
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
          initialDelaySeconds: 15
          periodSeconds: 20
        readinessProbe:
          httpGet:
            path: /readyz
            port: 8081
          initialDelaySeconds: 5
          periodSeconds: 10
        # TODO(user): Configure the resources accordingly based on the project requirements.
        # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
        resources:
          limits:
            cpu: 500m
            memory: 128Mi
          requests:
            cpu: 10m
            memory: 64Mi
      serviceAccountName: controller-manager
      terminationGracePeriodSeconds: 10
config/manifests/kustomization.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
# These resources constitute the fully configured set of manifests
# used to generate the 'manifests/' directory in a bundle.
resources:
- bases/onepassword-operator-new.clusterserviceversion.yaml
- ../default
- ../samples
- ../scorecard

# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix.
# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager.
# These patches remove the unnecessary "cert" volume and its manager container volumeMount.
#patchesJson6902:
#- target:
#    group: apps
#    version: v1
#    kind: Deployment
#    name: controller-manager
#    namespace: system
#  patch: |-
#    # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
#    # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
#    - op: remove
#      path: /spec/template/spec/containers/1/volumeMounts/0
#    # Remove the "cert" volume, since OLM will create and mount a set of certs.
#    # Update the indices in this path if adding or removing volumes in the manager's Deployment.
#    - op: remove
#      path: /spec/template/spec/volumes/0
config/prometheus/kustomization.yaml (new file, 2 lines)
@@ -0,0 +1,2 @@
resources:
- monitor.yaml
config/prometheus/monitor.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@

# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    control-plane: controller-manager
  name: controller-manager-metrics-monitor
  namespace: system
spec:
  endpoints:
    - path: /metrics
      port: https
      scheme: https
      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
      tlsConfig:
        insecureSkipVerify: true
  selector:
    matchLabels:
      control-plane: controller-manager
config/rbac/auth_proxy_client_clusterrole.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metrics-reader
rules:
- nonResourceURLs:
  - "/metrics"
  verbs:
  - get
config/rbac/auth_proxy_role.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: proxy-role
rules:
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs:
  - create
config/rbac/auth_proxy_role_binding.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: proxy-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: proxy-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system
config/rbac/auth_proxy_service.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    control-plane: controller-manager
  name: controller-manager-metrics-service
  namespace: system
spec:
  ports:
  - name: https
    port: 8443
    protocol: TCP
    targetPort: https
  selector:
    control-plane: controller-manager
config/rbac/kustomization.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# Comment the following 4 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
- auth_proxy_service.yaml
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml
config/rbac/leader_election_role.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: leader-election-role
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
config/rbac/leader_election_role_binding.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: leader-election-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system
config/rbac/onepassworditem_editor_role.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
# permissions for end users to edit onepassworditems.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: onepassworditem-editor-role
rules:
- apiGroups:
  - onepassword.onepassword.com
  resources:
  - onepassworditems
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - onepassword.onepassword.com
  resources:
  - onepassworditems/status
  verbs:
  - get
config/rbac/onepassworditem_viewer_role.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
# permissions for end users to view onepassworditems.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: onepassworditem-viewer-role
rules:
- apiGroups:
  - onepassword.onepassword.com
  resources:
  - onepassworditems
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - onepassword.onepassword.com
  resources:
  - onepassworditems/status
  verbs:
  - get
config/rbac/role_binding.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: manager-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: manager-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system
config/rbac/service_account.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: controller-manager
  namespace: system
config/samples/kustomization.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
## Append samples you want in your CSV to this file as resources ##
resources:
- onepassword_v1_onepassworditem.yaml
#+kubebuilder:scaffold:manifestskustomizesamples
config/samples/onepassword_v1_onepassworditem.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
apiVersion: onepassword.onepassword.com/v1
kind: OnePasswordItem
metadata:
  name: onepassworditem-sample
spec:
  # TODO(user): Add fields here
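The scaffolded sample leaves the spec empty. Based on the controller code later in this diff (resource.Spec.ItemPath, and the vaults/<vaultId>/items/<itemId> path format used in the existing tests), a filled-in resource would typically look like the sketch below; the IDs are placeholders and the assumption is that the spec field is serialized as itemPath:

apiVersion: onepassword.onepassword.com/v1
kind: OnePasswordItem
metadata:
  name: example-secret
spec:
  itemPath: "vaults/<vault-id-or-name>/items/<item-id-or-name>"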
config/scorecard/bases/config.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
apiVersion: scorecard.operatorframework.io/v1alpha3
kind: Configuration
metadata:
  name: config
stages:
- parallel: true
  tests: []
config/scorecard/kustomization.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
resources:
- bases/config.yaml
patchesJson6902:
- path: patches/basic.config.yaml
  target:
    group: scorecard.operatorframework.io
    version: v1alpha3
    kind: Configuration
    name: config
- path: patches/olm.config.yaml
  target:
    group: scorecard.operatorframework.io
    version: v1alpha3
    kind: Configuration
    name: config
#+kubebuilder:scaffold:patchesJson6902
config/scorecard/patches/basic.config.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - basic-check-spec
    image: quay.io/operator-framework/scorecard-test:v1.19.0
    labels:
      suite: basic
      test: basic-check-spec-test
config/scorecard/patches/olm.config.yaml (new file, 50 lines)
@@ -0,0 +1,50 @@
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - olm-bundle-validation
    image: quay.io/operator-framework/scorecard-test:v1.19.0
    labels:
      suite: olm
      test: olm-bundle-validation-test
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - olm-crds-have-validation
    image: quay.io/operator-framework/scorecard-test:v1.19.0
    labels:
      suite: olm
      test: olm-crds-have-validation-test
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - olm-crds-have-resources
    image: quay.io/operator-framework/scorecard-test:v1.19.0
    labels:
      suite: olm
      test: olm-crds-have-resources-test
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - olm-spec-descriptors
    image: quay.io/operator-framework/scorecard-test:v1.19.0
    labels:
      suite: olm
      test: olm-spec-descriptors-test
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - olm-status-descriptors
    image: quay.io/operator-framework/scorecard-test:v1.19.0
    labels:
      suite: olm
      test: olm-status-descriptors-test
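These patches register the basic and OLM scorecard suites against the base configuration. Once an OLM bundle directory has been generated, they are typically exercised with the operator-sdk CLI, for example:

operator-sdk scorecard ./bundle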
@@ -1,4 +1,4 @@
-package deployment
+package controllers
 
 import (
 	"context"
@@ -27,7 +27,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 )
 
-var log = logf.Log.WithName("controller_deployment")
+var deploymentLog = logf.Log.WithName("controller_deployment")
 var finalizer = "onepassword.com/finalizer.secret"
 
 const annotationRegExpString = "^operator.1password.io\\/[a-zA-Z\\.]+"
@@ -71,9 +71,23 @@ type ReconcileDeployment struct {
 }
 
 func (r *ReconcileDeployment) SetupWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewControllerManagedBy(mgr).
-		For(&appsv1.Deployment{}).
-		Complete(r)
+	c, err := controller.New("deployment-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to primary resource Deployment
+	err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{})
+	if err != nil {
+		return err
+	}
+
+	return nil
+	// TODO figure out what to do with this code.
+	// return ctrl.NewControllerManagedBy(mgr).
+	// 	For(&appsv1.Deployment{}).
+	// 	Complete(r)
 }
 
 func (r *ReconcileDeployment) test() {
@@ -85,9 +99,8 @@ func (r *ReconcileDeployment) test() {
 // Note:
 // The Controller will requeue the Request to be processed again if the returned error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (r *ReconcileDeployment) Reconcile(request reconcile.Request) (reconcile.Result, error) {
-	ctx := context.Background()
-	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
+func (r *ReconcileDeployment) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+	reqLogger := deploymentLog.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
 	reqLogger.Info("Reconciling Deployment")
 
 	deployment := &appsv1.Deployment{}
@@ -170,7 +183,7 @@ func (r *ReconcileDeployment) areMultipleDeploymentsUsingSecret(updatedSecrets m
 
 	err := r.kubeClient.List(context.Background(), deployments, opts...)
 	if err != nil {
-		log.Error(err, "Failed to list kubernetes deployments")
+		deploymentLog.Error(err, "Failed to list kubernetes deployments")
 		return false, err
 	}
 
@@ -190,7 +203,7 @@ func (r *ReconcileDeployment) removeOnePasswordFinalizerFromDeployment(deploymen
 }
 
 func (r *ReconcileDeployment) HandleApplyingDeployment(deployment *appsv1.Deployment, namespace string, annotations map[string]string, request reconcile.Request) error {
-	reqLog := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
+	reqLog := deploymentLog.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
 
 	secretName := annotations[op.NameAnnotation]
 	secretLabels := map[string]string(nil)
@@ -218,5 +231,5 @@ func (r *ReconcileDeployment) HandleApplyingDeployment(deployment *appsv1.Deploy
 		UID: deployment.GetUID(),
 	}
 
-	return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, namespace, item, annotations[op.RestartDeploymentsAnnotation], secretLabels, secretType, ownerRef)
+	return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, namespace, item, annotations[op.RestartDeploymentsAnnotation], secretLabels, secretType, annotations, ownerRef)
 }
@@ -1,82 +1,78 @@
-package onepassworditem
-
-import (
-	"context"
-	"fmt"
-
-	onepasswordv1 "github.com/1Password/onepassword-operator/pkg/apis/onepassword/v1"
-	kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
-	"github.com/1Password/onepassword-operator/pkg/onepassword"
-	op "github.com/1Password/onepassword-operator/pkg/onepassword"
-	"github.com/1Password/onepassword-operator/pkg/utils"
-
-	"github.com/1Password/connect-sdk-go/connect"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	ctrl "sigs.k8s.io/controller-runtime"
-	kubeClient "sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
-	"sigs.k8s.io/controller-runtime/pkg/controller"
-	"sigs.k8s.io/controller-runtime/pkg/handler"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"
-	"sigs.k8s.io/controller-runtime/pkg/manager"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-	"sigs.k8s.io/controller-runtime/pkg/source"
-)
-
-var log = logf.Log.WithName("controller_onepassworditem")
-var finalizer = "onepassword.com/finalizer.secret"
-
-func Add(mgr manager.Manager, opConnectClient connect.Client) error {
-	return add(mgr, newReconciler(mgr, opConnectClient))
-}
-
-func newReconciler(mgr manager.Manager, opConnectClient connect.Client) *ReconcileOnePasswordItem {
-	return &ReconcileOnePasswordItem{
-		kubeClient:      mgr.GetClient(),
-		scheme:          mgr.GetScheme(),
-		opConnectClient: opConnectClient,
-	}
-}
-
-func add(mgr manager.Manager, r reconcile.Reconciler) error {
-	c, err := controller.New("onepassworditem-controller", mgr, controller.Options{Reconciler: r})
-	if err != nil {
-		return err
-	}
-
-	// Watch for changes to primary resource OnePasswordItem
-	err = c.Watch(&source.Kind{Type: &onepasswordv1.OnePasswordItem{}}, &handler.EnqueueRequestForObject{})
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-var _ reconcile.Reconciler = &ReconcileOnePasswordItem{}
-
-type ReconcileOnePasswordItem struct {
-	kubeClient      kubeClient.Client
-	scheme          *runtime.Scheme
-	opConnectClient connect.Client
-}
-
-func (r *ReconcileOnePasswordItem) SetupWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewControllerManagedBy(mgr).
-		For(&onepasswordv1.OnePasswordItem{}).
-		Complete(r)
-}
-
-func (r *ReconcileOnePasswordItem) Reconcile(request reconcile.Request) (reconcile.Result, error) {
-	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
-	reqLogger.Info("Reconciling OnePasswordItem")
-
-	onepassworditem := &onepasswordv1.OnePasswordItem{}
-	err := r.kubeClient.Get(context.Background(), request.NamespacedName, onepassworditem)
-	if err != nil {
-		if errors.IsNotFound(err) {
-			return reconcile.Result{}, nil
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/1Password/onepassword-operator/pkg/onepassword"
+	op "github.com/1Password/onepassword-operator/pkg/onepassword"
+
+	kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	kubeClient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"github.com/1Password/onepassword-operator/pkg/utils"
+
+	"github.com/1Password/connect-sdk-go/connect"
+	corev1 "k8s.io/api/core/v1"
+
+	onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var log = logf.Log.WithName("controller_onepassworditem")
+
+// OnePasswordItemReconciler reconciles a OnePasswordItem object
+type OnePasswordItemReconciler struct {
+	Client          kubeClient.Client
+	Scheme          *runtime.Scheme
+	OpConnectClient connect.Client
+}
+
+//+kubebuilder:rbac:groups=onepassword.onepassword.com,resources=onepassworditems,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=onepassword.onepassword.com,resources=onepassworditems/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=onepassword.onepassword.com,resources=onepassworditems/finalizers,verbs=update
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+// TODO(user): Modify the Reconcile function to compare the state specified by
+// the OnePasswordItem object against the actual cluster state, and then
+// perform operations to make the cluster state reflect the state specified by
+// the user.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile
+func (r *OnePasswordItemReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
+	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
+	reqLogger.Info("Reconciling OnePasswordItem")
+
+	onepassworditem := &onepasswordv1.OnePasswordItem{}
+	err := r.Client.Get(context.Background(), request.NamespacedName, onepassworditem)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return reconcile.Result{}, nil
@@ -90,7 +86,7 @@ func (r *ReconcileOnePasswordItem) Reconcile(request reconcile.Request) (reconci
 	// This is so we can handle cleanup of associated secrets properly
 	if !utils.ContainsString(onepassworditem.ObjectMeta.Finalizers, finalizer) {
 		onepassworditem.ObjectMeta.Finalizers = append(onepassworditem.ObjectMeta.Finalizers, finalizer)
-		if err := r.kubeClient.Update(context.Background(), onepassworditem); err != nil {
+		if err := r.Client.Update(context.Background(), onepassworditem); err != nil {
 			return reconcile.Result{}, err
 		}
 	}
@@ -117,21 +113,41 @@ func (r *ReconcileOnePasswordItem) Reconcile(request reconcile.Request) (reconci
 	return reconcile.Result{}, nil
 }
 
-func (r *ReconcileOnePasswordItem) removeFinalizer(onePasswordItem *onepasswordv1.OnePasswordItem) error {
+// SetupWithManager sets up the controller with the Manager.
+func (r *OnePasswordItemReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	c, err := controller.New("onepassworditem-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to primary resource OnePasswordItem
+	err = c.Watch(&source.Kind{Type: &onepasswordv1.OnePasswordItem{}}, &handler.EnqueueRequestForObject{})
+	if err != nil {
+		return err
+	}
+
+	return nil
+	// TODO Consider the simplified code below. Based on the migration guide: https://sdk.operatorframework.io/docs/building-operators/golang/migration/#create-a-new-project
+	// return ctrl.NewControllerManagedBy(mgr).Named("onepassworditem-controller").WithOptions(controller.Options{Reconciler: r}).
+	// 	For(&onepasswordv1.OnePasswordItem{}).Watches(&source.Kind{Type: &onepasswordv1.OnePasswordItem{}}, &handler.EnqueueRequestForObject{}).
+	// 	Complete(r)
+}
+
+func (r *OnePasswordItemReconciler) removeFinalizer(onePasswordItem *onepasswordv1.OnePasswordItem) error {
 	onePasswordItem.ObjectMeta.Finalizers = utils.RemoveString(onePasswordItem.ObjectMeta.Finalizers, finalizer)
-	if err := r.kubeClient.Update(context.Background(), onePasswordItem); err != nil {
+	if err := r.Client.Update(context.Background(), onePasswordItem); err != nil {
 		return err
 	}
 	return nil
 }
 
-func (r *ReconcileOnePasswordItem) cleanupKubernetesSecret(onePasswordItem *onepasswordv1.OnePasswordItem) error {
+func (r *OnePasswordItemReconciler) cleanupKubernetesSecret(onePasswordItem *onepasswordv1.OnePasswordItem) error {
 	kubernetesSecret := &corev1.Secret{}
 	kubernetesSecret.ObjectMeta.Name = onePasswordItem.Name
 	kubernetesSecret.ObjectMeta.Namespace = onePasswordItem.Namespace
 
-	r.kubeClient.Delete(context.Background(), kubernetesSecret)
-	if err := r.kubeClient.Delete(context.Background(), kubernetesSecret); err != nil {
+	r.Client.Delete(context.Background(), kubernetesSecret)
+	if err := r.Client.Delete(context.Background(), kubernetesSecret); err != nil {
 		if !errors.IsNotFound(err) {
 			return err
 		}
@@ -139,24 +155,20 @@ func (r *ReconcileOnePasswordItem) cleanupKubernetesSecret(onePasswordItem *onep
 	return nil
 }
 
-func (r *ReconcileOnePasswordItem) removeOnePasswordFinalizerFromOnePasswordItem(opSecret *onepasswordv1.OnePasswordItem) error {
-	opSecret.ObjectMeta.Finalizers = utils.RemoveString(opSecret.ObjectMeta.Finalizers, finalizer)
-	return r.kubeClient.Update(context.Background(), opSecret)
-}
-
-func (r *ReconcileOnePasswordItem) HandleOnePasswordItem(resource *onepasswordv1.OnePasswordItem, request reconcile.Request) error {
+func (r *OnePasswordItemReconciler) HandleOnePasswordItem(resource *onepasswordv1.OnePasswordItem, request reconcile.Request) error {
 	secretName := resource.GetName()
 	labels := resource.Labels
+	annotations := resource.Annotations
 	secretType := resource.Type
-	autoRestart := resource.Annotations[op.RestartDeploymentsAnnotation]
+	autoRestart := annotations[op.RestartDeploymentsAnnotation]
 
-	item, err := onepassword.GetOnePasswordItemByPath(r.opConnectClient, resource.Spec.ItemPath)
+	item, err := onepassword.GetOnePasswordItemByPath(r.OpConnectClient, resource.Spec.ItemPath)
 	if err != nil {
 		return fmt.Errorf("Failed to retrieve item: %v", err)
 	}
 
 	// Create owner reference.
-	gvk, err := apiutil.GVKForObject(resource, r.scheme)
+	gvk, err := apiutil.GVKForObject(resource, r.Scheme)
 	if err != nil {
 		return fmt.Errorf("could not to retrieve group version kind: %v", err)
 	}
@@ -167,5 +179,5 @@ func (r *ReconcileOnePasswordItem) HandleOnePasswordItem(resource *onepasswordv1
 		UID: resource.GetUID(),
 	}
 
-	return kubeSecrets.CreateKubernetesSecretFromItem(r.kubeClient, secretName, resource.Namespace, item, autoRestart, labels, secretType, ownerRef)
+	return kubeSecrets.CreateKubernetesSecretFromItem(r.Client, secretName, resource.Namespace, item, autoRestart, labels, secretType, annotations, ownerRef)
 }
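The TODO in SetupWithManager above points at the migration guide's builder-style registration. A minimal sketch of what that simplified setup could look like for this reconciler, based on the commented-out code rather than on what this branch actually ships, is:

// SetupWithManager registers the reconciler with the manager using the
// controller-runtime builder instead of constructing the controller by hand.
// Imports are the same as in the controller file above.
func (r *OnePasswordItemReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&onepasswordv1.OnePasswordItem{}).
		Complete(r)
}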
controllers/suite_test.go (new file, 80 lines)
@@ -0,0 +1,80 @@
/*
Copyright 2022.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"path/filepath"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
	//+kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment

func TestAPIs(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecsWithDefaultAndCustomReporters(t,
		"Controller Suite",
		[]Reporter{printer.NewlineReporter{}})
}

var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,
	}

	cfg, err := testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	err = onepasswordv1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	//+kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())

}, 60)

var _ = AfterSuite(func() {
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})
go.mod (modified)
@@ -1,21 +1,81 @@
 module github.com/1Password/onepassword-operator
 
-go 1.13
+go 1.17
 
 require (
 	github.com/1Password/connect-sdk-go v1.2.0
-	github.com/operator-framework/operator-sdk v0.19.0
-	github.com/prometheus/common v0.14.0 // indirect
-	github.com/spf13/pflag v1.0.5
+	github.com/onsi/ginkgo v1.16.5
+	github.com/onsi/gomega v1.17.0
 	github.com/stretchr/testify v1.7.0
-	k8s.io/api v0.18.2
-	k8s.io/apimachinery v0.18.2
-	k8s.io/client-go v12.0.0+incompatible
-	k8s.io/kubectl v0.18.2
-	sigs.k8s.io/controller-runtime v0.6.0
+	k8s.io/api v0.23.5
+	k8s.io/apimachinery v0.23.5
+	k8s.io/client-go v0.23.5
+	k8s.io/kubectl v0.23.5
+	sigs.k8s.io/controller-runtime v0.11.0
 )
 
-replace (
-	github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.2+incompatible // Required by OLM
-	k8s.io/client-go => k8s.io/client-go v0.18.2 // Required by prometheus-operator
+require (
+	cloud.google.com/go v0.81.0 // indirect
+	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
+	github.com/Azure/go-autorest/autorest v0.11.18 // indirect
+	github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
+	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
+	github.com/Azure/go-autorest/logger v0.2.1 // indirect
+	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
+	github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
+	github.com/fsnotify/fsnotify v1.5.1 // indirect
+	github.com/go-logr/logr v1.2.0 // indirect
+	github.com/go-logr/zapr v1.2.0 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/google/go-cmp v0.5.5 // indirect
+	github.com/google/gofuzz v1.1.0 // indirect
+	github.com/google/uuid v1.1.2 // indirect
+	github.com/googleapis/gnostic v0.5.5 // indirect
+	github.com/imdario/mergo v0.3.12 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/nxadm/tail v1.4.8 // indirect
+	github.com/opentracing/opentracing-go v1.2.0 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_golang v1.11.0 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.28.0 // indirect
+	github.com/prometheus/procfs v0.6.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
+	github.com/uber/jaeger-lib v2.4.0+incompatible // indirect
+	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/multierr v1.6.0 // indirect
+	go.uber.org/zap v1.19.1 // indirect
+	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
+	golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
+	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
+	golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect
+	golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
+	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
+	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+	k8s.io/apiextensions-apiserver v0.23.0 // indirect
+	k8s.io/component-base v0.23.5 // indirect
+	k8s.io/klog/v2 v2.30.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
+	k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
+	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+	sigs.k8s.io/yaml v1.3.0 // indirect
 )
@@ -1,4 +1,5 @@
-Copyright 2011-2016 Canonical Ltd.
+/*
+Copyright 2022.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -11,3 +12,4 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+*/
main.go (new file, 254 lines)
@@ -0,0 +1,254 @@
/*
Copyright 2022.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"errors"
	"flag"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/1Password/connect-sdk-go/connect"
	op "github.com/1Password/onepassword-operator/pkg/onepassword"
	"github.com/1Password/onepassword-operator/pkg/utils"
	"github.com/1Password/onepassword-operator/version"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"

	// sdkVersion "github.com/operator-framework/operator-sdk/version"

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	// to ensure that exec-entrypoint and run can make use of them.
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	k8sruntime "k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	onepasswordv1 "github.com/1Password/onepassword-operator/api/v1"
	"github.com/1Password/onepassword-operator/controllers"
	//+kubebuilder:scaffold:imports
)

var (
	scheme   = k8sruntime.NewScheme()
	setupLog = ctrl.Log.WithName("setup")

	WatchNamespaceEnvVar = "WATCH_NAMESPACE"
)

func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

	utilruntime.Must(onepasswordv1.AddToScheme(scheme))
	//+kubebuilder:scaffold:scheme
}

func printVersion() {
	setupLog.Info(fmt.Sprintf("Operator Version: %s", version.Version))
	setupLog.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
	setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
	// TODO figure out how to get operator-sdk version
	// setupLog.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
}

func main() {
	var metricsAddr string
	var enableLeaderElection bool
	var probeAddr string
	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
		"Enable leader election for controller manager. "+
			"Enabling this will ensure there is only one active controller manager.")
	opts := zap.Options{
		Development: true,
	}
	opts.BindFlags(flag.CommandLine)
	flag.Parse()

	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

	printVersion()

	namespace := os.Getenv(WatchNamespaceEnvVar)

	options := ctrl.Options{
		Scheme:                 scheme,
		Namespace:              namespace,
		MetricsBindAddress:     metricsAddr,
		Port:                   9443,
		HealthProbeBindAddress: probeAddr,
		LeaderElection:         enableLeaderElection,
		LeaderElectionID:       "c26807fd.onepassword.com",
	}

	// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
	// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
	// Also note that you may face performance issues when using this with a high number of namespaces.
	if strings.Contains(namespace, ",") {
		options.Namespace = ""
		options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
	}

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options)
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}

	// Setup One Password Client
	opConnectClient, err := connect.NewClientFromEnvironment()
	if err != nil {
		setupLog.Error(err, "failed to create 1Password client")
		os.Exit(1)
	}

	if err = (&controllers.OnePasswordItemReconciler{
		Client:          mgr.GetClient(),
		Scheme:          mgr.GetScheme(),
		OpConnectClient: opConnectClient,
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "OnePasswordItem")
		os.Exit(1)
	}
	//+kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")
		os.Exit(1)
	}
	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up ready check")
		os.Exit(1)
	}

	setupLog.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}

	deploymentNamespace, err := utils.GetOperatorNamespace()
	if err != nil {
		setupLog.Error(err, "Failed to get namespace")
		os.Exit(1)
	}

	//Setup 1PasswordConnect
	if shouldManageConnect() {
		setupLog.Info("Automated Connect Management Enabled")
		go func() {
			connectStarted := false
			for !connectStarted {
				err := op.SetupConnect(mgr.GetClient(), deploymentNamespace)
				// Cache Not Started is an acceptable error. Retry until cache is started.
				if err != nil && !errors.Is(err, &cache.ErrCacheNotStarted{}) {
					setupLog.Error(err, "")
					os.Exit(1)
				}
				if err == nil {
					connectStarted = true
				}
			}
		}()
	} else {
		setupLog.Info("Automated Connect Management Disabled")
	}

	// TODO: Configure Metrics Service. See: https://sdk.operatorframework.io/docs/building-operators/golang/migration/#export-metrics

	// Setup update secrets task
	updatedSecretsPoller := op.NewManager(mgr.GetClient(), opConnectClient, shouldAutoRestartDeployments())
	done := make(chan bool)
	ticker := time.NewTicker(getPollingIntervalForUpdatingSecrets())
	go func() {
		for {
			select {
			case <-done:
				ticker.Stop()
				return
			case <-ticker.C:
				err := updatedSecretsPoller.UpdateKubernetesSecretsTask()
				if err != nil {
					setupLog.Error(err, "error running update kubernetes secret task")
				}
			}
		}
	}()

	// Start the Cmd
	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "Manager exited non-zero")
		done <- true
		os.Exit(1)
	}

}

const manageConnect = "MANAGE_CONNECT"

func shouldManageConnect() bool {
	shouldManageConnect, found := os.LookupEnv(manageConnect)
	if found {
		shouldManageConnectBool, err := strconv.ParseBool(strings.ToLower(shouldManageConnect))
		if err != nil {
			setupLog.Error(err, "")
			os.Exit(1)
		}
		return shouldManageConnectBool
	}
	return false
}

const envPollingIntervalVariable = "POLLING_INTERVAL"
const defaultPollingInterval = 600

func getPollingIntervalForUpdatingSecrets() time.Duration {
	timeInSecondsString, found := os.LookupEnv(envPollingIntervalVariable)
	if found {
		timeInSeconds, err := strconv.Atoi(timeInSecondsString)
		if err == nil {
			return time.Duration(timeInSeconds) * time.Second
		}
		setupLog.Info("Invalid value set for polling interval. Must be a valid integer.")
	}

	setupLog.Info(fmt.Sprintf("Using default polling interval of %v seconds", defaultPollingInterval))
	return time.Duration(defaultPollingInterval) * time.Second
}

const restartDeploymentsEnvVariable = "AUTO_RESTART"

func shouldAutoRestartDeployments() bool {
	shouldAutoRestartDeployments, found := os.LookupEnv(restartDeploymentsEnvVariable)
	if found {
		shouldAutoRestartDeploymentsBool, err := strconv.ParseBool(strings.ToLower(shouldAutoRestartDeployments))
		if err != nil {
			setupLog.Error(err, "")
			os.Exit(1)
		}
		return shouldAutoRestartDeploymentsBool
	}
	return false
}
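main.go reads its runtime behaviour from environment variables: WATCH_NAMESPACE, MANAGE_CONNECT, POLLING_INTERVAL, and AUTO_RESTART. An illustrative snippet of how these could be set on the manager container in config/manager/manager.yaml follows; the values are examples only, not defaults shipped by this change:

        env:
        - name: WATCH_NAMESPACE    # empty watches all namespaces; "ns1,ns2" enables the multi-namespace cache
          value: "default"
        - name: MANAGE_CONNECT     # let the operator deploy and manage 1Password Connect itself
          value: "true"
        - name: POLLING_INTERVAL   # seconds between secret-update polls (default 600)
          value: "600"
        - name: AUTO_RESTART       # restart deployments when their secrets change
          value: "false"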
@@ -1,10 +0,0 @@ (deleted file)
package apis

import (
	v1 "github.com/1Password/onepassword-operator/pkg/apis/onepassword/v1"
)

func init() {
	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
	AddToSchemes = append(AddToSchemes, v1.SchemeBuilder.AddToScheme)
}
@@ -1,13 +0,0 @@ (deleted file)
package apis

import (
	"k8s.io/apimachinery/pkg/runtime"
)

// AddToSchemes may be used to add all resources defined in the project to a Scheme
var AddToSchemes runtime.SchemeBuilder

// AddToScheme adds all Resources to the Scheme
func AddToScheme(s *runtime.Scheme) error {
	return AddToSchemes.AddToScheme(s)
}
@@ -1,6 +0,0 @@ (deleted file)
// Package onepassword contains onepassword API versions.
//
// This file ensures Go source parsers acknowledge the onepassword package
// and any child packages. It can be removed if any other Go source files are
// added to this package.
package onepassword
@@ -1,4 +0,0 @@ (deleted file)
// Package v1 contains API Schema definitions for the onepassword v1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=onepassword.com
package v1
@@ -1,19 +0,0 @@ (deleted file)
// NOTE: Boilerplate only. Ignore this file.

// Package v1 contains API Schema definitions for the onepassword v1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=onepassword.com
package v1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: "onepassword.com", Version: "v1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
)
@@ -1,10 +0,0 @@ (deleted file)
package controller

import (
	"github.com/1Password/onepassword-operator/pkg/controller/deployment"
)

func init() {
	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
	AddToManagerFuncs = append(AddToManagerFuncs, deployment.Add)
}
@@ -1,10 +0,0 @@ (deleted file)
package controller

import (
	"github.com/1Password/onepassword-operator/pkg/controller/onepassworditem"
)

func init() {
	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
	AddToManagerFuncs = append(AddToManagerFuncs, onepassworditem.Add)
}
@@ -1,19 +0,0 @@ (deleted file)
package controller

import (
	"github.com/1Password/connect-sdk-go/connect"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
var AddToManagerFuncs []func(manager.Manager, connect.Client) error

// AddToManager adds all Controllers to the Manager
func AddToManager(m manager.Manager, opConnectClient connect.Client) error {
	for _, f := range AddToManagerFuncs {
		if err := f(m, opConnectClient); err != nil {
			return err
		}
	}
	return nil
}
@@ -1,481 +0,0 @@
package deployment

import (
	"context"
	"fmt"
	"regexp"
	"testing"

	"github.com/1Password/onepassword-operator/pkg/mocks"
	op "github.com/1Password/onepassword-operator/pkg/onepassword"

	"github.com/1Password/connect-sdk-go/onepassword"
	"github.com/stretchr/testify/assert"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	errors2 "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubectl/pkg/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

const (
	deploymentKind = "Deployment"
	deploymentAPIVersion = "v1"
	name = "test-deployment"
	namespace = "default"
	vaultId = "hfnjvi6aymbsnfc2xeeoheizda"
	itemId = "nwrhuano7bcwddcviubpp4mhfq"
	username = "test-user"
	password = "QmHumKc$mUeEem7caHtbaBaJ"
	userKey = "username"
	passKey = "password"
	version = 123
)

type testReconcileItem struct {
	testName string
	deploymentResource *appsv1.Deployment
	existingSecret *corev1.Secret
	expectedError error
	expectedResultSecret *corev1.Secret
	expectedEvents []string
	opItem map[string]string
	existingDeployment *appsv1.Deployment
}

var (
	expectedSecretData = map[string][]byte{
		"password": []byte(password),
		"username": []byte(username),
	}
	itemPath = fmt.Sprintf("vaults/%v/items/%v", vaultId, itemId)
)

var (
	time = metav1.Now()
	regex, _ = regexp.Compile(annotationRegExpString)
)

var tests = []testReconcileItem{
	{
		testName: "Test Delete Deployment where secret is being used in another deployment's volumes",
		deploymentResource: &appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				Kind: deploymentKind,
				APIVersion: deploymentAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				DeletionTimestamp: &time,
				Finalizers: []string{
					finalizer,
				},
				Annotations: map[string]string{
					op.ItemPathAnnotation: itemPath,
					op.NameAnnotation: name,
				},
			},
		},
		existingDeployment: &appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				Kind: deploymentKind,
				APIVersion: deploymentAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: "another-deployment",
				Namespace: namespace,
				Annotations: map[string]string{
					op.ItemPathAnnotation: itemPath,
					op.NameAnnotation: name,
				},
			},
			Spec: appsv1.DeploymentSpec{
				Template: corev1.PodTemplateSpec{
					Spec: corev1.PodSpec{
						Volumes: []corev1.Volume{
							{
								Name: name,
								VolumeSource: corev1.VolumeSource{
									Secret: &corev1.SecretVolumeSource{
										SecretName: name,
									},
								},
							},
						},
					},
				},
			},
		},
		existingSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Data: expectedSecretData,
		},
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
	{
		testName: "Test Delete Deployment where secret is being used in another deployment's container",
		deploymentResource: &appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				Kind: deploymentKind,
				APIVersion: deploymentAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				DeletionTimestamp: &time,
				Finalizers: []string{
					finalizer,
				},
				Annotations: map[string]string{
					op.ItemPathAnnotation: itemPath,
					op.NameAnnotation: name,
				},
			},
		},
		existingDeployment: &appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				Kind: deploymentKind,
				APIVersion: deploymentAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: "another-deployment",
				Namespace: namespace,
				Annotations: map[string]string{
					op.ItemPathAnnotation: itemPath,
					op.NameAnnotation: name,
				},
			},
			Spec: appsv1.DeploymentSpec{
				Template: corev1.PodTemplateSpec{
					Spec: corev1.PodSpec{
						Containers: []corev1.Container{
							{
								Env: []corev1.EnvVar{
									{
										Name: name,
										ValueFrom: &corev1.EnvVarSource{
											SecretKeyRef: &corev1.SecretKeySelector{
												LocalObjectReference: corev1.LocalObjectReference{
													Name: name,
												},
												Key: passKey,
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
		existingSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Data: expectedSecretData,
		},
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
	{
		testName: "Test Delete Deployment",
		deploymentResource: &appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				Kind: deploymentKind,
				APIVersion: deploymentAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				DeletionTimestamp: &time,
				Finalizers: []string{
					finalizer,
				},
				Annotations: map[string]string{
					op.ItemPathAnnotation: itemPath,
					op.NameAnnotation: name,
				},
			},
		},
		existingSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Data: expectedSecretData,
		},
		expectedError: nil,
		expectedResultSecret: nil,
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
	{
		testName: "Test Do not update if Annotations have not changed",
		deploymentResource: &appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				Kind: deploymentKind,
				APIVersion: deploymentAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.ItemPathAnnotation: itemPath,
					op.NameAnnotation: name,
				},
				Labels: map[string]string{},
			},
		},
		existingSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
					op.ItemPathAnnotation: itemPath,
				},
			},
			Data: expectedSecretData,
		},
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
					op.ItemPathAnnotation: itemPath,
				},
				Labels: map[string]string(nil),
			},
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: "data we don't expect to have updated",
			passKey: "data we don't expect to have updated",
		},
	},
	{
		testName: "Test Updating Existing Kubernetes Secret using Deployment",
		deploymentResource: &appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				Kind: deploymentKind,
				APIVersion: deploymentAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.ItemPathAnnotation: itemPath,
					op.NameAnnotation: name,
				},
			},
		},
		existingSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: "456",
				},
			},
			Type: corev1.SecretType(""),
			Data: expectedSecretData,
		},
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Type: corev1.SecretType(""),
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
	{
		testName: "Create Deployment",
		deploymentResource: &appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				Kind: deploymentKind,
				APIVersion: deploymentAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.ItemPathAnnotation: itemPath,
					op.NameAnnotation: name,
				},
			},
		},
		existingSecret: nil,
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Type: corev1.SecretType(""),
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
}

func TestReconcileDeployment(t *testing.T) {
	for _, testData := range tests {
		t.Run(testData.testName, func(t *testing.T) {

			// Register operator types with the runtime scheme.
			s := scheme.Scheme
			s.AddKnownTypes(appsv1.SchemeGroupVersion, testData.deploymentResource)

			// Objects to track in the fake client.
			objs := []runtime.Object{
				testData.deploymentResource,
			}

			if testData.existingSecret != nil {
				objs = append(objs, testData.existingSecret)
			}

			if testData.existingDeployment != nil {
				objs = append(objs, testData.existingDeployment)
			}

			// Create a fake client to mock API calls.
			cl := fake.NewFakeClientWithScheme(s, objs...)
			// Create a Deployment object with the scheme and mock kubernetes
			// and 1Password Connect client.

			opConnectClient := &mocks.TestClient{}
			mocks.GetGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {

				item := onepassword.Item{}
				item.Fields = generateFields(testData.opItem["username"], testData.opItem["password"])
				item.Version = version
				item.Vault.ID = vaultUUID
				item.ID = uuid
				return &item, nil
			}
			r := &ReconcileDeployment{
				kubeClient: cl,
				scheme: s,
				opConnectClient: opConnectClient,
				opAnnotationRegExp: regex,
			}

			// Mock request to simulate Reconcile() being called on an event for a
			// watched resource .
			req := reconcile.Request{
				NamespacedName: types.NamespacedName{
					Name: name,
					Namespace: namespace,
				},
			}
			_, err := r.Reconcile(req)

			assert.Equal(t, testData.expectedError, err)

			var expectedSecretName string
			if testData.expectedResultSecret == nil {
				expectedSecretName = testData.deploymentResource.Name
			} else {
				expectedSecretName = testData.expectedResultSecret.Name
			}

			// Check if Secret has been created and has the correct data
			secret := &corev1.Secret{}
			err = cl.Get(context.TODO(), types.NamespacedName{Name: expectedSecretName, Namespace: namespace}, secret)

			if testData.expectedResultSecret == nil {
				assert.Error(t, err)
				assert.True(t, errors2.IsNotFound(err))
			} else {
				assert.Equal(t, testData.expectedResultSecret.Data, secret.Data)
				assert.Equal(t, testData.expectedResultSecret.Name, secret.Name)
				assert.Equal(t, testData.expectedResultSecret.Type, secret.Type)
				assert.Equal(t, testData.expectedResultSecret.Annotations[op.VersionAnnotation], secret.Annotations[op.VersionAnnotation])

				updatedCR := &appsv1.Deployment{}
				err = cl.Get(context.TODO(), req.NamespacedName, updatedCR)
				assert.NoError(t, err)
			}
		})
	}
}

func generateFields(username, password string) []*onepassword.ItemField {
	fields := []*onepassword.ItemField{
		{
			Label: "username",
			Value: username,
		},
		{
			Label: "password",
			Value: password,
		},
	}
	return fields
}
@@ -1,539 +0,0 @@
package onepassworditem

import (
	"context"
	"fmt"
	"testing"

	"github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
	"github.com/1Password/onepassword-operator/pkg/mocks"
	op "github.com/1Password/onepassword-operator/pkg/onepassword"

	onepasswordv1 "github.com/1Password/onepassword-operator/pkg/apis/onepassword/v1"

	"github.com/1Password/connect-sdk-go/onepassword"
	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	errors2 "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubectl/pkg/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

const (
	onePasswordItemKind = "OnePasswordItem"
	onePasswordItemAPIVersion = "onepassword.com/v1"
	name = "test"
	namespace = "default"
	vaultId = "hfnjvi6aymbsnfc2xeeoheizda"
	itemId = "nwrhuano7bcwddcviubpp4mhfq"
	username = "test-user"
	password = "QmHumKc$mUeEem7caHtbaBaJ"
	firstHost = "http://localhost:8080"
	awsKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
	iceCream = "freezing blue 20%"
	userKey = "username"
	passKey = "password"
	version = 123
)

type testReconcileItem struct {
	testName string
	customResource *onepasswordv1.OnePasswordItem
	existingSecret *corev1.Secret
	expectedError error
	expectedResultSecret *corev1.Secret
	expectedEvents []string
	opItem map[string]string
	existingOnePasswordItem *onepasswordv1.OnePasswordItem
}

var (
	expectedSecretData = map[string][]byte{
		"password": []byte(password),
		"username": []byte(username),
	}
	itemPath = fmt.Sprintf("vaults/%v/items/%v", vaultId, itemId)
)

var (
	time = metav1.Now()
)

var tests = []testReconcileItem{
	{
		testName: "Test Delete OnePasswordItem",
		customResource: &onepasswordv1.OnePasswordItem{
			TypeMeta: metav1.TypeMeta{
				Kind: onePasswordItemKind,
				APIVersion: onePasswordItemAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				DeletionTimestamp: &time,
				Finalizers: []string{
					finalizer,
				},
			},
			Spec: onepasswordv1.OnePasswordItemSpec{
				ItemPath: itemPath,
			},
		},
		existingSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Data: expectedSecretData,
		},
		expectedError: nil,
		expectedResultSecret: nil,
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
	{
		testName: "Test Do not update if OnePassword Version or VaultPath has not changed",
		customResource: &onepasswordv1.OnePasswordItem{
			TypeMeta: metav1.TypeMeta{
				Kind: onePasswordItemKind,
				APIVersion: onePasswordItemAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
			},
			Spec: onepasswordv1.OnePasswordItemSpec{
				ItemPath: itemPath,
			},
		},
		existingSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
					op.ItemPathAnnotation: itemPath,
				},
			},
			Data: expectedSecretData,
		},
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
					op.ItemPathAnnotation: itemPath,
				},
			},
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: "data we don't expect to have updated",
			passKey: "data we don't expect to have updated",
		},
	},
	{
		testName: "Test Updating Existing Kubernetes Secret using OnePasswordItem",
		customResource: &onepasswordv1.OnePasswordItem{
			TypeMeta: metav1.TypeMeta{
				Kind: onePasswordItemKind,
				APIVersion: onePasswordItemAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
					op.ItemPathAnnotation: itemPath,
				},
				Labels: map[string]string{},
			},
			Spec: onepasswordv1.OnePasswordItemSpec{
				ItemPath: itemPath,
			},
		},
		existingSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: "456",
					op.ItemPathAnnotation: itemPath,
				},
				Labels: map[string]string{},
			},
			Data: expectedSecretData,
		},
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
					op.ItemPathAnnotation: itemPath,
				},
				Labels: map[string]string{},
			},
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
	{
		testName: "Test Updating Type of Existing Kubernetes Secret using OnePasswordItem",
		customResource: &onepasswordv1.OnePasswordItem{
			TypeMeta: metav1.TypeMeta{
				Kind: onePasswordItemKind,
				APIVersion: onePasswordItemAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
					op.ItemPathAnnotation: itemPath,
				},
				Labels: map[string]string{},
			},
			Spec: onepasswordv1.OnePasswordItemSpec{
				ItemPath: itemPath,
			},
			Type: string(corev1.SecretTypeBasicAuth),
		},
		existingSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
					op.ItemPathAnnotation: itemPath,
				},
				Labels: map[string]string{},
			},
			Type: corev1.SecretTypeBasicAuth,
			Data: expectedSecretData,
		},
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
					op.ItemPathAnnotation: itemPath,
				},
				Labels: map[string]string{},
			},
			Type: corev1.SecretTypeBasicAuth,
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
	{
		testName: "Custom secret type",
		customResource: &onepasswordv1.OnePasswordItem{
			TypeMeta: metav1.TypeMeta{
				Kind: onePasswordItemKind,
				APIVersion: onePasswordItemAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
			},
			Spec: onepasswordv1.OnePasswordItemSpec{
				ItemPath: itemPath,
			},
			Type: "custom",
		},
		existingSecret: nil,
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Type: corev1.SecretType("custom"),
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
	{
		testName: "Error if secret type is changed",
		customResource: &onepasswordv1.OnePasswordItem{
			TypeMeta: metav1.TypeMeta{
				Kind: onePasswordItemKind,
				APIVersion: onePasswordItemAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
			},
			Spec: onepasswordv1.OnePasswordItemSpec{
				ItemPath: itemPath,
			},
			Type: "custom",
		},
		existingSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Type: corev1.SecretTypeOpaque,
			Data: expectedSecretData,
		},
		expectedError: kubernetessecrets.ErrCannotUpdateSecretType,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Type: corev1.SecretTypeOpaque,
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
	{
		testName: "Secret from 1Password item with invalid K8s labels",
		customResource: &onepasswordv1.OnePasswordItem{
			TypeMeta: metav1.TypeMeta{
				Kind: onePasswordItemKind,
				APIVersion: onePasswordItemAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: "!my sECReT it3m%",
				Namespace: namespace,
			},
			Spec: onepasswordv1.OnePasswordItemSpec{
				ItemPath: itemPath,
			},
		},
		existingSecret: nil,
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: "my-secret-it3m",
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Data: expectedSecretData,
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
		},
	},
	{
		testName: "Secret from 1Password item with fields and sections that have invalid K8s labels",
		customResource: &onepasswordv1.OnePasswordItem{
			TypeMeta: metav1.TypeMeta{
				Kind: onePasswordItemKind,
				APIVersion: onePasswordItemAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: "!my sECReT it3m%",
				Namespace: namespace,
			},
			Spec: onepasswordv1.OnePasswordItemSpec{
				ItemPath: itemPath,
			},
		},
		existingSecret: nil,
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: "my-secret-it3m",
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Data: map[string][]byte{
				"password": []byte(password),
				"username": []byte(username),
				"first-host": []byte(firstHost),
				"AWS-Access-Key": []byte(awsKey),
				"ice-cream-type": []byte(iceCream),
			},
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
			"first host": firstHost,
			"AWS Access Key": awsKey,
			"😄 ice-cream type": iceCream,
		},
	},
	{
		testName: "Secret from 1Password item with `-`, `_` and `.`",
		customResource: &onepasswordv1.OnePasswordItem{
			TypeMeta: metav1.TypeMeta{
				Kind: onePasswordItemKind,
				APIVersion: onePasswordItemAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: "!.my_sECReT.it3m%-_",
				Namespace: namespace,
			},
			Spec: onepasswordv1.OnePasswordItemSpec{
				ItemPath: itemPath,
			},
		},
		existingSecret: nil,
		expectedError: nil,
		expectedResultSecret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: "my-secret.it3m",
				Namespace: namespace,
				Annotations: map[string]string{
					op.VersionAnnotation: fmt.Sprint(version),
				},
			},
			Data: map[string][]byte{
				"password": []byte(password),
				"username": []byte(username),
				"first-host": []byte(firstHost),
				"AWS-Access-Key": []byte(awsKey),
				"-_ice_cream.type.": []byte(iceCream),
			},
		},
		opItem: map[string]string{
			userKey: username,
			passKey: password,
			"first host": firstHost,
			"AWS Access Key": awsKey,
			"😄 -_ice_cream.type.": iceCream,
		},
	},
}

func TestReconcileOnePasswordItem(t *testing.T) {
	for _, testData := range tests {
		t.Run(testData.testName, func(t *testing.T) {

			// Register operator types with the runtime scheme.
			s := scheme.Scheme
			s.AddKnownTypes(onepasswordv1.SchemeGroupVersion, testData.customResource)

			// Objects to track in the fake client.
			objs := []runtime.Object{
				testData.customResource,
			}

			if testData.existingSecret != nil {
				objs = append(objs, testData.existingSecret)
			}

			if testData.existingOnePasswordItem != nil {
				objs = append(objs, testData.existingOnePasswordItem)
			}
			// Create a fake client to mock API calls.
			cl := fake.NewFakeClientWithScheme(s, objs...)
			// Create a OnePasswordItem object with the scheme and mock kubernetes
			// and 1Password Connect client.

			opConnectClient := &mocks.TestClient{}
			mocks.GetGetItemFunc = func(uuid string, vaultUUID string) (*onepassword.Item, error) {

				item := onepassword.Item{}
				item.Fields = []*onepassword.ItemField{}
				for k, v := range testData.opItem {
					item.Fields = append(item.Fields, &onepassword.ItemField{Label: k, Value: v})
				}
				item.Version = version
				item.Vault.ID = vaultUUID
				item.ID = uuid
				return &item, nil
			}
			r := &ReconcileOnePasswordItem{
				kubeClient: cl,
				scheme: s,
				opConnectClient: opConnectClient,
			}

			// Mock request to simulate Reconcile() being called on an event for a
			// watched resource .
			req := reconcile.Request{
				NamespacedName: types.NamespacedName{
					Name: testData.customResource.ObjectMeta.Name,
					Namespace: testData.customResource.ObjectMeta.Namespace,
				},
			}
			_, err := r.Reconcile(req)

			assert.Equal(t, testData.expectedError, err)

			var expectedSecretName string
			if testData.expectedResultSecret == nil {
				expectedSecretName = testData.customResource.Name
			} else {
				expectedSecretName = testData.expectedResultSecret.Name
			}

			// Check if Secret has been created and has the correct data
			secret := &corev1.Secret{}
			err = cl.Get(context.TODO(), types.NamespacedName{Name: expectedSecretName, Namespace: namespace}, secret)

			if testData.expectedResultSecret == nil {
				assert.Error(t, err)
				assert.True(t, errors2.IsNotFound(err))
			} else {
				assert.Equal(t, testData.expectedResultSecret.Data, secret.Data)
				assert.Equal(t, testData.expectedResultSecret.Name, secret.Name)
				assert.Equal(t, testData.expectedResultSecret.Type, secret.Type)
				assert.Equal(t, testData.expectedResultSecret.Annotations[op.VersionAnnotation], secret.Annotations[op.VersionAnnotation])

				updatedCR := &onepasswordv1.OnePasswordItem{}
				err = cl.Get(context.TODO(), req.NamespacedName, updatedCR)
				assert.NoError(t, err)
			}
		})
	}
}

func generateFields(username, password string) []*onepassword.ItemField {
	fields := []*onepassword.ItemField{
		{
			Label: "username",
			Value: username,
		},
		{
			Label: "password",
			Value: password,
		},
	}
	return fields
}
@@ -35,13 +35,18 @@ var ErrCannotUpdateSecretType = errs.New("Cannot change secret type. Secret type
 
 var log = logf.Log
 
-func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretName, namespace string, item *onepassword.Item, autoRestart string, labels map[string]string, secretType string, ownerRef *metav1.OwnerReference) error {
+func CreateKubernetesSecretFromItem(kubeClient kubernetesClient.Client, secretName, namespace string, item *onepassword.Item, autoRestart string, labels map[string]string, secretType string, secretAnnotations map[string]string, ownerRef *metav1.OwnerReference) error {
 	itemVersion := fmt.Sprint(item.Version)
-	secretAnnotations := map[string]string{
-		VersionAnnotation: itemVersion,
-		ItemPathAnnotation: fmt.Sprintf("vaults/%v/items/%v", item.Vault.ID, item.ID),
-	}
+
+	// If secretAnnotations is nil we create an empty map so we can later assign values for the OP Annotations in the map
+	if secretAnnotations == nil {
+		secretAnnotations = map[string]string{}
+	}
+
+	secretAnnotations[VersionAnnotation] = itemVersion
+	secretAnnotations[ItemPathAnnotation] = fmt.Sprintf("vaults/%v/items/%v", item.Vault.ID, item.ID)
+
 	if autoRestart != "" {
 		_, err := utils.StringToBool(autoRestart)
 		if err != nil {
@@ -33,9 +33,12 @@ func TestCreateKubernetesSecretFromOnePasswordItem(t *testing.T) {
 	kubeClient := fake.NewFakeClient()
 	secretLabels := map[string]string{}
+	secretAnnotations := map[string]string{
+		"testAnnotation": "exists",
+	}
 	secretType := ""
 
-	err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, nil)
+	err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
 	if err != nil {
 		t.Errorf("Unexpected error: %v", err)
 	}
@@ -47,6 +50,10 @@ func TestCreateKubernetesSecretFromOnePasswordItem(t *testing.T) {
 	}
 	compareFields(item.Fields, createdSecret.Data, t)
 	compareAnnotationsToItem(createdSecret.Annotations, item, t)
+
+	if createdSecret.Annotations["testAnnotation"] != "exists" {
+		t.Errorf("Expected testAnnotation to be merged with existing annotations, but wasn't.")
+	}
 }
 
 func TestKubernetesSecretFromOnePasswordItemOwnerReferences(t *testing.T) {
@@ -61,6 +68,9 @@ func TestKubernetesSecretFromOnePasswordItemOwnerReferences(t *testing.T) {
 	kubeClient := fake.NewFakeClient()
 	secretLabels := map[string]string{}
+	secretAnnotations := map[string]string{
+		"testAnnotation": "exists",
+	}
 	secretType := ""
 
 	ownerRef := &metav1.OwnerReference{
@@ -69,7 +79,7 @@ func TestKubernetesSecretFromOnePasswordItemOwnerReferences(t *testing.T) {
 		Name: "test-deployment",
 		UID: types.UID("test-uid"),
 	}
-	err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, ownerRef)
+	err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, ownerRef)
 	if err != nil {
 		t.Errorf("Unexpected error: %v", err)
 	}
@@ -106,9 +116,10 @@ func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
 	kubeClient := fake.NewFakeClient()
 	secretLabels := map[string]string{}
+	secretAnnotations := map[string]string{}
 	secretType := ""
 
-	err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, nil)
+	err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
 
 	if err != nil {
 		t.Errorf("Unexpected error: %v", err)
@@ -120,7 +131,7 @@ func TestUpdateKubernetesSecretFromOnePasswordItem(t *testing.T) {
 	newItem.Version = 456
 	newItem.Vault.ID = "hfnjvi6aymbsnfc2xeeoheizda"
 	newItem.ID = "h46bb3jddvay7nxopfhvlwg35q"
-	err = CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &newItem, restartDeploymentAnnotation, secretLabels, secretType, nil)
+	err = CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &newItem, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
 	if err != nil {
 		t.Errorf("Unexpected error: %v", err)
 	}
@@ -221,9 +232,12 @@ func TestCreateKubernetesTLSSecretFromOnePasswordItem(t *testing.T) {
 	kubeClient := fake.NewFakeClient()
 	secretLabels := map[string]string{}
+	secretAnnotations := map[string]string{
+		"testAnnotation": "exists",
+	}
 	secretType := "kubernetes.io/tls"
 
-	err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, nil)
+	err := CreateKubernetesSecretFromItem(kubeClient, secretName, namespace, &item, restartDeploymentAnnotation, secretLabels, secretType, secretAnnotations, nil)
 	if err != nil {
 		t.Errorf("Unexpected error: %v", err)
 	}
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"time"
 
-	v1 "github.com/1Password/onepassword-operator/pkg/apis/onepassword/v1"
+	v1 "github.com/1Password/onepassword-operator/api/v1"
 
 	kubeSecrets "github.com/1Password/onepassword-operator/pkg/kubernetessecrets"
 	"github.com/1Password/onepassword-operator/pkg/utils"
@@ -134,21 +134,15 @@ func (h *SecretUpdateHandler) updateKubernetesSecrets() (map[string]map[string]*
 			log.Info(fmt.Sprintf("Secret '%v' has been updated in 1Password but is set to be ignored. Updates to an ignored secret will not trigger an update to a kubernetes secret or a rolling restart.", secret.GetName()))
 			secret.Annotations[VersionAnnotation] = itemVersion
 			secret.Annotations[ItemPathAnnotation] = itemPathString
-			if err := h.client.Update(context.Background(), &secret); err != nil {
-				log.Error(err, "failed to update secret %s annotations to version %d: %s", secret.Name, itemVersion, err)
-				continue
-			}
+			h.client.Update(context.Background(), &secret)
 			continue
 		}
 		log.Info(fmt.Sprintf("Updating kubernetes secret '%v'", secret.GetName()))
 		secret.Annotations[VersionAnnotation] = itemVersion
 		secret.Annotations[ItemPathAnnotation] = itemPathString
-		secret.Data = kubeSecrets.BuildKubernetesSecretData(item.Fields, item.Files)
-		log.Info(fmt.Sprintf("New secret path: %v and version: %v", secret.Annotations[ItemPathAnnotation], secret.Annotations[VersionAnnotation]))
-		if err := h.client.Update(context.Background(), &secret); err != nil {
-			log.Error(err, "failed to update secret %s to version %d: %s", secret.Name, itemVersion, err)
-			continue
-		}
+		updatedSecret := kubeSecrets.BuildKubernetesSecretFromOnePasswordItem(secret.Name, secret.Namespace, secret.Annotations, secret.Labels, string(secret.Type), *item, nil)
+		log.Info(fmt.Sprintf("New secret path: %v and version: %v", updatedSecret.Annotations[ItemPathAnnotation], updatedSecret.Annotations[VersionAnnotation]))
+		h.client.Update(context.Background(), updatedSecret)
 		if updatedSecrets[secret.Namespace] == nil {
 			updatedSecrets[secret.Namespace] = make(map[string]*corev1.Secret)
 		}
45
pkg/utils/k8sutil.go
Normal file
@@ -0,0 +1,45 @@
package utils

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"
)

var ForceRunModeEnv = "OSDK_FORCE_RUN_MODE"

type RunModeType string

const (
	LocalRunMode RunModeType = "local"
	ClusterRunMode RunModeType = "cluster"
)

// ErrNoNamespace indicates that a namespace could not be found for the current
// environment
var ErrNoNamespace = fmt.Errorf("namespace not found for current environment")

// ErrRunLocal indicates that the operator is set to run in local mode (this error
// is returned by functions that only work on operators running in cluster mode)
var ErrRunLocal = fmt.Errorf("operator run mode forced to local")

// GetOperatorNamespace returns the namespace the operator should be running in.
func GetOperatorNamespace() (string, error) {
	if isRunModeLocal() {
		return "", ErrRunLocal
	}
	nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
	if err != nil {
		if os.IsNotExist(err) {
			return "", ErrNoNamespace
		}
		return "", err
	}
	ns := strings.TrimSpace(string(nsBytes))
	return ns, nil
}

func isRunModeLocal() bool {
	return os.Getenv(ForceRunModeEnv) == string(LocalRunMode)
}
5
tools.go
@@ -1,5 +0,0 @@
// +build tools

// Place any runtime dependencies as imports in this file.
// Go modules will be forced to download and install them.
package tools
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
@@ -1,202 +0,0 @@
12  vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json  generated  vendored
@@ -1,12 +0,0 @@
{
  "name": "metadata",
  "name_pretty": "Google Compute Engine Metadata API",
  "product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata",
  "client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata",
  "release_level": "ga",
  "language": "go",
  "repo": "googleapis/google-cloud-go",
  "distribution_name": "cloud.google.com/go/compute/metadata",
  "api_id": "compute:metadata",
  "requires_billing": false
}
526  vendor/cloud.google.com/go/compute/metadata/metadata.go  generated  vendored
@@ -1,526 +0,0 @@
|
|||||||
// Copyright 2014 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package metadata provides access to Google Compute Engine (GCE)
|
|
||||||
// metadata and API service accounts.
|
|
||||||
//
|
|
||||||
// This package is a wrapper around the GCE metadata service,
|
|
||||||
// as documented at https://developers.google.com/compute/docs/metadata.
|
|
||||||
package metadata // import "cloud.google.com/go/compute/metadata"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// metadataIP is the documented metadata server IP address.
|
|
||||||
metadataIP = "169.254.169.254"
|
|
||||||
|
|
||||||
// metadataHostEnv is the environment variable specifying the
|
|
||||||
// GCE metadata hostname. If empty, the default value of
|
|
||||||
// metadataIP ("169.254.169.254") is used instead.
|
|
||||||
// This is variable name is not defined by any spec, as far as
|
|
||||||
// I know; it was made up for the Go package.
|
|
||||||
metadataHostEnv = "GCE_METADATA_HOST"
|
|
||||||
|
|
||||||
userAgent = "gcloud-golang/0.1"
|
|
||||||
)
|
|
||||||
|
|
||||||
type cachedValue struct {
|
|
||||||
k string
|
|
||||||
trim bool
|
|
||||||
mu sync.Mutex
|
|
||||||
v string
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
projID = &cachedValue{k: "project/project-id", trim: true}
|
|
||||||
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
|
||||||
instID = &cachedValue{k: "instance/id", trim: true}
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
defaultClient = &Client{hc: &http.Client{
|
|
||||||
Transport: &http.Transport{
|
|
||||||
Dial: (&net.Dialer{
|
|
||||||
Timeout: 2 * time.Second,
|
|
||||||
KeepAlive: 30 * time.Second,
|
|
||||||
}).Dial,
|
|
||||||
ResponseHeaderTimeout: 2 * time.Second,
|
|
||||||
},
|
|
||||||
}}
|
|
||||||
subscribeClient = &Client{hc: &http.Client{
|
|
||||||
Transport: &http.Transport{
|
|
||||||
Dial: (&net.Dialer{
|
|
||||||
Timeout: 2 * time.Second,
|
|
||||||
KeepAlive: 30 * time.Second,
|
|
||||||
}).Dial,
|
|
||||||
},
|
|
||||||
}}
|
|
||||||
)
|
|
||||||
|
|
||||||
// NotDefinedError is returned when requested metadata is not defined.
|
|
||||||
//
|
|
||||||
// The underlying string is the suffix after "/computeMetadata/v1/".
|
|
||||||
//
|
|
||||||
// This error is not returned if the value is defined to be the empty
|
|
||||||
// string.
|
|
||||||
type NotDefinedError string
|
|
||||||
|
|
||||||
func (suffix NotDefinedError) Error() string {
|
|
||||||
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *cachedValue) get(cl *Client) (v string, err error) {
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
c.mu.Lock()
|
|
||||||
if c.v != "" {
|
|
||||||
return c.v, nil
|
|
||||||
}
|
|
||||||
if c.trim {
|
|
||||||
v, err = cl.getTrimmed(c.k)
|
|
||||||
} else {
|
|
||||||
v, err = cl.Get(c.k)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
c.v = v
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
onGCEOnce sync.Once
|
|
||||||
onGCE bool
|
|
||||||
)
|
|
||||||
|
|
||||||
// OnGCE reports whether this process is running on Google Compute Engine.
|
|
||||||
func OnGCE() bool {
|
|
||||||
onGCEOnce.Do(initOnGCE)
|
|
||||||
return onGCE
|
|
||||||
}
|
|
||||||
|
|
||||||
func initOnGCE() {
|
|
||||||
onGCE = testOnGCE()
|
|
||||||
}
|
|
||||||
|
|
||||||
func testOnGCE() bool {
|
|
||||||
// The user explicitly said they're on GCE, so trust them.
|
|
||||||
if os.Getenv(metadataHostEnv) != "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
resc := make(chan bool, 2)
|
|
||||||
|
|
||||||
// Try two strategies in parallel.
|
|
||||||
// See https://github.com/googleapis/google-cloud-go/issues/194
|
|
||||||
go func() {
|
|
||||||
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
|
||||||
req.Header.Set("User-Agent", userAgent)
|
|
||||||
res, err := defaultClient.hc.Do(req.WithContext(ctx))
|
|
||||||
if err != nil {
|
|
||||||
resc <- false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
resc <- res.Header.Get("Metadata-Flavor") == "Google"
|
|
||||||
}()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
addrs, err := net.LookupHost("metadata.google.internal")
|
|
||||||
if err != nil || len(addrs) == 0 {
|
|
||||||
resc <- false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
resc <- strsContains(addrs, metadataIP)
|
|
||||||
}()
|
|
||||||
|
|
||||||
tryHarder := systemInfoSuggestsGCE()
|
|
||||||
if tryHarder {
|
|
||||||
res := <-resc
|
|
||||||
if res {
|
|
||||||
// The first strategy succeeded, so let's use it.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Wait for either the DNS or metadata server probe to
|
|
||||||
// contradict the other one and say we are running on
|
|
||||||
// GCE. Give it a lot of time to do so, since the system
|
|
||||||
// info already suggests we're running on a GCE BIOS.
|
|
||||||
timer := time.NewTimer(5 * time.Second)
|
|
||||||
defer timer.Stop()
|
|
||||||
select {
|
|
||||||
case res = <-resc:
|
|
||||||
return res
|
|
||||||
case <-timer.C:
|
|
||||||
// Too slow. Who knows what this system is.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// There's no hint from the system info that we're running on
|
|
||||||
// GCE, so use the first probe's result as truth, whether it's
|
|
||||||
// true or false. The goal here is to optimize for speed for
|
|
||||||
// users who are NOT running on GCE. We can't assume that
|
|
||||||
// either a DNS lookup or an HTTP request to a blackholed IP
|
|
||||||
// address is fast. Worst case this should return when the
|
|
||||||
// metaClient's Transport.ResponseHeaderTimeout or
|
|
||||||
// Transport.Dial.Timeout fires (in two seconds).
|
|
||||||
return <-resc
|
|
||||||
}
|
|
||||||
|
|
||||||
// systemInfoSuggestsGCE reports whether the local system (without
|
|
||||||
// doing network requests) suggests that we're running on GCE. If this
|
|
||||||
// returns true, testOnGCE tries a bit harder to reach its metadata
|
|
||||||
// server.
|
|
||||||
func systemInfoSuggestsGCE() bool {
|
|
||||||
if runtime.GOOS != "linux" {
|
|
||||||
// We don't have any non-Linux clues available, at least yet.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
|
||||||
name := strings.TrimSpace(string(slurp))
|
|
||||||
return name == "Google" || name == "Google Compute Engine"
|
|
||||||
}
|
|
||||||
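Editorial note (not part of the vendored file): the detection above combines a BIOS hint with two parallel network probes, and OnGCE caches the result per process. A minimal usage sketch of the exported helpers shown in this file:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE runs the probes at most once per process (sync.Once).
	if !metadata.OnGCE() {
		log.Println("not running on GCE; skipping metadata lookups")
		return
	}
	project, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("project=%s zone=%s\n", project, zone)
}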
|
|
||||||
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
|
|
||||||
// ResponseHeaderTimeout).
|
|
||||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
|
||||||
return subscribeClient.Subscribe(suffix, fn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get calls Client.Get on the default client.
|
|
||||||
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
|
|
||||||
|
|
||||||
// ProjectID returns the current instance's project ID string.
|
|
||||||
func ProjectID() (string, error) { return defaultClient.ProjectID() }
|
|
||||||
|
|
||||||
// NumericProjectID returns the current instance's numeric project ID.
|
|
||||||
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
|
|
||||||
|
|
||||||
// InternalIP returns the instance's primary internal IP address.
|
|
||||||
func InternalIP() (string, error) { return defaultClient.InternalIP() }
|
|
||||||
|
|
||||||
// ExternalIP returns the instance's primary external (public) IP address.
|
|
||||||
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
|
|
||||||
|
|
||||||
// Email calls Client.Email on the default client.
|
|
||||||
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
|
|
||||||
|
|
||||||
// Hostname returns the instance's hostname. This will be of the form
|
|
||||||
// "<instanceID>.c.<projID>.internal".
|
|
||||||
func Hostname() (string, error) { return defaultClient.Hostname() }
|
|
||||||
|
|
||||||
// InstanceTags returns the list of user-defined instance tags,
|
|
||||||
// assigned when initially creating a GCE instance.
|
|
||||||
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
|
|
||||||
|
|
||||||
// InstanceID returns the current VM's numeric instance ID.
|
|
||||||
func InstanceID() (string, error) { return defaultClient.InstanceID() }
|
|
||||||
|
|
||||||
// InstanceName returns the current VM's instance ID string.
|
|
||||||
func InstanceName() (string, error) { return defaultClient.InstanceName() }
|
|
||||||
|
|
||||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
|
||||||
func Zone() (string, error) { return defaultClient.Zone() }
|
|
||||||
|
|
||||||
// InstanceAttributes calls Client.InstanceAttributes on the default client.
|
|
||||||
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
|
|
||||||
|
|
||||||
// ProjectAttributes calls Client.ProjectAttributes on the default client.
|
|
||||||
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
|
|
||||||
|
|
||||||
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
|
|
||||||
func InstanceAttributeValue(attr string) (string, error) {
|
|
||||||
return defaultClient.InstanceAttributeValue(attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
|
|
||||||
func ProjectAttributeValue(attr string) (string, error) {
|
|
||||||
return defaultClient.ProjectAttributeValue(attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scopes calls Client.Scopes on the default client.
|
|
||||||
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
|
|
||||||
|
|
||||||
func strsContains(ss []string, s string) bool {
|
|
||||||
for _, v := range ss {
|
|
||||||
if v == s {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Client provides metadata.
|
|
||||||
type Client struct {
|
|
||||||
hc *http.Client
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
|
|
||||||
// will use the given http.Client instead of the default client.
|
|
||||||
func NewClient(c *http.Client) *Client {
|
|
||||||
return &Client{hc: c}
|
|
||||||
}
|
|
||||||
|
|
||||||
// getETag returns a value from the metadata service as well as the associated ETag.
|
|
||||||
// This func is otherwise equivalent to Get.
|
|
||||||
func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
|
||||||
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
|
||||||
// a container, which is an important use-case for local testing of cloud
|
|
||||||
// deployments. To enable spoofing of the metadata service, the environment
|
|
||||||
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
|
||||||
// requests shall go.
|
|
||||||
host := os.Getenv(metadataHostEnv)
|
|
||||||
if host == "" {
|
|
||||||
// Using 169.254.169.254 instead of "metadata" here because Go
|
|
||||||
// binaries built with the "netgo" tag and without cgo won't
|
|
||||||
// know the search suffix for "metadata" is
|
|
||||||
// ".google.internal", and this IP address is documented as
|
|
||||||
// being stable anyway.
|
|
||||||
host = metadataIP
|
|
||||||
}
|
|
||||||
u := "http://" + host + "/computeMetadata/v1/" + suffix
|
|
||||||
req, _ := http.NewRequest("GET", u, nil)
|
|
||||||
req.Header.Set("Metadata-Flavor", "Google")
|
|
||||||
req.Header.Set("User-Agent", userAgent)
|
|
||||||
res, err := c.hc.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode == http.StatusNotFound {
|
|
||||||
return "", "", NotDefinedError(suffix)
|
|
||||||
}
|
|
||||||
all, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
if res.StatusCode != 200 {
|
|
||||||
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
|
|
||||||
}
|
|
||||||
return string(all), res.Header.Get("Etag"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a value from the metadata service.
|
|
||||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
|
||||||
//
|
|
||||||
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
|
||||||
// 169.254.169.254 will be used instead.
|
|
||||||
//
|
|
||||||
// If the requested metadata is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
func (c *Client) Get(suffix string) (string, error) {
|
|
||||||
val, _, err := c.getETag(suffix)
|
|
||||||
return val, err
|
|
||||||
}
|
|
||||||
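Editorial note (not part of the vendored file): as the comment in getETag explains, GCE_METADATA_HOST exists so the metadata service can be spoofed for local testing. A sketch of that pattern; the fake server and its canned project ID are invented for illustration:

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// A fake metadata server that answers every request with a canned project ID.
	fake := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "demo-project")
	}))
	defer fake.Close()

	// Point the package at the fake server; the value is host[:port], without a scheme.
	os.Setenv("GCE_METADATA_HOST", strings.TrimPrefix(fake.URL, "http://"))

	id, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id) // demo-project
}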
|
|
||||||
func (c *Client) getTrimmed(suffix string) (s string, err error) {
|
|
||||||
s, err = c.Get(suffix)
|
|
||||||
s = strings.TrimSpace(s)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) lines(suffix string) ([]string, error) {
|
|
||||||
j, err := c.Get(suffix)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
|
||||||
for i := range s {
|
|
||||||
s[i] = strings.TrimSpace(s[i])
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectID returns the current instance's project ID string.
|
|
||||||
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
|
|
||||||
|
|
||||||
// NumericProjectID returns the current instance's numeric project ID.
|
|
||||||
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
|
|
||||||
|
|
||||||
// InstanceID returns the current VM's numeric instance ID.
|
|
||||||
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
|
|
||||||
|
|
||||||
// InternalIP returns the instance's primary internal IP address.
|
|
||||||
func (c *Client) InternalIP() (string, error) {
|
|
||||||
return c.getTrimmed("instance/network-interfaces/0/ip")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Email returns the email address associated with the service account.
|
|
||||||
// The account may be empty or the string "default" to use the instance's
|
|
||||||
// main account.
|
|
||||||
func (c *Client) Email(serviceAccount string) (string, error) {
|
|
||||||
if serviceAccount == "" {
|
|
||||||
serviceAccount = "default"
|
|
||||||
}
|
|
||||||
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExternalIP returns the instance's primary external (public) IP address.
|
|
||||||
func (c *Client) ExternalIP() (string, error) {
|
|
||||||
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hostname returns the instance's hostname. This will be of the form
|
|
||||||
// "<instanceID>.c.<projID>.internal".
|
|
||||||
func (c *Client) Hostname() (string, error) {
|
|
||||||
return c.getTrimmed("instance/hostname")
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceTags returns the list of user-defined instance tags,
|
|
||||||
// assigned when initially creating a GCE instance.
|
|
||||||
func (c *Client) InstanceTags() ([]string, error) {
|
|
||||||
var s []string
|
|
||||||
j, err := c.Get("instance/tags")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceName returns the current VM's instance ID string.
|
|
||||||
func (c *Client) InstanceName() (string, error) {
|
|
||||||
host, err := c.Hostname()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return strings.Split(host, ".")[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
|
||||||
func (c *Client) Zone() (string, error) {
|
|
||||||
zone, err := c.getTrimmed("instance/zone")
|
|
||||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceAttributes returns the list of user-defined attributes,
|
|
||||||
// assigned when initially creating a GCE VM instance. The value of an
|
|
||||||
// attribute can be obtained with InstanceAttributeValue.
|
|
||||||
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
|
|
||||||
|
|
||||||
// ProjectAttributes returns the list of user-defined attributes
|
|
||||||
// applying to the project as a whole, not just this VM. The value of
|
|
||||||
// an attribute can be obtained with ProjectAttributeValue.
|
|
||||||
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
|
|
||||||
|
|
||||||
// InstanceAttributeValue returns the value of the provided VM
|
|
||||||
// instance attribute.
|
|
||||||
//
|
|
||||||
// If the requested attribute is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
//
|
|
||||||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
|
||||||
// defined to be the empty string.
|
|
||||||
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
|
|
||||||
return c.Get("instance/attributes/" + attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectAttributeValue returns the value of the provided
|
|
||||||
// project attribute.
|
|
||||||
//
|
|
||||||
// If the requested attribute is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
//
|
|
||||||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
|
||||||
// defined to be the empty string.
|
|
||||||
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
|
|
||||||
return c.Get("project/attributes/" + attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scopes returns the service account scopes for the given account.
|
|
||||||
// The account may be empty or the string "default" to use the instance's
|
|
||||||
// main account.
|
|
||||||
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
|
|
||||||
if serviceAccount == "" {
|
|
||||||
serviceAccount = "default"
|
|
||||||
}
|
|
||||||
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe subscribes to a value from the metadata service.
|
|
||||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
|
||||||
// The suffix may contain query parameters.
|
|
||||||
//
|
|
||||||
// Subscribe calls fn with the latest metadata value indicated by the provided
|
|
||||||
// suffix. If the metadata value is deleted, fn is called with the empty string
|
|
||||||
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
|
||||||
// is deleted. Subscribe returns the error value returned from the last call to
|
|
||||||
// fn, which may be nil when ok == false.
|
|
||||||
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
|
||||||
const failedSubscribeSleep = time.Second * 5
|
|
||||||
|
|
||||||
// First check to see if the metadata value exists at all.
|
|
||||||
val, lastETag, err := c.getETag(suffix)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := fn(val, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ok := true
|
|
||||||
if strings.ContainsRune(suffix, '?') {
|
|
||||||
suffix += "&wait_for_change=true&last_etag="
|
|
||||||
} else {
|
|
||||||
suffix += "?wait_for_change=true&last_etag="
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
|
|
||||||
if err != nil {
|
|
||||||
if _, deleted := err.(NotDefinedError); !deleted {
|
|
||||||
time.Sleep(failedSubscribeSleep)
|
|
||||||
continue // Retry on other errors.
|
|
||||||
}
|
|
||||||
ok = false
|
|
||||||
}
|
|
||||||
lastETag = etag
|
|
||||||
|
|
||||||
if err := fn(val, ok); err != nil || !ok {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
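Editorial note (not part of the vendored file): Subscribe long-polls with wait_for_change and the last ETag, invoking the callback on every change until it returns an error or the value is deleted. A hypothetical watcher (the attribute name is made up):

package main

import (
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Blocks until the callback returns a non-nil error or the value is deleted.
	err := metadata.Subscribe("instance/attributes/feature-flags", func(v string, ok bool) error {
		if !ok {
			log.Println("attribute was deleted")
			return nil
		}
		log.Printf("new value: %s", v)
		return nil // keep watching
	})
	if err != nil {
		log.Fatal(err)
	}
}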
|
|
||||||
// Error contains an error response from the server.
|
|
||||||
type Error struct {
|
|
||||||
// Code is the HTTP response status code.
|
|
||||||
Code int
|
|
||||||
// Message is the server response message.
|
|
||||||
Message string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Error) Error() string {
|
|
||||||
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
|
|
||||||
}
|
|
21  vendor/github.com/1Password/connect-sdk-go/LICENSE  generated  vendored
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2021 1Password

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
453  vendor/github.com/1Password/connect-sdk-go/connect/client.go  generated  vendored
@@ -1,453 +0,0 @@
|
|||||||
package connect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
opentracing "github.com/opentracing/opentracing-go"
|
|
||||||
"github.com/opentracing/opentracing-go/ext"
|
|
||||||
jaegerClientConfig "github.com/uber/jaeger-client-go/config"
|
|
||||||
"github.com/uber/jaeger-client-go/zipkin"
|
|
||||||
|
|
||||||
"github.com/1Password/connect-sdk-go/onepassword"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultUserAgent = "connect-sdk-go/%s"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Client Represents an available 1Password Connect API to connect to
|
|
||||||
type Client interface {
|
|
||||||
GetVaults() ([]onepassword.Vault, error)
|
|
||||||
GetVault(uuid string) (*onepassword.Vault, error)
|
|
||||||
GetVaultsByTitle(uuid string) ([]onepassword.Vault, error)
|
|
||||||
GetItem(uuid string, vaultUUID string) (*onepassword.Item, error)
|
|
||||||
GetItems(vaultUUID string) ([]onepassword.Item, error)
|
|
||||||
GetItemsByTitle(title string, vaultUUID string) ([]onepassword.Item, error)
|
|
||||||
GetItemByTitle(title string, vaultUUID string) (*onepassword.Item, error)
|
|
||||||
CreateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
|
|
||||||
UpdateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error)
|
|
||||||
DeleteItem(item *onepassword.Item, vaultUUID string) error
|
|
||||||
GetFile(fileUUID string, itemUUID string, vaultUUID string) (*onepassword.File, error)
|
|
||||||
GetFileContent(file *onepassword.File) ([]byte, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type httpClient interface {
|
|
||||||
Do(req *http.Request) (*http.Response, error)
|
|
||||||
}
|
|
||||||
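Editorial note (not part of the vendored file): because Client is an exported interface, consumers can substitute a test double; embedding the interface keeps the stub short, since only the methods a test actually calls need real bodies (the rest panic if reached). A sketch with invented item data:

package main

import (
	"fmt"

	"github.com/1Password/connect-sdk-go/connect"
	"github.com/1Password/connect-sdk-go/onepassword"
)

// stubClient satisfies connect.Client; unimplemented methods panic if called.
type stubClient struct {
	connect.Client
}

func (s *stubClient) GetItemByTitle(title string, vaultUUID string) (*onepassword.Item, error) {
	return &onepassword.Item{Title: title, Vault: onepassword.ItemVault{ID: vaultUUID}}, nil
}

func main() {
	var c connect.Client = &stubClient{}
	item, _ := c.GetItemByTitle("database", "vault-uuid")
	fmt.Println(item.Title)
}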
|
|
||||||
const (
|
|
||||||
envHostVariable = "OP_CONNECT_HOST"
|
|
||||||
envTokenVariable = "OP_CONNECT_TOKEN"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewClientFromEnvironment Returns a Secret Service client assuming that your
|
|
||||||
// jwt is set in the OP_TOKEN environment variable
|
|
||||||
func NewClientFromEnvironment() (Client, error) {
|
|
||||||
host, found := os.LookupEnv(envHostVariable)
|
|
||||||
if !found {
|
|
||||||
return nil, fmt.Errorf("There is no hostname available in the %q variable", envHostVariable)
|
|
||||||
}
|
|
||||||
|
|
||||||
token, found := os.LookupEnv(envTokenVariable)
|
|
||||||
if !found {
|
|
||||||
return nil, fmt.Errorf("There is no token available in the %q variable", envTokenVariable)
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewClient(host, token), nil
|
|
||||||
}
|
|
||||||
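Editorial note (not part of the vendored file): NewClientFromEnvironment reads the Connect host and token from OP_CONNECT_HOST and OP_CONNECT_TOKEN (the doc comment above still says OP_TOKEN, but the code reads OP_CONNECT_TOKEN). A minimal usage sketch:

package main

import (
	"fmt"
	"log"

	"github.com/1Password/connect-sdk-go/connect"
)

func main() {
	// Requires OP_CONNECT_HOST (e.g. http://localhost:8080) and OP_CONNECT_TOKEN to be set.
	client, err := connect.NewClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	vaults, err := client.GetVaults()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d vault(s)\n", len(vaults))
}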
|
|
||||||
// NewClient Returns a Secret Service client for a given url and jwt
|
|
||||||
func NewClient(url string, token string) Client {
|
|
||||||
return NewClientWithUserAgent(url, token, fmt.Sprintf(defaultUserAgent, SDKVersion))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClientWithUserAgent Returns a Secret Service client for a given url and jwt and identifies with userAgent
|
|
||||||
func NewClientWithUserAgent(url string, token string, userAgent string) Client {
|
|
||||||
if !opentracing.IsGlobalTracerRegistered() {
|
|
||||||
cfg := jaegerClientConfig.Configuration{}
|
|
||||||
zipkinPropagator := zipkin.NewZipkinB3HTTPHeaderPropagator()
|
|
||||||
cfg.InitGlobalTracer(
|
|
||||||
userAgent,
|
|
||||||
jaegerClientConfig.Injector(opentracing.HTTPHeaders, zipkinPropagator),
|
|
||||||
jaegerClientConfig.Extractor(opentracing.HTTPHeaders, zipkinPropagator),
|
|
||||||
jaegerClientConfig.ZipkinSharedRPCSpan(true),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &restClient{
|
|
||||||
URL: url,
|
|
||||||
Token: token,
|
|
||||||
|
|
||||||
userAgent: userAgent,
|
|
||||||
tracer: opentracing.GlobalTracer(),
|
|
||||||
|
|
||||||
client: http.DefaultClient,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type restClient struct {
|
|
||||||
URL string
|
|
||||||
Token string
|
|
||||||
userAgent string
|
|
||||||
tracer opentracing.Tracer
|
|
||||||
client httpClient
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetVaults Get a list of all available vaults
|
|
||||||
func (rs *restClient) GetVaults() ([]onepassword.Vault, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetVaults")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
vaultURL := fmt.Sprintf("/v1/vaults")
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, vaultURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var vaults []onepassword.Vault
|
|
||||||
if err := parseResponse(response, http.StatusOK, &vaults); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return vaults, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetVaults Get a list of all available vaults
|
|
||||||
func (rs *restClient) GetVault(uuid string) (*onepassword.Vault, error) {
|
|
||||||
if uuid == "" {
|
|
||||||
return nil, errors.New("no uuid provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
span := rs.tracer.StartSpan("GetVault")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
vaultURL := fmt.Sprintf("/v1/vaults/%s", uuid)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, vaultURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var vault onepassword.Vault
|
|
||||||
if err := parseResponse(response, http.StatusOK, &vault); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &vault, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rs *restClient) GetVaultsByTitle(title string) ([]onepassword.Vault, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetVaultsByTitle")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
filter := url.QueryEscape(fmt.Sprintf("title eq \"%s\"", title))
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults?filter=%s", filter)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var vaults []onepassword.Vault
|
|
||||||
if err := parseResponse(response, http.StatusOK, &vaults); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return vaults, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetItem Get a specific Item from the 1Password Connect API
|
|
||||||
func (rs *restClient) GetItem(uuid string, vaultUUID string) (*onepassword.Item, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetItem")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s", vaultUUID, uuid)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var item onepassword.Item
|
|
||||||
if err := parseResponse(response, http.StatusOK, &item); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &item, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rs *restClient) GetItemByTitle(title string, vaultUUID string) (*onepassword.Item, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetItemByTitle")
|
|
||||||
defer span.Finish()
|
|
||||||
items, err := rs.GetItemsByTitle(title, vaultUUID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(items) != 1 {
|
|
||||||
return nil, fmt.Errorf("Found %d item(s) in vault %q with title %q", len(items), vaultUUID, title)
|
|
||||||
}
|
|
||||||
|
|
||||||
return rs.GetItem(items[0].ID, items[0].Vault.ID)
|
|
||||||
}
|
|
||||||
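Editorial note (not part of the vendored file): GetItemByTitle resolves a title through the filter query used by GetItemsByTitle below, errors unless exactly one match exists, and then re-fetches the full item. A usage sketch; the vault UUID, item title, and field label are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/1Password/connect-sdk-go/connect"
)

func main() {
	client, err := connect.NewClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	item, err := client.GetItemByTitle("production-db", "your-vault-uuid")
	if err != nil {
		log.Fatal(err) // also returned when 0 or >1 items share the title
	}
	fmt.Println(item.GetValue("password"))
}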
|
|
||||||
func (rs *restClient) GetItemsByTitle(title string, vaultUUID string) ([]onepassword.Item, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetItemsByTitle")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
filter := url.QueryEscape(fmt.Sprintf("title eq \"%s\"", title))
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items?filter=%s", vaultUUID, filter)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var items []onepassword.Item
|
|
||||||
if err := parseResponse(response, http.StatusOK, &items); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return items, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rs *restClient) GetItems(vaultUUID string) ([]onepassword.Item, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetItems")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items", vaultUUID)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var items []onepassword.Item
|
|
||||||
if err := parseResponse(response, http.StatusOK, &items); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return items, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateItem Create a new item in a specified vault
|
|
||||||
func (rs *restClient) CreateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error) {
|
|
||||||
span := rs.tracer.StartSpan("CreateItem")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items", vaultUUID)
|
|
||||||
itemBody, err := json.Marshal(item)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
request, err := rs.buildRequest(http.MethodPost, itemURL, bytes.NewBuffer(itemBody), span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var newItem onepassword.Item
|
|
||||||
if err := parseResponse(response, http.StatusOK, &newItem); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &newItem, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateItem Update a new item in a specified vault
|
|
||||||
func (rs *restClient) UpdateItem(item *onepassword.Item, vaultUUID string) (*onepassword.Item, error) {
|
|
||||||
span := rs.tracer.StartSpan("UpdateItem")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s", item.Vault.ID, item.ID)
|
|
||||||
itemBody, err := json.Marshal(item)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
request, err := rs.buildRequest(http.MethodPut, itemURL, bytes.NewBuffer(itemBody), span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var newItem onepassword.Item
|
|
||||||
if err := parseResponse(response, http.StatusOK, &newItem); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &newItem, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteItem Delete a new item in a specified vault
|
|
||||||
func (rs *restClient) DeleteItem(item *onepassword.Item, vaultUUID string) error {
|
|
||||||
span := rs.tracer.StartSpan("DeleteItem")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s", item.Vault.ID, item.ID)
|
|
||||||
request, err := rs.buildRequest(http.MethodDelete, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := parseResponse(response, http.StatusNoContent, nil); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
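Editorial note (not part of the vendored file): CreateItem, UpdateItem, and DeleteItem all follow the same build-request/parse-response pattern. A hypothetical round trip; the vault UUID, titles, and field values are placeholders:

package main

import (
	"log"

	"github.com/1Password/connect-sdk-go/connect"
	"github.com/1Password/connect-sdk-go/onepassword"
)

func main() {
	client, err := connect.NewClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	vaultUUID := "your-vault-uuid" // placeholder

	created, err := client.CreateItem(&onepassword.Item{
		Title:    "example-credential",
		Category: onepassword.Login,
		Vault:    onepassword.ItemVault{ID: vaultUUID},
		Fields: []*onepassword.ItemField{
			{Label: "username", Value: "svc-account", Type: "STRING"},
		},
	}, vaultUUID)
	if err != nil {
		log.Fatal(err)
	}

	// Change a field and push the update, then clean up.
	created.Fields[0].Value = "svc-account-2"
	if _, err := client.UpdateItem(created, vaultUUID); err != nil {
		log.Fatal(err)
	}
	if err := client.DeleteItem(created, vaultUUID); err != nil {
		log.Fatal(err)
	}
}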
|
|
||||||
// GetFile Get a specific File in a specified item.
|
|
||||||
// This does not include the file contents. Call GetFileContent() to load the file's content.
|
|
||||||
func (rs *restClient) GetFile(uuid string, itemUUID string, vaultUUID string) (*onepassword.File, error) {
|
|
||||||
span := rs.tracer.StartSpan("GetFile")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
itemURL := fmt.Sprintf("/v1/vaults/%s/items/%s/files/%s", vaultUUID, itemUUID, uuid)
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, itemURL, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := expectMinimumConnectVersion(response, version{1, 3, 0}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var file onepassword.File
|
|
||||||
if err := parseResponse(response, http.StatusOK, &file); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &file, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetFileContent retrieves the file's content.
|
|
||||||
// If the file's content have previously been fetched, those contents are returned without making another request.
|
|
||||||
func (rs *restClient) GetFileContent(file *onepassword.File) ([]byte, error) {
|
|
||||||
if content, err := file.Content(); err == nil {
|
|
||||||
return content, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
span := rs.tracer.StartSpan("GetFileContent")
|
|
||||||
defer span.Finish()
|
|
||||||
|
|
||||||
request, err := rs.buildRequest(http.MethodGet, file.ContentPath, http.NoBody, span)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := rs.client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := expectMinimumConnectVersion(response, version{1, 3, 0}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
content, err := readResponseBody(response, http.StatusOK)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
file.SetContent(content)
|
|
||||||
return content, nil
|
|
||||||
}
|
|
||||||
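Editorial note (not part of the vendored file): GetFileContent fetches a file's bytes lazily and, per the version check above, needs Connect 1.3.0 or later. A sketch of downloading the first attachment of an item; identifiers are placeholders:

package main

import (
	"log"
	"os"

	"github.com/1Password/connect-sdk-go/connect"
)

func main() {
	client, err := connect.NewClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	item, err := client.GetItemByTitle("backup-keys", "your-vault-uuid")
	if err != nil {
		log.Fatal(err)
	}
	if len(item.Files) == 0 {
		log.Fatal("item has no file attachments")
	}

	// Returns cached bytes if already loaded, otherwise fetches them.
	content, err := client.GetFileContent(item.Files[0])
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile(item.Files[0].Name, content, 0o600); err != nil {
		log.Fatal(err)
	}
}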
|
|
||||||
func (rs *restClient) buildRequest(method string, path string, body io.Reader, span opentracing.Span) (*http.Request, error) {
|
|
||||||
url := fmt.Sprintf("%s%s", rs.URL, path)
|
|
||||||
|
|
||||||
request, err := http.NewRequest(method, url, body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
request.Header.Set("Content-Type", "application/json")
|
|
||||||
request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rs.Token))
|
|
||||||
request.Header.Set("User-Agent", rs.userAgent)
|
|
||||||
|
|
||||||
ext.SpanKindRPCClient.Set(span)
|
|
||||||
ext.HTTPUrl.Set(span, path)
|
|
||||||
ext.HTTPMethod.Set(span, method)
|
|
||||||
|
|
||||||
rs.tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(request.Header))
|
|
||||||
|
|
||||||
return request, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseResponse(resp *http.Response, expectedStatusCode int, result interface{}) error {
|
|
||||||
body, err := readResponseBody(resp, expectedStatusCode)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if result != nil {
|
|
||||||
if err := json.Unmarshal(body, result); err != nil {
|
|
||||||
return fmt.Errorf("decoding response: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readResponseBody(resp *http.Response, expectedStatusCode int) ([]byte, error) {
|
|
||||||
defer resp.Body.Close()
|
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if resp.StatusCode != expectedStatusCode {
|
|
||||||
var errResp *onepassword.Error
|
|
||||||
if err := json.Unmarshal(body, &errResp); err != nil {
|
|
||||||
return nil, fmt.Errorf("decoding error response: %s", err)
|
|
||||||
}
|
|
||||||
return nil, errResp
|
|
||||||
}
|
|
||||||
return body, nil
|
|
||||||
}
|
|
172  vendor/github.com/1Password/connect-sdk-go/connect/config.go  generated  vendored
@@ -1,172 +0,0 @@
|
|||||||
package connect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/1Password/connect-sdk-go/onepassword"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
vaultTag = "opvault"
|
|
||||||
itemTag = "opitem"
|
|
||||||
fieldTag = "opfield"
|
|
||||||
|
|
||||||
envVaultVar = "OP_VAULT"
|
|
||||||
)
|
|
||||||
|
|
||||||
type parsedItem struct {
|
|
||||||
vaultUUID string
|
|
||||||
itemTitle string
|
|
||||||
fields []*reflect.StructField
|
|
||||||
values []*reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load Load configuration values based on strcut tag
|
|
||||||
func Load(client Client, i interface{}) error {
|
|
||||||
configP := reflect.ValueOf(i)
|
|
||||||
if configP.Kind() != reflect.Ptr {
|
|
||||||
return fmt.Errorf("You must pass a pointer to Config struct")
|
|
||||||
}
|
|
||||||
|
|
||||||
config := configP.Elem()
|
|
||||||
if config.Kind() != reflect.Struct {
|
|
||||||
return fmt.Errorf("Config values can only be loaded into a struct")
|
|
||||||
}
|
|
||||||
|
|
||||||
t := config.Type()
|
|
||||||
|
|
||||||
// Multiple fields may be from a single item so we will collect them
|
|
||||||
items := map[string]parsedItem{}
|
|
||||||
|
|
||||||
// Fetch the Vault from the environment
|
|
||||||
vaultUUID, envVarFound := os.LookupEnv(envVaultVar)
|
|
||||||
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
value := config.Field(i)
|
|
||||||
field := t.Field(i)
|
|
||||||
tag := field.Tag.Get(itemTag)
|
|
||||||
|
|
||||||
if tag == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if !value.CanSet() {
|
|
||||||
return fmt.Errorf("Cannot load config into private fields")
|
|
||||||
}
|
|
||||||
|
|
||||||
itemVault, err := vaultUUIDForField(&field, vaultUUID, envVarFound)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
key := fmt.Sprintf("%s/%s", itemVault, tag)
|
|
||||||
parsed := items[key]
|
|
||||||
parsed.vaultUUID = itemVault
|
|
||||||
parsed.itemTitle = tag
|
|
||||||
parsed.fields = append(parsed.fields, &field)
|
|
||||||
parsed.values = append(parsed.values, &value)
|
|
||||||
items[key] = parsed
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, item := range items {
|
|
||||||
if err := setValuesForTag(client, &item); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
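Editorial note (not part of the vendored file): Load walks the struct's exported fields, groups them by opvault/opitem so each item is fetched only once, and assigns values by the opfield "section.field" selector (or falls back to the OP_VAULT environment variable when opvault is absent). A hypothetical config struct; the vault UUID, item title, and field paths are made up:

package main

import (
	"fmt"
	"log"

	"github.com/1Password/connect-sdk-go/connect"
)

type Config struct {
	// Both fields come from the same item, so it is fetched only once.
	Username string `opvault:"your-vault-uuid" opitem:"production-db" opfield:"credentials.username"`
	Password string `opvault:"your-vault-uuid" opitem:"production-db" opfield:"credentials.password"`
}

func main() {
	client, err := connect.NewClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	var cfg Config
	// Alternatively, set OP_VAULT and drop the opvault tags.
	if err := connect.Load(client, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Username)
}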
|
|
||||||
func vaultUUIDForField(field *reflect.StructField, vaultUUID string, envVaultFound bool) (string, error) {
|
|
||||||
// Check to see if a specific vault has been specified on the field
|
|
||||||
// If the env vault id has not been found and item doesn't have a vault
|
|
||||||
// return an error
|
|
||||||
if vaultUUIDTag := field.Tag.Get(vaultTag); vaultUUIDTag == "" {
|
|
||||||
if !envVaultFound {
|
|
||||||
return "", fmt.Errorf("There is no vault for %q field", field.Name)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return vaultUUIDTag, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return vaultUUID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func setValuesForTag(client Client, parsedItem *parsedItem) error {
|
|
||||||
item, err := client.GetItemByTitle(parsedItem.itemTitle, parsedItem.vaultUUID)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, field := range parsedItem.fields {
|
|
||||||
value := parsedItem.values[i]
|
|
||||||
path := field.Tag.Get(fieldTag)
|
|
||||||
if path == "" {
|
|
||||||
if field.Type == reflect.TypeOf(onepassword.Item{}) {
|
|
||||||
value.Set(reflect.ValueOf(*item))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("There is no %q specified for %q", fieldTag, field.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
pathParts := strings.Split(path, ".")
|
|
||||||
|
|
||||||
if len(pathParts) != 2 {
|
|
||||||
return fmt.Errorf("Invalid field path format for %q", field.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
sectionID := sectionIDForName(pathParts[0], item.Sections)
|
|
||||||
label := pathParts[1]
|
|
||||||
|
|
||||||
for _, f := range item.Fields {
|
|
||||||
fieldSectionID := ""
|
|
||||||
if f.Section != nil {
|
|
||||||
fieldSectionID = f.Section.ID
|
|
||||||
}
|
|
||||||
|
|
||||||
if fieldSectionID == sectionID && f.Label == label {
|
|
||||||
if err := setValue(value, f.Value); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func setValue(value *reflect.Value, toSet string) error {
|
|
||||||
switch value.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
value.SetString(toSet)
|
|
||||||
case reflect.Int:
|
|
||||||
v, err := strconv.Atoi(toSet)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
value.SetInt(int64(v))
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("Unsupported type %q. Only string, int64, and onepassword.Item are supported", value.Kind())
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func sectionIDForName(name string, sections []*onepassword.ItemSection) string {
|
|
||||||
if sections == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range sections {
|
|
||||||
if name == strings.ToLower(s.Label) {
|
|
||||||
return s.ID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
104  vendor/github.com/1Password/connect-sdk-go/connect/version.go  generated  vendored
@@ -1,104 +0,0 @@
|
|||||||
package connect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SDKVersion is the latest Semantic Version of the library
|
|
||||||
// Do not rename this variable without changing the regex in the Makefile
|
|
||||||
const SDKVersion = "1.2.0"
|
|
||||||
|
|
||||||
const VersionHeaderKey = "1Password-Connect-Version"
|
|
||||||
|
|
||||||
// expectMinimumConnectVersion returns an error if the provided minimum version for Connect is lower than the version
|
|
||||||
// reported in the response from Connect.
|
|
||||||
func expectMinimumConnectVersion(resp *http.Response, minimumVersion version) error {
|
|
||||||
serverVersion, err := getServerVersion(resp)
|
|
||||||
if err != nil {
|
|
||||||
// Return gracefully if server version cannot be determined reliably
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if !serverVersion.IsGreaterOrEqualThan(minimumVersion) {
|
|
||||||
return fmt.Errorf("need at least version %s of Connect for this function, detected version %s. Please update your Connect server", minimumVersion, serverVersion)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getServerVersion(resp *http.Response) (serverVersion, error) {
|
|
||||||
versionHeader := resp.Header.Get(VersionHeaderKey)
|
|
||||||
if versionHeader == "" {
|
|
||||||
// The last version without the version header was v1.2.0
|
|
||||||
return serverVersion{
|
|
||||||
version: version{1, 2, 0},
|
|
||||||
orEarlier: true,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
return parseServerVersion(versionHeader)
|
|
||||||
}
|
|
||||||
|
|
||||||
type version struct {
|
|
||||||
major int
|
|
||||||
minor int
|
|
||||||
patch int
|
|
||||||
}
|
|
||||||
|
|
||||||
// serverVersion describes the version reported by the server.
|
|
||||||
type serverVersion struct {
|
|
||||||
version
|
|
||||||
// orEarlier is true if the version is derived from the lack of a version header from the server.
|
|
||||||
orEarlier bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v version) String() string {
|
|
||||||
return fmt.Sprintf("%d.%d.%d", v.major, v.minor, v.patch)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v serverVersion) String() string {
|
|
||||||
if v.orEarlier {
|
|
||||||
return v.version.String() + " (or earlier)"
|
|
||||||
}
|
|
||||||
return v.version.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsGreaterOrEqualThan returns true if the lefthand-side version is equal to or or a higher version than the provided
|
|
||||||
// minimum according to the semantic versioning rules.
|
|
||||||
func (v version) IsGreaterOrEqualThan(min version) bool {
|
|
||||||
if v.major != min.major {
|
|
||||||
// Different major version
|
|
||||||
return v.major > min.major
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.minor != min.minor {
|
|
||||||
// Same major, but different minor version
|
|
||||||
return v.minor > min.minor
|
|
||||||
}
|
|
||||||
|
|
||||||
// Same major and minor version
|
|
||||||
return v.patch >= min.patch
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseServerVersion(v string) (serverVersion, error) {
|
|
||||||
spl := strings.Split(v, ".")
|
|
||||||
if len(spl) != 3 {
|
|
||||||
return serverVersion{}, errors.New("wrong length")
|
|
||||||
}
|
|
||||||
var res [3]int
|
|
||||||
for i := range res {
|
|
||||||
tmp, err := strconv.Atoi(spl[i])
|
|
||||||
if err != nil {
|
|
||||||
return serverVersion{}, err
|
|
||||||
}
|
|
||||||
res[i] = tmp
|
|
||||||
}
|
|
||||||
return serverVersion{
|
|
||||||
version: version{
|
|
||||||
major: res[0],
|
|
||||||
minor: res[1],
|
|
||||||
patch: res[2],
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
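Editorial note (not part of the vendored file): the version and serverVersion types above are unexported, so they cannot be used outside package connect; the standalone sketch below merely mirrors IsGreaterOrEqualThan to show the comparison order (major, then minor, then patch):

package main

import "fmt"

type version struct{ major, minor, patch int }

func (v version) isGreaterOrEqualThan(min version) bool {
	if v.major != min.major {
		return v.major > min.major
	}
	if v.minor != min.minor {
		return v.minor > min.minor
	}
	return v.patch >= min.patch
}

func main() {
	min := version{1, 3, 0} // minimum required by the file endpoints
	fmt.Println(version{1, 3, 1}.isGreaterOrEqualThan(min)) // true
	fmt.Println(version{1, 2, 9}.isGreaterOrEqualThan(min)) // false
	fmt.Println(version{2, 0, 0}.isGreaterOrEqualThan(min)) // true
}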
21  vendor/github.com/1Password/connect-sdk-go/onepassword/errors.go  generated  vendored
@@ -1,21 +0,0 @@
package onepassword

import "fmt"

// Error is an error returned by the Connect API.
type Error struct {
	StatusCode int    `json:"status"`
	Message    string `json:"message"`
}

func (e *Error) Error() string {
	return fmt.Sprintf("status %d: %s", e.StatusCode, e.Message)
}

func (e *Error) Is(target error) bool {
	t, ok := target.(*Error)
	if !ok {
		return false
	}
	return t.Message == e.Message && t.StatusCode == e.StatusCode
}
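Editorial note (not part of the vendored file): the client returns *onepassword.Error for non-2xx responses, so callers can branch on the HTTP status with errors.As; the Is method above compares both status and message, which makes matching on status alone easier this way. A sketch with placeholder identifiers:

package main

import (
	"errors"
	"log"
	"net/http"

	"github.com/1Password/connect-sdk-go/connect"
	"github.com/1Password/connect-sdk-go/onepassword"
)

func main() {
	client, err := connect.NewClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.GetItem("nonexistent-item-uuid", "your-vault-uuid")

	var apiErr *onepassword.Error
	if errors.As(err, &apiErr) && apiErr.StatusCode == http.StatusNotFound {
		log.Println("item does not exist")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}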
49  vendor/github.com/1Password/connect-sdk-go/onepassword/files.go  generated  vendored
@@ -1,49 +0,0 @@
package onepassword

import (
	"encoding/json"
	"errors"
)

type File struct {
	ID          string       `json:"id"`
	Name        string       `json:"name"`
	Section     *ItemSection `json:"section,omitempty"`
	Size        int          `json:"size"`
	ContentPath string       `json:"content_path"`
	content     []byte
}

func (f *File) UnmarshalJSON(data []byte) error {
	var jsonFile struct {
		ID          string       `json:"id"`
		Name        string       `json:"name"`
		Section     *ItemSection `json:"section,omitempty"`
		Size        int          `json:"size"`
		ContentPath string       `json:"content_path"`
		Content     []byte       `json:"content,omitempty"`
	}
	if err := json.Unmarshal(data, &jsonFile); err != nil {
		return err
	}
	f.ID = jsonFile.ID
	f.Name = jsonFile.Name
	f.Section = jsonFile.Section
	f.Size = jsonFile.Size
	f.ContentPath = jsonFile.ContentPath
	f.content = jsonFile.Content
	return nil
}

// Content returns the content of the file if they have been loaded and returns an error if they have not been loaded.
// Use `client.GetFileContent(file *File)` instead to make sure the content is fetched automatically if not present.
func (f *File) Content() ([]byte, error) {
	if f.content == nil {
		return nil, errors.New("file content not loaded")
	}
	return f.content, nil
}

func (f *File) SetContent(content []byte) {
	f.content = content
}
160  vendor/github.com/1Password/connect-sdk-go/onepassword/items.go  generated  vendored
@@ -1,160 +0,0 @@
|
|||||||
package onepassword
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ItemCategory Represents the template of the Item
|
|
||||||
type ItemCategory string
|
|
||||||
|
|
||||||
const (
|
|
||||||
Login ItemCategory = "LOGIN"
|
|
||||||
Password ItemCategory = "PASSWORD"
|
|
||||||
Server ItemCategory = "SERVER"
|
|
||||||
Database ItemCategory = "DATABASE"
|
|
||||||
CreditCard ItemCategory = "CREDIT_CARD"
|
|
||||||
Membership ItemCategory = "MEMBERSHIP"
|
|
||||||
Passport ItemCategory = "PASSPORT"
|
|
||||||
SoftwareLicense ItemCategory = "SOFTWARE_LICENSE"
|
|
||||||
OutdoorLicense ItemCategory = "OUTDOOR_LICENSE"
|
|
||||||
SecureNote ItemCategory = "SECURE_NOTE"
|
|
||||||
WirelessRouter ItemCategory = "WIRELESS_ROUTER"
|
|
||||||
BankAccount ItemCategory = "BANK_ACCOUNT"
|
|
||||||
DriverLicense ItemCategory = "DRIVER_LICENSE"
|
|
||||||
Identity ItemCategory = "IDENTITY"
|
|
||||||
RewardProgram ItemCategory = "REWARD_PROGRAM"
|
|
||||||
Document ItemCategory = "DOCUMENT"
|
|
||||||
EmailAccount ItemCategory = "EMAIL_ACCOUNT"
|
|
||||||
SocialSecurityNumber ItemCategory = "SOCIAL_SECURITY_NUMBER"
|
|
||||||
ApiCredential ItemCategory = "API_CREDENTIAL"
|
|
||||||
Custom ItemCategory = "CUSTOM"
|
|
||||||
)
|
|
||||||
|
|
||||||
// UnmarshalJSON Unmarshall Item Category enum strings to Go string enums
|
|
||||||
func (ic *ItemCategory) UnmarshalJSON(b []byte) error {
|
|
||||||
var s string
|
|
||||||
json.Unmarshal(b, &s)
|
|
||||||
category := ItemCategory(s)
|
|
||||||
switch category {
|
|
||||||
case Login, Password, Server, Database, CreditCard, Membership, Passport, SoftwareLicense,
|
|
||||||
OutdoorLicense, SecureNote, WirelessRouter, BankAccount, DriverLicense, Identity, RewardProgram,
|
|
||||||
Document, EmailAccount, SocialSecurityNumber, ApiCredential:
|
|
||||||
*ic = category
|
|
||||||
default:
|
|
||||||
*ic = Custom
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Item represents an item returned to the consumer
|
|
||||||
type Item struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Title string `json:"title"`
|
|
||||||
|
|
||||||
URLs []ItemURL `json:"urls,omitempty"`
|
|
||||||
Favorite bool `json:"favorite,omitempty"`
|
|
||||||
Tags []string `json:"tags,omitempty"`
|
|
||||||
Version int `json:"version,omitempty"`
|
|
||||||
Trashed bool `json:"trashed,omitempty"`
|
|
||||||
|
|
||||||
Vault ItemVault `json:"vault"`
|
|
||||||
Category ItemCategory `json:"category,omitempty"` // TODO: switch this to `category`
|
|
||||||
|
|
||||||
Sections []*ItemSection `json:"sections,omitempty"`
|
|
||||||
Fields []*ItemField `json:"fields,omitempty"`
|
|
||||||
Files []*File `json:"files,omitempty"`
|
|
||||||
|
|
||||||
LastEditedBy string `json:"lastEditedBy,omitempty"`
|
|
||||||
CreatedAt time.Time `json:"createdAt,omitempty"`
|
|
||||||
UpdatedAt time.Time `json:"updatedAt,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemVault represents the Vault the Item is found in
|
|
||||||
type ItemVault struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemURL is a simplified item URL
|
|
||||||
type ItemURL struct {
|
|
||||||
Primary bool `json:"primary,omitempty"`
|
|
||||||
URL string `json:"href"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemSection Representation of a Section on an item
|
|
||||||
type ItemSection struct {
|
|
||||||
ID string `json:"id,omitempty"`
|
|
||||||
Label string `json:"label,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GeneratorRecipe Representation of a "recipe" used to generate a field
|
|
||||||
type GeneratorRecipe struct {
|
|
||||||
Length int `json:"length,omitempty"`
|
|
||||||
CharacterSets []string `json:"characterSets,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemField Representation of a single field on an Item
|
|
||||||
type ItemField struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Section *ItemSection `json:"section,omitempty"`
|
|
||||||
Type string `json:"type"`
|
|
||||||
Purpose string `json:"purpose,omitempty"`
|
|
||||||
Label string `json:"label,omitempty"`
|
|
||||||
Value string `json:"value,omitempty"`
|
|
||||||
Generate bool `json:"generate,omitempty"`
|
|
||||||
Recipe *GeneratorRecipe `json:"recipe,omitempty"`
|
|
||||||
Entropy float64 `json:"entropy,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get Retrieve the value of a field on the item by its label. To specify a
|
|
||||||
// field from a specific section pass in <section label>.<field label>. If
|
|
||||||
// no field matching the selector is found return "".
|
|
||||||
func (i *Item) GetValue(field string) string {
|
|
||||||
if i == nil || len(i.Fields) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
sectionFilter := false
|
|
||||||
sectionLabel := ""
|
|
||||||
fieldLabel := field
|
|
||||||
if strings.Contains(field, ".") {
|
|
||||||
parts := strings.Split(field, ".")
|
|
||||||
|
|
||||||
// Test to make sure the . isn't the last character
|
|
||||||
if len(parts) == 2 {
|
|
||||||
sectionFilter = true
|
|
||||||
sectionLabel = parts[0]
|
|
||||||
fieldLabel = parts[1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, f := range i.Fields {
|
|
||||||
if sectionFilter {
|
|
||||||
if f.Section != nil {
|
|
||||||
if sectionLabel != i.SectionLabelForID(f.Section.ID) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fieldLabel == f.Label {
|
|
||||||
return f.Value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Item) SectionLabelForID(id string) string {
|
|
||||||
if i != nil || len(i.Sections) > 0 {
|
|
||||||
for _, s := range i.Sections {
|
|
||||||
if s.ID == id {
|
|
||||||
return s.Label
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
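The `GetValue` helper removed above resolves a field by its label, optionally scoped with a `<section label>.<field label>` selector, and returns `""` when nothing matches. A minimal usage sketch against the struct definitions in this deleted file (the item data below is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/1Password/connect-sdk-go/onepassword"
)

func main() {
	// Hypothetical item with one field that belongs to a "Database" section.
	item := &onepassword.Item{
		Sections: []*onepassword.ItemSection{{ID: "s1", Label: "Database"}},
		Fields: []*onepassword.ItemField{
			{Label: "password", Value: "hunter2", Section: &onepassword.ItemSection{ID: "s1"}},
		},
	}

	fmt.Println(item.GetValue("password"))          // match by field label alone
	fmt.Println(item.GetValue("Database.password")) // "<section label>.<field label>" selector
	fmt.Println(item.GetValue("missing"))           // "" when nothing matches
}
```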
vendor/github.com/1Password/connect-sdk-go/onepassword/vaults.go (generated, vendored, 46 lines removed)
@@ -1,46 +0,0 @@
package onepassword

import (
	"encoding/json"
	"time"
)

// Vault represents a 1password Vault
type Vault struct {
	ID          string `json:"id"`
	Name        string `json:"name,omitempty"`
	Description string `json:"description,omitempty"`

	AttrVersion    int       `json:"attributeVersion,omitempty"`
	ContentVersoin int       `json:"contentVersion,omitempty"`
	Items          int       `json:"items,omitempty"`
	Type           VaultType `json:"type,omitempty"`

	CreatedAt time.Time `json:"createdAt,omitempty"`
	UpdatedAt time.Time `json:"updatedAt,omitempty"`
}

// VaultType Representation of what the Vault Type is
type VaultType string

const (
	PersonalVault    VaultType = "PERSONAL"
	EveryoneVault    VaultType = "EVERYONE"
	TransferVault    VaultType = "TRANSFER"
	UserCreatedVault VaultType = "USER_CREATED"
	UnknownVault     VaultType = "UNKNOWN"
)

// UnmarshalJSON Unmarshall Vault Type enum strings to Go string enums
func (vt *VaultType) UnmarshalJSON(b []byte) error {
	var s string
	json.Unmarshal(b, &s)
	vaultType := VaultType(s)
	switch vaultType {
	case PersonalVault, EveryoneVault, TransferVault, UserCreatedVault:
		*vt = vaultType
	default:
		*vt = UnknownVault
	}
	return nil
}
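Because `VaultType.UnmarshalJSON` falls back to `UnknownVault` for unrecognised strings, decoding does not fail on new server-side vault types. A small illustrative sketch (the JSON payload is made up):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/1Password/connect-sdk-go/onepassword"
)

func main() {
	var v onepassword.Vault
	// A type string the SDK does not recognise is mapped to UnknownVault instead of causing an error.
	if err := json.Unmarshal([]byte(`{"id":"v1","name":"Demo","type":"SOMETHING_NEW"}`), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Type == onepassword.UnknownVault) // true
}
```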
vendor/github.com/Azure/go-autorest/autorest/LICENSE (generated, vendored, 191 lines removed)
@@ -1,191 +0,0 @@
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright 2015 Microsoft Corporation
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE (generated, vendored, 191 lines removed)
@@ -1,191 +0,0 @@
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright 2015 Microsoft Corporation
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
vendor/github.com/Azure/go-autorest/autorest/adal/README.md (generated, vendored, 292 lines removed)
@@ -1,292 +0,0 @@
# Azure Active Directory authentication for Go
|
|
||||||
|
|
||||||
This is a standalone package for authenticating with Azure Active
|
|
||||||
Directory from other Go libraries and applications, in particular the [Azure SDK
|
|
||||||
for Go](https://github.com/Azure/azure-sdk-for-go).
|
|
||||||
|
|
||||||
Note: Despite the package's name it is not related to other "ADAL" libraries
|
|
||||||
maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
|
|
||||||
should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
|
|
||||||
or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
|
|
||||||
trackers.
|
|
||||||
|
|
||||||
## Install
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get -u github.com/Azure/go-autorest/autorest/adal
|
|
||||||
```
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
|
|
||||||
|
|
||||||
### Register an Azure AD Application with secret
|
|
||||||
|
|
||||||
|
|
||||||
1. Register a new application with a `secret` credential
|
|
||||||
|
|
||||||
```
|
|
||||||
az ad app create \
|
|
||||||
--display-name example-app \
|
|
||||||
--homepage https://example-app/home \
|
|
||||||
--identifier-uris https://example-app/app \
|
|
||||||
--password secret
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Create a service principal using the `Application ID` from previous step
|
|
||||||
|
|
||||||
```
|
|
||||||
az ad sp create --id "Application ID"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace `Application ID` with `appId` from step 1.
|
|
||||||
|
|
||||||
### Register an Azure AD Application with certificate
|
|
||||||
|
|
||||||
1. Create a private key
|
|
||||||
|
|
||||||
```
|
|
||||||
openssl genrsa -out "example-app.key" 2048
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Create the certificate
|
|
||||||
|
|
||||||
```
|
|
||||||
openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
|
|
||||||
openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
|
|
||||||
```
|
|
||||||
|
|
||||||
3. Create the PKCS12 version of the certificate containing also the private key
|
|
||||||
|
|
||||||
```
|
|
||||||
openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
4. Register a new application with the certificate content from `example-app.crt`
|
|
||||||
|
|
||||||
```
|
|
||||||
certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
|
|
||||||
|
|
||||||
az ad app create \
|
|
||||||
--display-name example-app \
|
|
||||||
--homepage https://example-app/home \
|
|
||||||
--identifier-uris https://example-app/app \
|
|
||||||
--key-usage Verify --end-date 2018-01-01 \
|
|
||||||
--key-value "${certificateContents}"
|
|
||||||
```
|
|
||||||
|
|
||||||
5. Create a service principal using the `Application ID` from previous step
|
|
||||||
|
|
||||||
```
|
|
||||||
az ad sp create --id "APPLICATION_ID"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace `APPLICATION_ID` with `appId` from step 4.
|
|
||||||
|
|
||||||
|
|
||||||
### Grant the necessary permissions
|
|
||||||
|
|
||||||
Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
|
|
||||||
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
|
|
||||||
which can be assigned to a service principal of an Azure AD application depending on your needs.
|
|
||||||
|
|
||||||
```
|
|
||||||
az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step.
|
|
||||||
* Replace the `ROLE_NAME` with a role name of your choice.
|
|
||||||
|
|
||||||
It is also possible to define custom role definitions.
|
|
||||||
|
|
||||||
```
|
|
||||||
az role definition create --role-definition role-definition.json
|
|
||||||
```
|
|
||||||
|
|
||||||
* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file.
|
|
||||||
|
|
||||||
|
|
||||||
### Acquire Access Token
|
|
||||||
|
|
||||||
The common configuration used by all flows:
|
|
||||||
|
|
||||||
```Go
|
|
||||||
const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
|
|
||||||
tenantID := "TENANT_ID"
|
|
||||||
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
|
|
||||||
|
|
||||||
applicationID := "APPLICATION_ID"
|
|
||||||
|
|
||||||
callback := func(token adal.Token) error {
|
|
||||||
// This is called after the token is acquired
|
|
||||||
}
|
|
||||||
|
|
||||||
// The resource for which the token is acquired
|
|
||||||
resource := "https://management.core.windows.net/"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace the `TENANT_ID` with your tenant ID.
|
|
||||||
* Replace the `APPLICATION_ID` with the value from previous section.
|
|
||||||
|
|
||||||
#### Client Credentials
|
|
||||||
|
|
||||||
```Go
|
|
||||||
applicationSecret := "APPLICATION_SECRET"
|
|
||||||
|
|
||||||
spt, err := adal.NewServicePrincipalToken(
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
applicationSecret,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Acquire a new access token
|
|
||||||
err = spt.Refresh()
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace the `APPLICATION_SECRET` with the `password` value from previous section.
|
|
||||||
|
|
||||||
#### Client Certificate
|
|
||||||
|
|
||||||
```Go
|
|
||||||
certificatePath := "./example-app.pfx"
|
|
||||||
|
|
||||||
certData, err := ioutil.ReadFile(certificatePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the certificate and private key from pfx file
|
|
||||||
certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromCertificate(
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
certificate,
|
|
||||||
rsaPrivateKey,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
// Acquire a new access token
|
|
||||||
err = spt.Refresh()
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
* Update the certificate path to point to the example-app.pfx file which was created in previous section.
|
|
||||||
|
|
||||||
|
|
||||||
#### Device Code
|
|
||||||
|
|
||||||
```Go
|
|
||||||
oauthClient := &http.Client{}
|
|
||||||
|
|
||||||
// Acquire the device code
|
|
||||||
deviceCode, err := adal.InitiateDeviceAuth(
|
|
||||||
oauthClient,
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
resource)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display the authentication message
|
|
||||||
fmt.Println(*deviceCode.Message)
|
|
||||||
|
|
||||||
// Wait here until the user is authenticated
|
|
||||||
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromManualToken(
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
resource,
|
|
||||||
*token,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Username password authenticate
|
|
||||||
|
|
||||||
```Go
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
username,
|
|
||||||
password,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Authorization code authenticate
|
|
||||||
|
|
||||||
``` Go
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
|
|
||||||
*oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
clientSecret,
|
|
||||||
authorizationCode,
|
|
||||||
redirectURI,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
err = spt.Refresh()
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Command Line Tool
|
|
||||||
|
|
||||||
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
|
|
||||||
|
|
||||||
```
|
|
||||||
adal -h
|
|
||||||
|
|
||||||
Usage of ./adal:
|
|
||||||
-applicationId string
|
|
||||||
application id
|
|
||||||
-certificatePath string
|
|
||||||
path to pk12/PFC application certificate
|
|
||||||
-mode string
|
|
||||||
authentication mode (device, secret, cert, refresh) (default "device")
|
|
||||||
-resource string
|
|
||||||
resource for which the token is requested
|
|
||||||
-secret string
|
|
||||||
application secret
|
|
||||||
-tenantId string
|
|
||||||
tenant id
|
|
||||||
-tokenCachePath string
|
|
||||||
location of oath token cache (default "/home/cgc/.adal/accessToken.json")
|
|
||||||
```
|
|
||||||
|
|
||||||
Example acquire a token for `https://management.core.windows.net/` using device code flow:
|
|
||||||
|
|
||||||
```
|
|
||||||
adal -mode device \
|
|
||||||
-applicationId "APPLICATION_ID" \
|
|
||||||
-tenantId "TENANT_ID" \
|
|
||||||
-resource https://management.core.windows.net/
|
|
||||||
|
|
||||||
```
vendor/github.com/Azure/go-autorest/autorest/adal/config.go (generated, vendored, 151 lines removed)
@@ -1,151 +0,0 @@
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OAuthConfig represents the endpoints needed
|
|
||||||
// in OAuth operations
|
|
||||||
type OAuthConfig struct {
|
|
||||||
AuthorityEndpoint url.URL `json:"authorityEndpoint"`
|
|
||||||
AuthorizeEndpoint url.URL `json:"authorizeEndpoint"`
|
|
||||||
TokenEndpoint url.URL `json:"tokenEndpoint"`
|
|
||||||
DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsZero returns true if the OAuthConfig object is zero-initialized.
|
|
||||||
func (oac OAuthConfig) IsZero() bool {
|
|
||||||
return oac == OAuthConfig{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateStringParam(param, name string) error {
|
|
||||||
if len(param) == 0 {
|
|
||||||
return fmt.Errorf("parameter '" + name + "' cannot be empty")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
|
|
||||||
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
|
|
||||||
apiVer := "1.0"
|
|
||||||
return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
|
|
||||||
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
|
|
||||||
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
|
|
||||||
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
api := ""
|
|
||||||
// it's legal for tenantID to be empty so don't validate it
|
|
||||||
if apiVersion != nil {
|
|
||||||
if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
api = fmt.Sprintf("?api-version=%s", *apiVersion)
|
|
||||||
}
|
|
||||||
u, err := url.Parse(activeDirectoryEndpoint)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
authorityURL, err := u.Parse(tenantID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &OAuthConfig{
|
|
||||||
AuthorityEndpoint: *authorityURL,
|
|
||||||
AuthorizeEndpoint: *authorizeURL,
|
|
||||||
TokenEndpoint: *tokenURL,
|
|
||||||
DeviceCodeEndpoint: *deviceCodeURL,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
|
|
||||||
type MultiTenantOAuthConfig interface {
|
|
||||||
PrimaryTenant() *OAuthConfig
|
|
||||||
AuxiliaryTenants() []*OAuthConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// OAuthOptions contains optional OAuthConfig creation arguments.
|
|
||||||
type OAuthOptions struct {
|
|
||||||
APIVersion string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c OAuthOptions) apiVersion() string {
|
|
||||||
if c.APIVersion != "" {
|
|
||||||
return fmt.Sprintf("?api-version=%s", c.APIVersion)
|
|
||||||
}
|
|
||||||
return "1.0"
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration.
|
|
||||||
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
|
|
||||||
func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
|
|
||||||
if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
|
|
||||||
return nil, errors.New("must specify one to three auxiliary tenants")
|
|
||||||
}
|
|
||||||
mtCfg := multiTenantOAuthConfig{
|
|
||||||
cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
|
|
||||||
}
|
|
||||||
apiVer := options.apiVersion()
|
|
||||||
pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
|
|
||||||
}
|
|
||||||
mtCfg.cfgs[0] = pri
|
|
||||||
for i := range auxiliaryTenantIDs {
|
|
||||||
aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
|
|
||||||
}
|
|
||||||
mtCfg.cfgs[i+1] = aux
|
|
||||||
}
|
|
||||||
return mtCfg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type multiTenantOAuthConfig struct {
|
|
||||||
// first config in the slice is the primary tenant
|
|
||||||
cfgs []*OAuthConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
|
|
||||||
return m.cfgs[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
|
|
||||||
return m.cfgs[1:]
|
|
||||||
}
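For reference, the removed `NewOAuthConfig` helper expands the `"%s/oauth2/%s%s"` template into tenant-scoped authorize, token, and device-code URLs. A rough usage sketch (endpoint and tenant ID are placeholders):

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Placeholder tenant ID; NewOAuthConfig uses api-version 1.0 by default.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "TENANT_ID")
	if err != nil {
		panic(err)
	}
	// e.g. https://login.microsoftonline.com/TENANT_ID/oauth2/token?api-version=1.0
	fmt.Println(cfg.TokenEndpoint.String())
	fmt.Println(cfg.DeviceCodeEndpoint.String())
}
```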
vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go (generated, vendored, 269 lines removed)
@@ -1,269 +0,0 @@
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
/*
|
|
||||||
This file is largely based on rjw57/oauth2device's code, with the follow differences:
|
|
||||||
* scope -> resource, and only allow a single one
|
|
||||||
* receive "Message" in the DeviceCode struct and show it to users as the prompt
|
|
||||||
* azure-xplat-cli has the following behavior that this emulates:
|
|
||||||
- does not send client_secret during the token exchange
|
|
||||||
- sends resource again in the token exchange request
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
logPrefix = "autorest/adal/devicetoken:"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
|
|
||||||
ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
|
|
||||||
|
|
||||||
// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
|
|
||||||
ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
|
|
||||||
|
|
||||||
// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
|
|
||||||
ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
|
|
||||||
|
|
||||||
// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
|
|
||||||
ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
|
|
||||||
|
|
||||||
// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
|
|
||||||
ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
|
|
||||||
|
|
||||||
// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
|
|
||||||
ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
|
|
||||||
|
|
||||||
// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
|
|
||||||
ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
|
|
||||||
|
|
||||||
errCodeSendingFails = "Error occurred while sending request for Device Authorization Code"
|
|
||||||
errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint"
|
|
||||||
errTokenSendingFails = "Error occurred while sending request with device code for a token"
|
|
||||||
errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
|
|
||||||
errStatusNotOK = "Error HTTP status != 200"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DeviceCode is the object returned by the device auth endpoint
|
|
||||||
// It contains information to instruct the user to complete the auth flow
|
|
||||||
type DeviceCode struct {
|
|
||||||
DeviceCode *string `json:"device_code,omitempty"`
|
|
||||||
UserCode *string `json:"user_code,omitempty"`
|
|
||||||
VerificationURL *string `json:"verification_url,omitempty"`
|
|
||||||
ExpiresIn *int64 `json:"expires_in,string,omitempty"`
|
|
||||||
Interval *int64 `json:"interval,string,omitempty"`
|
|
||||||
|
|
||||||
Message *string `json:"message"` // Azure specific
|
|
||||||
Resource string // store the following, stored when initiating, used when exchanging
|
|
||||||
OAuthConfig OAuthConfig
|
|
||||||
ClientID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// TokenError is the object returned by the token exchange endpoint
|
|
||||||
// when something is amiss
|
|
||||||
type TokenError struct {
|
|
||||||
Error *string `json:"error,omitempty"`
|
|
||||||
ErrorCodes []int `json:"error_codes,omitempty"`
|
|
||||||
ErrorDescription *string `json:"error_description,omitempty"`
|
|
||||||
Timestamp *string `json:"timestamp,omitempty"`
|
|
||||||
TraceID *string `json:"trace_id,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeviceToken is the object return by the token exchange endpoint
|
|
||||||
// It can either look like a Token or an ErrorToken, so put both here
|
|
||||||
// and check for presence of "Error" to know if we are in error state
|
|
||||||
type deviceToken struct {
|
|
||||||
Token
|
|
||||||
TokenError
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
|
|
||||||
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
|
|
||||||
// Deprecated: use InitiateDeviceAuthWithContext() instead.
|
|
||||||
func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
|
|
||||||
return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode
|
|
||||||
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
|
|
||||||
func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
|
|
||||||
v := url.Values{
|
|
||||||
"client_id": []string{clientID},
|
|
||||||
"resource": []string{resource},
|
|
||||||
}
|
|
||||||
|
|
||||||
s := v.Encode()
|
|
||||||
body := ioutil.NopCloser(strings.NewReader(s))
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
req.ContentLength = int64(len(s))
|
|
||||||
req.Header.Set(contentType, mimeTypeFormPost)
|
|
||||||
resp, err := sender.Do(req.WithContext(ctx))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
rb, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(strings.Trim(string(rb), " ")) == 0 {
|
|
||||||
return nil, ErrDeviceCodeEmpty
|
|
||||||
}
|
|
||||||
|
|
||||||
var code DeviceCode
|
|
||||||
err = json.Unmarshal(rb, &code)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
code.ClientID = clientID
|
|
||||||
code.Resource = resource
|
|
||||||
code.OAuthConfig = oauthConfig
|
|
||||||
|
|
||||||
return &code, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
|
|
||||||
// to see if the device flow has: been completed, timed out, or otherwise failed
|
|
||||||
// Deprecated: use CheckForUserCompletionWithContext() instead.
|
|
||||||
func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
|
|
||||||
return CheckForUserCompletionWithContext(context.Background(), sender, code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint
|
|
||||||
// to see if the device flow has: been completed, timed out, or otherwise failed
|
|
||||||
func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
|
|
||||||
v := url.Values{
|
|
||||||
"client_id": []string{code.ClientID},
|
|
||||||
"code": []string{*code.DeviceCode},
|
|
||||||
"grant_type": []string{OAuthGrantTypeDeviceCode},
|
|
||||||
"resource": []string{code.Resource},
|
|
||||||
}
|
|
||||||
|
|
||||||
s := v.Encode()
|
|
||||||
body := ioutil.NopCloser(strings.NewReader(s))
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
req.ContentLength = int64(len(s))
|
|
||||||
req.Header.Set(contentType, mimeTypeFormPost)
|
|
||||||
resp, err := sender.Do(req.WithContext(ctx))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
rb, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
|
|
||||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
|
|
||||||
}
|
|
||||||
if len(strings.Trim(string(rb), " ")) == 0 {
|
|
||||||
return nil, ErrOAuthTokenEmpty
|
|
||||||
}
|
|
||||||
|
|
||||||
var token deviceToken
|
|
||||||
err = json.Unmarshal(rb, &token)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if token.Error == nil {
|
|
||||||
return &token.Token, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch *token.Error {
|
|
||||||
case "authorization_pending":
|
|
||||||
return nil, ErrDeviceAuthorizationPending
|
|
||||||
case "slow_down":
|
|
||||||
return nil, ErrDeviceSlowDown
|
|
||||||
case "access_denied":
|
|
||||||
return nil, ErrDeviceAccessDenied
|
|
||||||
case "code_expired":
|
|
||||||
return nil, ErrDeviceCodeExpired
|
|
||||||
default:
|
|
||||||
return nil, ErrDeviceGeneric
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
|
|
||||||
// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
|
|
||||||
// Deprecated: use WaitForUserCompletionWithContext() instead.
|
|
||||||
func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
|
|
||||||
return WaitForUserCompletionWithContext(context.Background(), sender, code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error
|
|
||||||
// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
|
|
||||||
func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
|
|
||||||
intervalDuration := time.Duration(*code.Interval) * time.Second
|
|
||||||
waitDuration := intervalDuration
|
|
||||||
|
|
||||||
for {
|
|
||||||
token, err := CheckForUserCompletionWithContext(ctx, sender, code)
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
return token, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch err {
|
|
||||||
case ErrDeviceSlowDown:
|
|
||||||
waitDuration += waitDuration
|
|
||||||
case ErrDeviceAuthorizationPending:
|
|
||||||
// noop
|
|
||||||
default: // everything else is "fatal" to us
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if waitDuration > (intervalDuration * 3) {
|
|
||||||
return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-time.After(waitDuration):
|
|
||||||
// noop
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil, ctx.Err()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
12
vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
generated
vendored
12
vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
generated
vendored
@@ -1,12 +0,0 @@
|
|||||||
module github.com/Azure/go-autorest/autorest/adal
|
|
||||||
|
|
||||||
go 1.12
|
|
||||||
|
|
||||||
require (
|
|
||||||
github.com/Azure/go-autorest/autorest v0.9.0
|
|
||||||
github.com/Azure/go-autorest/autorest/date v0.2.0
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0
|
|
||||||
github.com/Azure/go-autorest/tracing v0.5.0
|
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
|
|
||||||
)
|
|
23
vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
generated
vendored
23
vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
generated
vendored
@@ -1,23 +0,0 @@
|
|||||||
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
|
|
||||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
|
||||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
|
||||||
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
|
|
||||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
|
||||||
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
|
|
||||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
|
|
||||||
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
|
|
||||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
|
||||||
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
|
|
||||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
24
vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
generated
vendored
24
vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
generated
vendored
@@ -1,24 +0,0 @@
|
|||||||
// +build modhack
|
|
||||||
|
|
||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
|
|
||||||
// the resultant binary.
|
|
||||||
|
|
||||||
// Necessary for safely adding multi-module repo.
|
|
||||||
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
|
|
||||||
import _ "github.com/Azure/go-autorest/autorest"
|
|
73
vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
generated
vendored
73
vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
generated
vendored
@@ -1,73 +0,0 @@
|
|||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LoadToken restores a Token object from a file located at 'path'.
|
|
||||||
func LoadToken(path string) (*Token, error) {
|
|
||||||
file, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
var token Token
|
|
||||||
|
|
||||||
dec := json.NewDecoder(file)
|
|
||||||
if err = dec.Decode(&token); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
|
|
||||||
}
|
|
||||||
return &token, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveToken persists an oauth token at the given location on disk.
|
|
||||||
// It moves the new file into place so it can safely be used to replace an existing file
|
|
||||||
// that maybe accessed by multiple processes.
|
|
||||||
func SaveToken(path string, mode os.FileMode, token Token) error {
|
|
||||||
dir := filepath.Dir(path)
|
|
||||||
err := os.MkdirAll(dir, os.ModePerm)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
newFile, err := ioutil.TempFile(dir, "token")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create the temp file to write the token: %v", err)
|
|
||||||
}
|
|
||||||
tempPath := newFile.Name()
|
|
||||||
|
|
||||||
if err := json.NewEncoder(newFile).Encode(token); err != nil {
|
|
||||||
return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
|
|
||||||
}
|
|
||||||
if err := newFile.Close(); err != nil {
|
|
||||||
return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Atomic replace to avoid multi-writer file corruptions
|
|
||||||
if err := os.Rename(tempPath, path); err != nil {
|
|
||||||
return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
|
|
||||||
}
|
|
||||||
if err := os.Chmod(path, mode); err != nil {
|
|
||||||
return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
95
vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
generated
vendored
95
vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
generated
vendored
@@ -1,95 +0,0 @@
|
|||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"net/http"
|
|
||||||
"net/http/cookiejar"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/tracing"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
contentType = "Content-Type"
|
|
||||||
mimeTypeFormPost = "application/x-www-form-urlencoded"
|
|
||||||
)
|
|
||||||
|
|
||||||
var defaultSender Sender
|
|
||||||
var defaultSenderInit = &sync.Once{}
|
|
||||||
|
|
||||||
// Sender is the interface that wraps the Do method to send HTTP requests.
|
|
||||||
//
|
|
||||||
// The standard http.Client conforms to this interface.
|
|
||||||
type Sender interface {
|
|
||||||
Do(*http.Request) (*http.Response, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SenderFunc is a method that implements the Sender interface.
|
|
||||||
type SenderFunc func(*http.Request) (*http.Response, error)
|
|
||||||
|
|
||||||
// Do implements the Sender interface on SenderFunc.
|
|
||||||
func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
|
|
||||||
return sf(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
|
|
||||||
// http.Request and pass it along or, first, pass the http.Request along then react to the
|
|
||||||
// http.Response result.
|
|
||||||
type SendDecorator func(Sender) Sender
|
|
||||||
|
|
||||||
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
|
|
||||||
func CreateSender(decorators ...SendDecorator) Sender {
|
|
||||||
return DecorateSender(sender(), decorators...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
|
|
||||||
// the Sender. Decorators are applied in the order received, but their affect upon the request
|
|
||||||
// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
|
|
||||||
// post-decorator (pass the http.Request along and react to the results in http.Response).
|
|
||||||
func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
|
|
||||||
for _, decorate := range decorators {
|
|
||||||
s = decorate(s)
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func sender() Sender {
|
|
||||||
// note that we can't init defaultSender in init() since it will
|
|
||||||
// execute before calling code has had a chance to enable tracing
|
|
||||||
defaultSenderInit.Do(func() {
|
|
||||||
// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
|
|
||||||
defaultTransport := http.DefaultTransport.(*http.Transport)
|
|
||||||
transport := &http.Transport{
|
|
||||||
Proxy: defaultTransport.Proxy,
|
|
||||||
DialContext: defaultTransport.DialContext,
|
|
||||||
MaxIdleConns: defaultTransport.MaxIdleConns,
|
|
||||||
IdleConnTimeout: defaultTransport.IdleConnTimeout,
|
|
||||||
TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
|
|
||||||
ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
|
|
||||||
TLSClientConfig: &tls.Config{
|
|
||||||
MinVersion: tls.VersionTLS12,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
var roundTripper http.RoundTripper = transport
|
|
||||||
if tracing.IsEnabled() {
|
|
||||||
roundTripper = tracing.NewTransport(transport)
|
|
||||||
}
|
|
||||||
j, _ := cookiejar.New(nil)
|
|
||||||
defaultSender = &http.Client{Jar: j, Transport: roundTripper}
|
|
||||||
})
|
|
||||||
return defaultSender
|
|
||||||
}
|
|
1130
vendor/github.com/Azure/go-autorest/autorest/adal/token.go
generated
vendored
1130
vendor/github.com/Azure/go-autorest/autorest/adal/token.go
generated
vendored
File diff suppressed because it is too large
Load Diff
45
vendor/github.com/Azure/go-autorest/autorest/adal/version.go
generated
vendored
45
vendor/github.com/Azure/go-autorest/autorest/adal/version.go
generated
vendored
@@ -1,45 +0,0 @@
|
|||||||
package adal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
const number = "v1.0.0"
|
|
||||||
|
|
||||||
var (
|
|
||||||
ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s",
|
|
||||||
runtime.Version(),
|
|
||||||
runtime.GOARCH,
|
|
||||||
runtime.GOOS,
|
|
||||||
number,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version.
|
|
||||||
func UserAgent() string {
|
|
||||||
return ua
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddToUserAgent adds an extension to the current user agent
|
|
||||||
func AddToUserAgent(extension string) error {
|
|
||||||
if extension != "" {
|
|
||||||
ua = fmt.Sprintf("%s %s", ua, extension)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua)
|
|
||||||
}
|
|
336
vendor/github.com/Azure/go-autorest/autorest/authorization.go
generated
vendored
336
vendor/github.com/Azure/go-autorest/autorest/authorization.go
generated
vendored
@@ -1,336 +0,0 @@
|
|||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest/adal"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
bearerChallengeHeader = "Www-Authenticate"
|
|
||||||
bearer = "Bearer"
|
|
||||||
tenantID = "tenantID"
|
|
||||||
apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key"
|
|
||||||
bingAPISdkHeader = "X-BingApis-SDK-Client"
|
|
||||||
golangBingAPISdkHeaderValue = "Go-SDK"
|
|
||||||
authorization = "Authorization"
|
|
||||||
basic = "Basic"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Authorizer is the interface that provides a PrepareDecorator used to supply request
|
|
||||||
// authorization. Most often, the Authorizer decorator runs last so it has access to the full
|
|
||||||
// state of the formed HTTP request.
|
|
||||||
type Authorizer interface {
|
|
||||||
WithAuthorization() PrepareDecorator
|
|
||||||
}
|
|
||||||
|
|
||||||
// NullAuthorizer implements a default, "do nothing" Authorizer.
|
|
||||||
type NullAuthorizer struct{}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that does nothing.
|
|
||||||
func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return WithNothing()
|
|
||||||
}
|
|
||||||
|
|
||||||
// APIKeyAuthorizer implements API Key authorization.
|
|
||||||
type APIKeyAuthorizer struct {
|
|
||||||
headers map[string]interface{}
|
|
||||||
queryParameters map[string]interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers.
|
|
||||||
func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
|
|
||||||
return NewAPIKeyAuthorizer(headers, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters.
|
|
||||||
func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
|
|
||||||
return NewAPIKeyAuthorizer(nil, queryParameters)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers.
|
|
||||||
func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
|
|
||||||
return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters.
|
|
||||||
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
|
|
||||||
type CognitiveServicesAuthorizer struct {
|
|
||||||
subscriptionKey string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCognitiveServicesAuthorizer is
|
|
||||||
func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
|
|
||||||
return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization is
|
|
||||||
func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
headers := make(map[string]interface{})
|
|
||||||
headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
|
|
||||||
headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
|
|
||||||
|
|
||||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
|
||||||
}
|
|
||||||
|
|
||||||
// BearerAuthorizer implements the bearer authorization
|
|
||||||
type BearerAuthorizer struct {
|
|
||||||
tokenProvider adal.OAuthTokenProvider
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider
|
|
||||||
func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
|
|
||||||
return &BearerAuthorizer{tokenProvider: tp}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
|
||||||
// value is "Bearer " followed by the token.
|
|
||||||
//
|
|
||||||
// By default, the token will be automatically refreshed through the Refresher interface.
|
|
||||||
func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
// the ordering is important here, prefer RefresherWithContext if available
|
|
||||||
if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok {
|
|
||||||
err = refresher.EnsureFreshWithContext(r.Context())
|
|
||||||
} else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok {
|
|
||||||
err = refresher.EnsureFresh()
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
var resp *http.Response
|
|
||||||
if tokError, ok := err.(adal.TokenRefreshError); ok {
|
|
||||||
resp = tokError.Response()
|
|
||||||
}
|
|
||||||
return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
|
|
||||||
"Failed to refresh the Token for request to %s", r.URL)
|
|
||||||
}
|
|
||||||
return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BearerAuthorizerCallbackFunc is the authentication callback signature.
|
|
||||||
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
|
|
||||||
|
|
||||||
// BearerAuthorizerCallback implements bearer authorization via a callback.
|
|
||||||
type BearerAuthorizerCallback struct {
|
|
||||||
sender Sender
|
|
||||||
callback BearerAuthorizerCallbackFunc
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
|
|
||||||
// is invoked when the HTTP request is submitted.
|
|
||||||
func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
|
|
||||||
if s == nil {
|
|
||||||
s = sender(tls.RenegotiateNever)
|
|
||||||
}
|
|
||||||
return &BearerAuthorizerCallback{sender: s, callback: callback}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
|
|
||||||
// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
|
|
||||||
//
|
|
||||||
// By default, the token will be automatically refreshed through the Refresher interface.
|
|
||||||
func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
// make a copy of the request and remove the body as it's not
|
|
||||||
// required and avoids us having to create a copy of it.
|
|
||||||
rCopy := *r
|
|
||||||
removeRequestBody(&rCopy)
|
|
||||||
|
|
||||||
resp, err := bacb.sender.Do(&rCopy)
|
|
||||||
if err == nil && resp.StatusCode == 401 {
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if hasBearerChallenge(resp) {
|
|
||||||
bc, err := newBearerChallenge(resp)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
if bacb.callback != nil {
|
|
||||||
ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
return Prepare(r, ba.WithAuthorization())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns true if the HTTP response contains a bearer challenge
|
|
||||||
func hasBearerChallenge(resp *http.Response) bool {
|
|
||||||
authHeader := resp.Header.Get(bearerChallengeHeader)
|
|
||||||
if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
type bearerChallenge struct {
|
|
||||||
values map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
|
|
||||||
challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
|
|
||||||
trimmedChallenge := challenge[len(bearer)+1:]
|
|
||||||
|
|
||||||
// challenge is a set of key=value pairs that are comma delimited
|
|
||||||
pairs := strings.Split(trimmedChallenge, ",")
|
|
||||||
if len(pairs) < 1 {
|
|
||||||
err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
|
|
||||||
return bc, err
|
|
||||||
}
|
|
||||||
|
|
||||||
bc.values = make(map[string]string)
|
|
||||||
for i := range pairs {
|
|
||||||
trimmedPair := strings.TrimSpace(pairs[i])
|
|
||||||
pair := strings.Split(trimmedPair, "=")
|
|
||||||
if len(pair) == 2 {
|
|
||||||
// remove the enclosing quotes
|
|
||||||
key := strings.Trim(pair[0], "\"")
|
|
||||||
value := strings.Trim(pair[1], "\"")
|
|
||||||
|
|
||||||
switch key {
|
|
||||||
case "authorization", "authorization_uri":
|
|
||||||
// strip the tenant ID from the authorization URL
|
|
||||||
asURL, err := url.Parse(value)
|
|
||||||
if err != nil {
|
|
||||||
return bc, err
|
|
||||||
}
|
|
||||||
bc.values[tenantID] = asURL.Path[1:]
|
|
||||||
default:
|
|
||||||
bc.values[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return bc, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
|
|
||||||
type EventGridKeyAuthorizer struct {
|
|
||||||
topicKey string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
|
|
||||||
// with the specified topic key.
|
|
||||||
func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
|
|
||||||
return EventGridKeyAuthorizer{topicKey: topicKey}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
|
|
||||||
func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
headers := map[string]interface{}{
|
|
||||||
"aeg-sas-key": egta.topicKey,
|
|
||||||
}
|
|
||||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
|
||||||
}
|
|
||||||
|
|
||||||
// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
|
|
||||||
// with the value "Basic <TOKEN>" where <TOKEN> is a base64-encoded username:password tuple.
|
|
||||||
type BasicAuthorizer struct {
|
|
||||||
userName string
|
|
||||||
password string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
|
|
||||||
func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
|
|
||||||
return &BasicAuthorizer{
|
|
||||||
userName: userName,
|
|
||||||
password: password,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
|
||||||
// value is "Basic " followed by the base64-encoded username:password tuple.
|
|
||||||
func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
headers := make(map[string]interface{})
|
|
||||||
headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
|
|
||||||
|
|
||||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
|
||||||
}
|
|
||||||
|
|
||||||
// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
|
|
||||||
type MultiTenantServicePrincipalTokenAuthorizer interface {
|
|
||||||
WithAuthorization() PrepareDecorator
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider
|
|
||||||
func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
|
|
||||||
return &multiTenantSPTAuthorizer{tp: tp}
|
|
||||||
}
|
|
||||||
|
|
||||||
type multiTenantSPTAuthorizer struct {
|
|
||||||
tp adal.MultitenantOAuthTokenProvider
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
|
|
||||||
// primary token along with the auxiliary authorization header using the auxiliary tokens.
|
|
||||||
//
|
|
||||||
// By default, the token will be automatically refreshed through the Refresher interface.
|
|
||||||
func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
|
|
||||||
err = refresher.EnsureFreshWithContext(r.Context())
|
|
||||||
if err != nil {
|
|
||||||
var resp *http.Response
|
|
||||||
if tokError, ok := err.(adal.TokenRefreshError); ok {
|
|
||||||
resp = tokError.Response()
|
|
||||||
}
|
|
||||||
return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
|
|
||||||
"Failed to refresh one or more Tokens for request to %s", r.URL)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
auxTokens := mt.tp.AuxiliaryOAuthTokens()
|
|
||||||
for i := range auxTokens {
|
|
||||||
auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
|
|
||||||
}
|
|
||||||
return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
67
vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
generated
vendored
67
vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
generated
vendored
@@ -1,67 +0,0 @@
|
|||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SASTokenAuthorizer implements an authorization for SAS Token Authentication
|
|
||||||
// this can be used for interaction with Blob Storage Endpoints
|
|
||||||
type SASTokenAuthorizer struct {
|
|
||||||
sasToken string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials
|
|
||||||
func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) {
|
|
||||||
if strings.TrimSpace(sasToken) == "" {
|
|
||||||
return nil, fmt.Errorf("sasToken cannot be empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
token := sasToken
|
|
||||||
if strings.HasPrefix(sasToken, "?") {
|
|
||||||
token = strings.TrimPrefix(sasToken, "?")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &SASTokenAuthorizer{
|
|
||||||
sasToken: token,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the
|
|
||||||
// URI's query parameters. This can be used for the Blob, Queue, and File Services.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature
|
|
||||||
func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.URL.RawQuery != "" {
|
|
||||||
r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken)
|
|
||||||
} else {
|
|
||||||
r.URL.RawQuery = sas.sasToken
|
|
||||||
}
|
|
||||||
|
|
||||||
r.RequestURI = r.URL.String()
|
|
||||||
return Prepare(r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
301
vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
generated
vendored
301
vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
generated
vendored
@@ -1,301 +0,0 @@
|
|||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SharedKeyType defines the enumeration for the various shared key types.
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types.
|
|
||||||
type SharedKeyType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// SharedKey is used to authorize against blobs, files and queues services.
|
|
||||||
SharedKey SharedKeyType = "sharedKey"
|
|
||||||
|
|
||||||
// SharedKeyForTable is used to authorize against the table service.
|
|
||||||
SharedKeyForTable SharedKeyType = "sharedKeyTable"
|
|
||||||
|
|
||||||
// SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for
|
|
||||||
// backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead.
|
|
||||||
SharedKeyLite SharedKeyType = "sharedKeyLite"
|
|
||||||
|
|
||||||
// SharedKeyLiteForTable is used to authorize against the table service. It's provided for
|
|
||||||
// backwards compatibility with older table API versions. Prefer SharedKeyForTable instead.
|
|
||||||
SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
headerAccept = "Accept"
|
|
||||||
headerAcceptCharset = "Accept-Charset"
|
|
||||||
headerContentEncoding = "Content-Encoding"
|
|
||||||
headerContentLength = "Content-Length"
|
|
||||||
headerContentMD5 = "Content-MD5"
|
|
||||||
headerContentLanguage = "Content-Language"
|
|
||||||
headerIfModifiedSince = "If-Modified-Since"
|
|
||||||
headerIfMatch = "If-Match"
|
|
||||||
headerIfNoneMatch = "If-None-Match"
|
|
||||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
|
||||||
headerDate = "Date"
|
|
||||||
headerXMSDate = "X-Ms-Date"
|
|
||||||
headerXMSVersion = "x-ms-version"
|
|
||||||
headerRange = "Range"
|
|
||||||
)
|
|
||||||
|
|
||||||
const storageEmulatorAccountName = "devstoreaccount1"
|
|
||||||
|
|
||||||
// SharedKeyAuthorizer implements an authorization for Shared Key
|
|
||||||
// this can be used for interaction with Blob, File and Queue Storage Endpoints
|
|
||||||
type SharedKeyAuthorizer struct {
|
|
||||||
accountName string
|
|
||||||
accountKey []byte
|
|
||||||
keyType SharedKeyType
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type.
|
|
||||||
func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) {
|
|
||||||
key, err := base64.StdEncoding.DecodeString(accountKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("malformed storage account key: %v", err)
|
|
||||||
}
|
|
||||||
return &SharedKeyAuthorizer{
|
|
||||||
accountName: accountName,
|
|
||||||
accountKey: key,
|
|
||||||
keyType: keyType,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
|
||||||
// value is "<SharedKeyType> " followed by the computed key.
|
|
||||||
// This can be used for the Blob, Queue, and File Services
|
|
||||||
//
|
|
||||||
// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key
|
|
||||||
// You may use Shared Key authorization to authorize a request made against the
|
|
||||||
// 2009-09-19 version and later of the Blob and Queue services,
|
|
||||||
// and version 2014-02-14 and later of the File services.
|
|
||||||
func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType)
|
|
||||||
return Prepare(r, WithHeader(headerAuthorization, sk))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) {
|
|
||||||
canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Header == nil {
|
|
||||||
req.Header = http.Header{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensure date is set
|
|
||||||
if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" {
|
|
||||||
date := time.Now().UTC().Format(http.TimeFormat)
|
|
||||||
req.Header.Set(headerXMSDate, date)
|
|
||||||
}
|
|
||||||
canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return createAuthorizationHeader(accName, accKey, canString, keyType), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) {
|
|
||||||
errMsg := "buildCanonicalizedResource error: %s"
|
|
||||||
u, err := url.Parse(uri)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf(errMsg, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
cr := bytes.NewBufferString("")
|
|
||||||
if accountName != storageEmulatorAccountName {
|
|
||||||
cr.WriteString("/")
|
|
||||||
cr.WriteString(getCanonicalizedAccountName(accountName))
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(u.Path) > 0 {
|
|
||||||
// Any portion of the CanonicalizedResource string that is derived from
|
|
||||||
// the resource's URI should be encoded exactly as it is in the URI.
|
|
||||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
|
||||||
cr.WriteString(u.EscapedPath())
|
|
||||||
}
|
|
||||||
|
|
||||||
params, err := url.ParseQuery(u.RawQuery)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf(errMsg, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
|
|
||||||
if keyType == SharedKey {
|
|
||||||
if len(params) > 0 {
|
|
||||||
cr.WriteString("\n")
|
|
||||||
|
|
||||||
keys := []string{}
|
|
||||||
for key := range params {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
|
|
||||||
completeParams := []string{}
|
|
||||||
for _, key := range keys {
|
|
||||||
if len(params[key]) > 1 {
|
|
||||||
sort.Strings(params[key])
|
|
||||||
}
|
|
||||||
|
|
||||||
completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
|
|
||||||
}
|
|
||||||
cr.WriteString(strings.Join(completeParams, "\n"))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// search for "comp" parameter, if exists then add it to canonicalizedresource
|
|
||||||
if v, ok := params["comp"]; ok {
|
|
||||||
cr.WriteString("?comp=" + v[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(cr.Bytes()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getCanonicalizedAccountName(accountName string) string {
|
|
||||||
// since we may be trying to access a secondary storage account, we need to
|
|
||||||
// remove the -secondary part of the storage name
|
|
||||||
return strings.TrimSuffix(accountName, "-secondary")
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) {
|
|
||||||
contentLength := headers.Get(headerContentLength)
|
|
||||||
if contentLength == "0" {
|
|
||||||
contentLength = ""
|
|
||||||
}
|
|
||||||
date := headers.Get(headerDate)
|
|
||||||
if v := headers.Get(headerXMSDate); v != "" {
|
|
||||||
if keyType == SharedKey || keyType == SharedKeyLite {
|
|
||||||
date = ""
|
|
||||||
} else {
|
|
||||||
date = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var canString string
|
|
||||||
switch keyType {
|
|
||||||
case SharedKey:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
verb,
|
|
||||||
headers.Get(headerContentEncoding),
|
|
||||||
headers.Get(headerContentLanguage),
|
|
||||||
contentLength,
|
|
||||||
headers.Get(headerContentMD5),
|
|
||||||
headers.Get(headerContentType),
|
|
||||||
date,
|
|
||||||
headers.Get(headerIfModifiedSince),
|
|
||||||
headers.Get(headerIfMatch),
|
|
||||||
headers.Get(headerIfNoneMatch),
|
|
||||||
headers.Get(headerIfUnmodifiedSince),
|
|
||||||
headers.Get(headerRange),
|
|
||||||
buildCanonicalizedHeader(headers),
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
case SharedKeyForTable:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
verb,
|
|
||||||
headers.Get(headerContentMD5),
|
|
||||||
headers.Get(headerContentType),
|
|
||||||
date,
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
case SharedKeyLite:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
verb,
|
|
||||||
headers.Get(headerContentMD5),
|
|
||||||
headers.Get(headerContentType),
|
|
||||||
date,
|
|
||||||
buildCanonicalizedHeader(headers),
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
case SharedKeyLiteForTable:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
date,
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
default:
|
|
||||||
return "", fmt.Errorf("key type '%s' is not supported", keyType)
|
|
||||||
}
|
|
||||||
return canString, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildCanonicalizedHeader(headers http.Header) string {
|
|
||||||
cm := make(map[string]string)
|
|
||||||
|
|
||||||
for k := range headers {
|
|
||||||
headerName := strings.TrimSpace(strings.ToLower(k))
|
|
||||||
if strings.HasPrefix(headerName, "x-ms-") {
|
|
||||||
cm[headerName] = headers.Get(k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cm) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
keys := []string{}
|
|
||||||
for key := range cm {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Strings(keys)
|
|
||||||
|
|
||||||
ch := bytes.NewBufferString("")
|
|
||||||
|
|
||||||
for _, key := range keys {
|
|
||||||
ch.WriteString(key)
|
|
||||||
ch.WriteRune(':')
|
|
||||||
ch.WriteString(cm[key])
|
|
||||||
ch.WriteRune('\n')
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.TrimSuffix(string(ch.Bytes()), "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string {
|
|
||||||
h := hmac.New(sha256.New, accountKey)
|
|
||||||
h.Write([]byte(canonicalizedString))
|
|
||||||
signature := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
|
||||||
var key string
|
|
||||||
switch keyType {
|
|
||||||
case SharedKey, SharedKeyForTable:
|
|
||||||
key = "SharedKey"
|
|
||||||
case SharedKeyLite, SharedKeyLiteForTable:
|
|
||||||
key = "SharedKeyLite"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature)
|
|
||||||
}
|
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user